From 975f66f2eebe9dadba04f275774d4ab83f74cf25 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 13 Apr 2024 14:04:41 +0200
Subject: Adding upstream version 7.7.0+dfsg.

Signed-off-by: Daniel Baumann
---
 .../plugins/modules/aerospike_migrations.py | 529 +++++
 .../general/plugins/modules/airbrake_deployment.py | 169 ++
 .../general/plugins/modules/aix_devices.py | 377 +++
 .../general/plugins/modules/aix_filesystem.py | 573 +++++
 .../general/plugins/modules/aix_inittab.py | 255 ++
 .../community/general/plugins/modules/aix_lvg.py | 371 +++
 .../community/general/plugins/modules/aix_lvol.py | 347 +++
 .../general/plugins/modules/alerta_customer.py | 207 ++
 .../general/plugins/modules/ali_instance.py | 1012 ++++++++
 .../general/plugins/modules/ali_instance_info.py | 407 ++++
 .../general/plugins/modules/alternatives.py | 407 ++++
 .../plugins/modules/ansible_galaxy_install.py | 374 +++
 .../general/plugins/modules/apache2_mod_proxy.py | 452 ++++
 .../general/plugins/modules/apache2_module.py | 297 +++
 .../community/general/plugins/modules/apk.py | 378 +++
 .../community/general/plugins/modules/apt_repo.py | 154 ++
 .../community/general/plugins/modules/apt_rpm.py | 272 +++
 .../community/general/plugins/modules/archive.py | 686 ++++++
 .../general/plugins/modules/atomic_container.py | 217 ++
 .../general/plugins/modules/atomic_host.py | 105 +
 .../general/plugins/modules/atomic_image.py | 177 ++
 .../community/general/plugins/modules/awall.py | 164 ++
 .../community/general/plugins/modules/beadm.py | 415 ++++
 .../community/general/plugins/modules/bearychat.py | 175 ++
 .../community/general/plugins/modules/bigpanda.py | 226 ++
 .../plugins/modules/bitbucket_access_key.py | 281 +++
 .../plugins/modules/bitbucket_pipeline_key_pair.py | 207 ++
 .../modules/bitbucket_pipeline_known_host.py | 304 +++
 .../plugins/modules/bitbucket_pipeline_variable.py | 276 +++
 .../community/general/plugins/modules/bower.py | 236 ++
 .../general/plugins/modules/btrfs_info.py | 109 +
 .../general/plugins/modules/btrfs_subvolume.py | 682 ++++++
 .../community/general/plugins/modules/bundler.py | 211 ++
 .../community/general/plugins/modules/bzr.py | 202 ++
 .../community/general/plugins/modules/campfire.py | 162 ++
 .../general/plugins/modules/capabilities.py | 188 ++
 .../community/general/plugins/modules/cargo.py | 213 ++
 .../community/general/plugins/modules/catapult.py | 162 ++
 .../general/plugins/modules/circonus_annotation.py | 243 ++
 .../general/plugins/modules/cisco_webex.py | 197 ++
 .../general/plugins/modules/clc_aa_policy.py | 353 +++
 .../general/plugins/modules/clc_alert_policy.py | 536 +++++
 .../plugins/modules/clc_blueprint_package.py | 309 +++
 .../general/plugins/modules/clc_firewall_policy.py | 596 +++++
 .../community/general/plugins/modules/clc_group.py | 522 ++++
 .../general/plugins/modules/clc_loadbalancer.py | 945 ++++++++
 .../general/plugins/modules/clc_modify_server.py | 975 ++++++++
 .../general/plugins/modules/clc_publicip.py | 369 +++
 .../general/plugins/modules/clc_server.py | 1570 ++++++++++++
 .../general/plugins/modules/clc_server_snapshot.py | 419 ++++
 .../plugins/modules/cloud_init_data_facts.py | 133 ++
 .../general/plugins/modules/cloudflare_dns.py | 893 +++++++
 .../general/plugins/modules/cobbler_sync.py | 150 ++
 .../general/plugins/modules/cobbler_system.py | 348 +++
 .../community/general/plugins/modules/composer.py | 275 +++
 .../community/general/plugins/modules/consul.py | 635 +++++
 .../general/plugins/modules/consul_acl.py | 691 ++++++
 .../community/general/plugins/modules/consul_kv.py | 336 +++
 .../general/plugins/modules/consul_session.py | 307 +++
 .../community/general/plugins/modules/copr.py | 500 ++++
 .../community/general/plugins/modules/cpanm.py | 247 ++
 .../community/general/plugins/modules/cronvar.py | 431 ++++
 .../community/general/plugins/modules/crypttab.py | 362 +++
 .../general/plugins/modules/datadog_downtime.py | 315 +++
 .../general/plugins/modules/datadog_event.py | 193 ++
 .../general/plugins/modules/datadog_monitor.py | 428 ++++
 .../community/general/plugins/modules/dconf.py | 490 ++++
 .../general/plugins/modules/deploy_helper.py | 535 +++++
 .../plugins/modules/dimensiondata_network.py | 303 +++
 .../general/plugins/modules/dimensiondata_vlan.py | 564 +++++
 .../community/general/plugins/modules/discord.py | 223 ++
 .../general/plugins/modules/django_manage.py | 418 ++++
 .../general/plugins/modules/dnf_versionlock.py | 355 +++
 .../community/general/plugins/modules/dnsimple.py | 434 ++++
 .../general/plugins/modules/dnsimple_info.py | 329 +++
 .../general/plugins/modules/dnsmadeeasy.py | 724 ++++++
 .../general/plugins/modules/dpkg_divert.py | 369 +++
 .../general/plugins/modules/easy_install.py | 206 ++
 .../general/plugins/modules/ejabberd_user.py | 195 ++
 .../plugins/modules/elasticsearch_plugin.py | 309 +++
 .../general/plugins/modules/emc_vnx_sg_member.py | 181 ++
 .../community/general/plugins/modules/etcd3.py | 261 ++
 .../community/general/plugins/modules/facter.py | 80 +
 .../community/general/plugins/modules/filesize.py | 492 ++++
 .../general/plugins/modules/filesystem.py | 606 +++++
 .../community/general/plugins/modules/flatpak.py | 350 +++
 .../general/plugins/modules/flatpak_remote.py | 273 +++
 .../community/general/plugins/modules/flowdock.py | 205 ++
 .../general/plugins/modules/gandi_livedns.py | 193 ++
 .../general/plugins/modules/gconftool2.py | 163 ++
 .../general/plugins/modules/gconftool2_info.py | 78 +
 .../community/general/plugins/modules/gem.py | 353 +++
 .../general/plugins/modules/git_config.py | 290 +++
 .../general/plugins/modules/github_deploy_key.py | 347 +++
 .../general/plugins/modules/github_issue.py | 124 +
 .../general/plugins/modules/github_key.py | 250 ++
 .../general/plugins/modules/github_release.py | 221 ++
 .../general/plugins/modules/github_repo.py | 279 +++
 .../general/plugins/modules/github_webhook.py | 303 +++
 .../general/plugins/modules/github_webhook_info.py | 179 ++
 .../general/plugins/modules/gitlab_branch.py | 183 ++
 .../general/plugins/modules/gitlab_deploy_key.py | 301 +++
 .../general/plugins/modules/gitlab_group.py | 400 ++++
 .../plugins/modules/gitlab_group_members.py | 441 ++++
 .../plugins/modules/gitlab_group_variable.py | 455 ++++
 .../general/plugins/modules/gitlab_hook.py | 384 +++
 .../general/plugins/modules/gitlab_project.py | 678 ++++++
 .../plugins/modules/gitlab_project_badge.py | 216 ++
 .../plugins/modules/gitlab_project_members.py | 449 ++++
 .../plugins/modules/gitlab_project_variable.py | 486 ++++
 .../plugins/modules/gitlab_protected_branch.py | 199 ++
 .../general/plugins/modules/gitlab_runner.py | 466 ++++
 .../general/plugins/modules/gitlab_user.py | 691 ++++++
 .../community/general/plugins/modules/grove.py | 125 +
 .../community/general/plugins/modules/gunicorn.py | 233 ++
 .../general/plugins/modules/hana_query.py | 219 ++
 .../community/general/plugins/modules/haproxy.py | 488 ++++
 .../general/plugins/modules/heroku_collaborator.py | 138 ++
 .../community/general/plugins/modules/hg.py | 303 +++
 .../community/general/plugins/modules/hipchat.py | 220 ++
 .../community/general/plugins/modules/homebrew.py | 981 ++++++++
 .../general/plugins/modules/homebrew_cask.py | 895 +++++++
 .../general/plugins/modules/homebrew_tap.py | 279 +++
 .../community/general/plugins/modules/homectl.py | 658 +++++
 .../plugins/modules/honeybadger_deployment.py | 136 ++
 .../general/plugins/modules/hpilo_boot.py | 218 ++
 .../general/plugins/modules/hpilo_info.py | 271 +++
 .../community/general/plugins/modules/hponcfg.py | 120 +
 .../community/general/plugins/modules/htpasswd.py | 286 +++
 .../general/plugins/modules/hwc_ecs_instance.py | 2142 +++++++++++++++++
 .../general/plugins/modules/hwc_evs_disk.py | 1217 ++++++++++
 .../general/plugins/modules/hwc_network_vpc.py | 500 ++++
 .../general/plugins/modules/hwc_smn_topic.py | 344 +++
 .../general/plugins/modules/hwc_vpc_eip.py | 884 +++++++
 .../plugins/modules/hwc_vpc_peering_connect.py | 698 ++++++
 .../general/plugins/modules/hwc_vpc_port.py | 1167 +++++++++
 .../general/plugins/modules/hwc_vpc_private_ip.py | 360 +++
 .../general/plugins/modules/hwc_vpc_route.py | 443 ++++
 .../plugins/modules/hwc_vpc_security_group.py | 650 +++++
 .../plugins/modules/hwc_vpc_security_group_rule.py | 576 +++++
 .../general/plugins/modules/hwc_vpc_subnet.py | 741 ++++++
 .../general/plugins/modules/ibm_sa_domain.py | 174 ++
 .../general/plugins/modules/ibm_sa_host.py | 131 +
 .../general/plugins/modules/ibm_sa_host_ports.py | 139 ++
 .../general/plugins/modules/ibm_sa_pool.py | 128 +
 .../general/plugins/modules/ibm_sa_vol.py | 118 +
 .../general/plugins/modules/ibm_sa_vol_map.py | 148 ++
 .../general/plugins/modules/icinga2_feature.py | 134 ++
 .../general/plugins/modules/icinga2_host.py | 337 +++
 .../plugins/modules/idrac_redfish_command.py | 246 ++
 .../plugins/modules/idrac_redfish_config.py | 339 +++
 .../general/plugins/modules/idrac_redfish_info.py | 251 ++
 .../general/plugins/modules/ilo_redfish_command.py | 175 ++
 .../general/plugins/modules/ilo_redfish_config.py | 194 ++
 .../general/plugins/modules/ilo_redfish_info.py | 189 ++
 .../community/general/plugins/modules/imc_rest.py | 441 ++++
 .../community/general/plugins/modules/imgadm.py | 319 +++
 .../community/general/plugins/modules/infinity.py | 575 +++++
 .../general/plugins/modules/influxdb_database.py | 149 ++
 .../general/plugins/modules/influxdb_query.py | 108 +
 .../plugins/modules/influxdb_retention_policy.py | 350 +++
 .../general/plugins/modules/influxdb_user.py | 298 +++
 .../general/plugins/modules/influxdb_write.py | 103 +
 .../community/general/plugins/modules/ini_file.py | 490 ++++
 .../community/general/plugins/modules/installp.py | 300 +++
 .../general/plugins/modules/interfaces_file.py | 416 ++++
 .../community/general/plugins/modules/ip_netns.py | 140 ++
 .../general/plugins/modules/ipa_config.py | 369 +++
 .../general/plugins/modules/ipa_dnsrecord.py | 352 +++
 .../general/plugins/modules/ipa_dnszone.py | 204 ++
 .../community/general/plugins/modules/ipa_group.py | 342 +++
 .../general/plugins/modules/ipa_hbacrule.py | 362 +++
 .../community/general/plugins/modules/ipa_host.py | 312 +++
 .../general/plugins/modules/ipa_hostgroup.py | 228 ++
 .../general/plugins/modules/ipa_otpconfig.py | 179 ++
 .../general/plugins/modules/ipa_otptoken.py | 534 +++++
 .../general/plugins/modules/ipa_pwpolicy.py | 260 ++
 .../community/general/plugins/modules/ipa_role.py | 309 +++
 .../general/plugins/modules/ipa_service.py | 226 ++
 .../community/general/plugins/modules/ipa_subca.py | 219 ++
 .../general/plugins/modules/ipa_sudocmd.py | 158 ++
 .../general/plugins/modules/ipa_sudocmdgroup.py | 186 ++
 .../general/plugins/modules/ipa_sudorule.py | 471 ++++
 .../community/general/plugins/modules/ipa_user.py | 404 ++++
 .../community/general/plugins/modules/ipa_vault.py | 256 ++
 .../general/plugins/modules/ipify_facts.py | 110 +
 .../general/plugins/modules/ipinfoio_facts.py | 136 ++
 .../community/general/plugins/modules/ipmi_boot.py | 225 ++
 .../general/plugins/modules/ipmi_power.py | 277 +++
 .../general/plugins/modules/iptables_state.py | 654 +++++
 .../general/plugins/modules/ipwcli_dns.py | 358 +++
 .../community/general/plugins/modules/irc.py | 311 +++
 .../general/plugins/modules/iso_create.py | 305 +++
 .../general/plugins/modules/iso_customize.py | 347 +++
 .../general/plugins/modules/iso_extract.py | 215 ++
 .../community/general/plugins/modules/jabber.py | 174 ++
 .../community/general/plugins/modules/java_cert.py | 585 +++++
 .../general/plugins/modules/java_keystore.py | 584 +++++
 .../community/general/plugins/modules/jboss.py | 185 ++
 .../general/plugins/modules/jenkins_build.py | 297 +++
 .../general/plugins/modules/jenkins_job.py | 386 +++
 .../general/plugins/modules/jenkins_job_info.py | 262 ++
 .../general/plugins/modules/jenkins_plugin.py | 854 +++++++
 .../general/plugins/modules/jenkins_script.py | 206 ++
 .../community/general/plugins/modules/jira.py | 828 +++++++
 .../community/general/plugins/modules/kdeconfig.py | 277 +++
 .../general/plugins/modules/kernel_blacklist.py | 126 +
 .../plugins/modules/keycloak_authentication.py | 483 ++++
 .../modules/keycloak_authz_authorization_scope.py | 280 +++
 .../general/plugins/modules/keycloak_client.py | 984 ++++++++
 .../plugins/modules/keycloak_client_rolemapping.py | 361 +++
 .../plugins/modules/keycloak_clientscope.py | 506 ++++
 .../plugins/modules/keycloak_clientscope_type.py | 285 +++
 .../plugins/modules/keycloak_clientsecret_info.py | 161 ++
 .../modules/keycloak_clientsecret_regenerate.py | 174 ++
 .../plugins/modules/keycloak_clienttemplate.py | 456 ++++
 .../general/plugins/modules/keycloak_group.py | 496 ++++
 .../plugins/modules/keycloak_identity_provider.py | 654 +++++
 .../general/plugins/modules/keycloak_realm.py | 826 +++++++
 .../general/plugins/modules/keycloak_realm_info.py | 138 ++
 .../general/plugins/modules/keycloak_role.py | 374 +++
 .../plugins/modules/keycloak_user_federation.py | 1021 ++++++++
 .../plugins/modules/keycloak_user_rolemapping.py | 406 ++++
 .../community/general/plugins/modules/keyring.py | 279 +++
 .../general/plugins/modules/keyring_info.py | 156 ++
 .../general/plugins/modules/kibana_plugin.py | 286 +++
 .../community/general/plugins/modules/launchd.py | 522 ++++
 .../community/general/plugins/modules/layman.py | 276 +++
 .../community/general/plugins/modules/lbu.py | 138 ++
 .../general/plugins/modules/ldap_attrs.py | 337 +++
 .../general/plugins/modules/ldap_entry.py | 286 +++
 .../general/plugins/modules/ldap_passwd.py | 151 ++
 .../general/plugins/modules/ldap_search.py | 189 ++
 .../general/plugins/modules/librato_annotation.py | 175 ++
 .../community/general/plugins/modules/linode.py | 691 ++++++
 .../community/general/plugins/modules/linode_v4.py | 319 +++
 .../general/plugins/modules/listen_ports_facts.py | 428 ++++
 .../community/general/plugins/modules/lldp.py | 88 +
 .../general/plugins/modules/locale_gen.py | 243 ++
 .../general/plugins/modules/logentries.py | 164 ++
 .../general/plugins/modules/logentries_msg.py | 105 +
 .../general/plugins/modules/logstash_plugin.py | 180 ++
 .../community/general/plugins/modules/lvg.py | 338 +++
 .../community/general/plugins/modules/lvol.py | 615 +++++
 .../general/plugins/modules/lxc_container.py | 1742 ++++++++++++++
 .../community/general/plugins/modules/lxca_cmms.py | 182 ++
 .../general/plugins/modules/lxca_nodes.py | 212 ++
 .../general/plugins/modules/lxd_container.py | 862 +++++++
 .../general/plugins/modules/lxd_profile.py | 563 +++++
 .../general/plugins/modules/lxd_project.py | 461 ++++
 .../community/general/plugins/modules/macports.py | 326 +++
 .../community/general/plugins/modules/mail.py | 418 ++++
 .../community/general/plugins/modules/make.py | 233 ++
 .../plugins/modules/manageiq_alert_profiles.py | 313 +++
 .../general/plugins/modules/manageiq_alerts.py | 357 +++
 .../general/plugins/modules/manageiq_group.py | 642 +++++
 .../general/plugins/modules/manageiq_policies.py | 202 ++
 .../plugins/modules/manageiq_policies_info.py | 121 +
 .../general/plugins/modules/manageiq_provider.py | 939 ++++++++
 .../general/plugins/modules/manageiq_tags.py | 189 ++
 .../general/plugins/modules/manageiq_tags_info.py | 113 +
 .../general/plugins/modules/manageiq_tenant.py | 550 +++++
 .../general/plugins/modules/manageiq_user.py | 325 +++
 .../community/general/plugins/modules/mas.py | 301 +++
 .../community/general/plugins/modules/matrix.py | 147 ++
 .../general/plugins/modules/mattermost.py | 187 ++
 .../general/plugins/modules/maven_artifact.py | 762 ++++++
 .../general/plugins/modules/memset_dns_reload.py | 194 ++
 .../plugins/modules/memset_memstore_info.py | 180 ++
 .../general/plugins/modules/memset_server_info.py | 305 +++
 .../general/plugins/modules/memset_zone.py | 323 +++
 .../general/plugins/modules/memset_zone_domain.py | 277 +++
 .../general/plugins/modules/memset_zone_record.py | 393 +++
 .../community/general/plugins/modules/mksysb.py | 171 ++
 .../community/general/plugins/modules/modprobe.py | 320 +++
 .../community/general/plugins/modules/monit.py | 349 +++
 .../community/general/plugins/modules/mqtt.py | 257 ++
 .../community/general/plugins/modules/mssql_db.py | 243 ++
 .../general/plugins/modules/mssql_script.py | 313 +++
 .../community/general/plugins/modules/nagios.py | 1255 ++++++++++
 .../general/plugins/modules/netcup_dns.py | 296 +++
 .../general/plugins/modules/newrelic_deployment.py | 182 ++
 .../community/general/plugins/modules/nexmo.py | 143 ++
 .../general/plugins/modules/nginx_status_info.py | 159 ++
 .../community/general/plugins/modules/nictagadm.py | 232 ++
 .../community/general/plugins/modules/nmcli.py | 2504 ++++++++++++++++++++
 .../community/general/plugins/modules/nomad_job.py | 260 ++
 .../general/plugins/modules/nomad_job_info.py | 343 +++
 .../community/general/plugins/modules/nosh.py | 559 +++++
 .../community/general/plugins/modules/npm.py | 342 +++
 .../community/general/plugins/modules/nsupdate.py | 527 ++++
 .../general/plugins/modules/ocapi_command.py | 274 +++
 .../general/plugins/modules/ocapi_info.py | 224 ++
 .../community/general/plugins/modules/oci_vcn.py | 229 ++
 .../community/general/plugins/modules/odbc.py | 176 ++
 .../plugins/modules/office_365_connector_card.py | 310 +++
 .../community/general/plugins/modules/ohai.py | 55 +
 .../general/plugins/modules/omapi_host.py | 319 +++
 .../community/general/plugins/modules/one_host.py | 292 +++
 .../community/general/plugins/modules/one_image.py | 414 ++++
 .../general/plugins/modules/one_image_info.py | 276 +++
 .../general/plugins/modules/one_service.py | 759 ++++++
 .../general/plugins/modules/one_template.py | 284 +++
 .../community/general/plugins/modules/one_vm.py | 1725 ++++++++++++++
 .../plugins/modules/oneandone_firewall_policy.py | 580 +++++
 .../plugins/modules/oneandone_load_balancer.py | 684 ++++++
 .../plugins/modules/oneandone_monitoring_policy.py | 1045 ++++++++
 .../plugins/modules/oneandone_private_network.py | 455 ++++
 .../general/plugins/modules/oneandone_public_ip.py | 338 +++
 .../general/plugins/modules/oneandone_server.py | 704 ++++++
 .../general/plugins/modules/onepassword_info.py | 390 +++
 .../plugins/modules/oneview_datacenter_info.py | 168 ++
 .../plugins/modules/oneview_enclosure_info.py | 252 ++
 .../plugins/modules/oneview_ethernet_network.py | 257 ++
 .../modules/oneview_ethernet_network_info.py | 177 ++
 .../general/plugins/modules/oneview_fc_network.py | 131 +
 .../plugins/modules/oneview_fc_network_info.py | 118 +
 .../plugins/modules/oneview_fcoe_network.py | 127 +
 .../plugins/modules/oneview_fcoe_network_info.py | 116 +
 .../modules/oneview_logical_interconnect_group.py | 174 ++
 .../oneview_logical_interconnect_group_info.py | 129 +
 .../general/plugins/modules/oneview_network_set.py | 160 ++
 .../plugins/modules/oneview_network_set_info.py | 176 ++
 .../general/plugins/modules/oneview_san_manager.py | 225 ++
 .../plugins/modules/oneview_san_manager_info.py | 129 +
 .../general/plugins/modules/online_server_info.py | 180 ++
 .../general/plugins/modules/online_user_info.py | 78 +
 .../general/plugins/modules/open_iscsi.py | 464 ++++
 .../general/plugins/modules/openbsd_pkg.py | 664 ++++++
 .../general/plugins/modules/opendj_backendprop.py | 216 ++
 .../general/plugins/modules/openwrt_init.py | 191 ++
 .../community/general/plugins/modules/opkg.py | 212 ++
 .../general/plugins/modules/osx_defaults.py | 409 ++++
 .../general/plugins/modules/ovh_ip_failover.py | 269 +++
 .../modules/ovh_ip_loadbalancing_backend.py | 320 +++
 .../general/plugins/modules/ovh_monthly_billing.py | 163 ++
 .../general/plugins/modules/pacemaker_cluster.py | 230 ++
 .../general/plugins/modules/packet_device.py | 682 ++++++
 .../general/plugins/modules/packet_ip_subnet.py | 336 +++
 .../general/plugins/modules/packet_project.py | 254 ++
 .../general/plugins/modules/packet_sshkey.py | 278 +++
 .../general/plugins/modules/packet_volume.py | 331 +++
 .../plugins/modules/packet_volume_attachment.py | 308 +++
 .../community/general/plugins/modules/pacman.py | 859 +++++++
 .../general/plugins/modules/pacman_key.py | 321 +++
 .../community/general/plugins/modules/pagerduty.py | 288 +++
 .../general/plugins/modules/pagerduty_alert.py | 263 ++
 .../general/plugins/modules/pagerduty_change.py | 200 ++
 .../general/plugins/modules/pagerduty_user.py | 254 ++
 .../general/plugins/modules/pam_limits.py | 357 +++
 .../community/general/plugins/modules/pamd.py | 853 +++++++
 .../community/general/plugins/modules/parted.py | 810 +++++++
 .../community/general/plugins/modules/pear.py | 327 +++
 .../community/general/plugins/modules/pids.py | 234 ++
 .../community/general/plugins/modules/pingdom.py | 149 ++
 .../general/plugins/modules/pip_package_info.py | 156 ++
 .../community/general/plugins/modules/pipx.py | 330 +++
 .../community/general/plugins/modules/pipx_info.py | 212 ++
 .../community/general/plugins/modules/pkg5.py | 186 ++
 .../general/plugins/modules/pkg5_publisher.py | 210 ++
 .../community/general/plugins/modules/pkgin.py | 396 ++++
 .../community/general/plugins/modules/pkgng.py | 540 +++++
 .../community/general/plugins/modules/pkgutil.py | 301 +++
 .../community/general/plugins/modules/pmem.py | 637 +++++
 .../community/general/plugins/modules/portage.py | 587 +++++
 .../general/plugins/modules/portinstall.py | 216 ++
 .../general/plugins/modules/pritunl_org.py | 206 ++
 .../general/plugins/modules/pritunl_org_info.py | 132 ++
 .../general/plugins/modules/pritunl_user.py | 361 +++
 .../general/plugins/modules/pritunl_user_info.py | 174 ++
 .../general/plugins/modules/profitbricks.py | 667 ++++++
 .../plugins/modules/profitbricks_datacenter.py | 266 +++
 .../general/plugins/modules/profitbricks_nic.py | 297 +++
 .../general/plugins/modules/profitbricks_volume.py | 440 ++++
 .../modules/profitbricks_volume_attachments.py | 267 +++
 .../community/general/plugins/modules/proxmox.py | 826 +++++++
 .../general/plugins/modules/proxmox_disk.py | 767 ++++++
 .../general/plugins/modules/proxmox_domain_info.py | 134 ++
 .../general/plugins/modules/proxmox_group_info.py | 144 ++
 .../general/plugins/modules/proxmox_kvm.py | 1433 +++++++++++
 .../general/plugins/modules/proxmox_nic.py | 311 +++
 .../general/plugins/modules/proxmox_snap.py | 363 +++
 .../plugins/modules/proxmox_storage_info.py | 191 ++
 .../general/plugins/modules/proxmox_tasks_info.py | 185 ++
 .../general/plugins/modules/proxmox_template.py | 247 ++
 .../general/plugins/modules/proxmox_user_info.py | 257 ++
 .../general/plugins/modules/pubnub_blocks.py | 639 +++++
 .../community/general/plugins/modules/pulp_repo.py | 743 ++++++
 .../community/general/plugins/modules/puppet.py | 281 +++
 .../general/plugins/modules/pushbullet.py | 197 ++
 .../community/general/plugins/modules/pushover.py | 161 ++
 .../plugins/modules/python_requirements_info.py | 215 ++
 .../community/general/plugins/modules/rax.py | 903 +++++++
 .../community/general/plugins/modules/rax_cbs.py | 237 ++
 .../general/plugins/modules/rax_cbs_attachments.py | 228 ++
 .../community/general/plugins/modules/rax_cdb.py | 268 +++
 .../general/plugins/modules/rax_cdb_database.py | 181 ++
 .../general/plugins/modules/rax_cdb_user.py | 229 ++
 .../community/general/plugins/modules/rax_clb.py | 322 +++
 .../general/plugins/modules/rax_clb_nodes.py | 293 +++
 .../general/plugins/modules/rax_clb_ssl.py | 291 +++
 .../community/general/plugins/modules/rax_dns.py | 182 ++
 .../general/plugins/modules/rax_dns_record.py | 362 +++
 .../community/general/plugins/modules/rax_facts.py | 154 ++
 .../community/general/plugins/modules/rax_files.py | 402 ++++
 .../general/plugins/modules/rax_files_objects.py | 558 +++++
 .../general/plugins/modules/rax_identity.py | 112 +
 .../general/plugins/modules/rax_keypair.py | 181 ++
 .../community/general/plugins/modules/rax_meta.py | 184 ++
 .../general/plugins/modules/rax_mon_alarm.py | 237 ++
 .../general/plugins/modules/rax_mon_check.py | 331 +++
 .../general/plugins/modules/rax_mon_entity.py | 203 ++
 .../plugins/modules/rax_mon_notification.py | 182 ++
 .../plugins/modules/rax_mon_notification_plan.py | 193 ++
 .../general/plugins/modules/rax_network.py | 146 ++
 .../community/general/plugins/modules/rax_queue.py | 147 ++
 .../general/plugins/modules/rax_scaling_group.py | 441 ++++
 .../general/plugins/modules/rax_scaling_policy.py | 294 +++
 .../community/general/plugins/modules/read_csv.py | 221 ++
 .../general/plugins/modules/redfish_command.py | 959 ++++++++
 .../general/plugins/modules/redfish_config.py | 444 ++++
 .../general/plugins/modules/redfish_info.py | 569 +++++
 .../general/plugins/modules/redhat_subscription.py | 1237 ++++++++++
 .../community/general/plugins/modules/redis.py | 335 +++
 .../general/plugins/modules/redis_data.py | 257 ++
 .../general/plugins/modules/redis_data_incr.py | 193 ++
 .../general/plugins/modules/redis_data_info.py | 116 +
 .../general/plugins/modules/redis_info.py | 240 ++
 .../community/general/plugins/modules/rhevm.py | 1506 ++++++++++++
 .../general/plugins/modules/rhn_channel.py | 200 ++
 .../general/plugins/modules/rhn_register.py | 455 ++++
 .../general/plugins/modules/rhsm_release.py | 141 ++
 .../general/plugins/modules/rhsm_repository.py | 260 ++
 .../community/general/plugins/modules/riak.py | 238 ++
 .../general/plugins/modules/rocketchat.py | 250 ++
 .../general/plugins/modules/rollbar_deployment.py | 151 ++
 .../general/plugins/modules/rpm_ostree_pkg.py | 180 ++
 .../general/plugins/modules/rundeck_acl_policy.py | 239 ++
 .../plugins/modules/rundeck_job_executions_info.py | 192 ++
 .../general/plugins/modules/rundeck_job_run.py | 322 +++
 .../general/plugins/modules/rundeck_project.py | 197 ++
 .../community/general/plugins/modules/runit.py | 262 ++
 .../plugins/modules/sap_task_list_execute.py | 348 +++
 .../general/plugins/modules/sapcar_extract.py | 228 ++
 .../community/general/plugins/modules/say.py | 99 +
 .../general/plugins/modules/scaleway_compute.py | 699 ++++++
 .../modules/scaleway_compute_private_network.py | 217 ++
 .../general/plugins/modules/scaleway_container.py | 412 ++++
 .../plugins/modules/scaleway_container_info.py | 152 ++
 .../modules/scaleway_container_namespace.py | 296 +++
 .../modules/scaleway_container_namespace_info.py | 143 ++
 .../plugins/modules/scaleway_container_registry.py | 272 +++
 .../modules/scaleway_container_registry_info.py | 142 ++
 .../plugins/modules/scaleway_database_backup.py | 379 +++
 .../general/plugins/modules/scaleway_function.py | 394 +++
 .../plugins/modules/scaleway_function_info.py | 151 ++
 .../plugins/modules/scaleway_function_namespace.py | 298 +++
 .../modules/scaleway_function_namespace_info.py | 143 ++
 .../general/plugins/modules/scaleway_image_info.py | 130 +
 .../general/plugins/modules/scaleway_ip.py | 270 +++
 .../general/plugins/modules/scaleway_ip_info.py | 114 +
 .../general/plugins/modules/scaleway_lb.py | 366 +++
 .../plugins/modules/scaleway_organization_info.py | 108 +
 .../plugins/modules/scaleway_private_network.py | 241 ++
 .../plugins/modules/scaleway_security_group.py | 245 ++
 .../modules/scaleway_security_group_info.py | 118 +
 .../modules/scaleway_security_group_rule.py | 282 +++
 .../plugins/modules/scaleway_server_info.py | 201 ++
 .../plugins/modules/scaleway_snapshot_info.py | 119 +
 .../general/plugins/modules/scaleway_sshkey.py | 179 ++
 .../general/plugins/modules/scaleway_user_data.py | 179 ++
 .../general/plugins/modules/scaleway_volume.py | 201 ++
 .../plugins/modules/scaleway_volume_info.py | 114 +
 .../general/plugins/modules/sefcontext.py | 385 +++
 .../general/plugins/modules/selinux_permissive.py | 135 ++
 .../community/general/plugins/modules/selogin.py | 254 ++
 .../community/general/plugins/modules/sendgrid.py | 280 +++
 .../general/plugins/modules/sensu_check.py | 376 +++
 .../general/plugins/modules/sensu_client.py | 269 +++
 .../general/plugins/modules/sensu_handler.py | 281 +++
 .../general/plugins/modules/sensu_silence.py | 305 +++
 .../general/plugins/modules/sensu_subscription.py | 160 ++
 .../community/general/plugins/modules/seport.py | 331 +++
 .../general/plugins/modules/serverless.py | 224 ++
 .../community/general/plugins/modules/shutdown.py | 81 +
 .../community/general/plugins/modules/sl_vm.py | 439 ++++
 .../community/general/plugins/modules/slack.py | 521 ++++
 .../community/general/plugins/modules/slackpkg.py | 211 ++
 .../general/plugins/modules/smartos_image_info.py | 119 +
 .../community/general/plugins/modules/snap.py | 413 ++++
 .../general/plugins/modules/snap_alias.py | 195 ++
 .../general/plugins/modules/snmp_facts.py | 475 ++++
 .../general/plugins/modules/solaris_zone.py | 493 ++++
 .../community/general/plugins/modules/sorcery.py | 653 +++++
 .../general/plugins/modules/spectrum_device.py | 336 +++
 .../plugins/modules/spectrum_model_attrs.py | 536 +++++
 .../plugins/modules/spotinst_aws_elastigroup.py | 1595 +++++++++++++
 .../general/plugins/modules/ss_3par_cpg.py | 302 +++
 .../general/plugins/modules/ssh_config.py | 341 +++
 .../general/plugins/modules/stackdriver.py | 223 ++
 .../general/plugins/modules/stacki_host.py | 303 +++
 .../community/general/plugins/modules/statsd.py | 179 ++
 .../plugins/modules/statusio_maintenance.py | 475 ++++
 .../community/general/plugins/modules/sudoers.py | 309 +++
 .../general/plugins/modules/supervisorctl.py | 270 +++
 .../community/general/plugins/modules/svc.py | 305 +++
 .../community/general/plugins/modules/svr4pkg.py | 270 +++
 .../community/general/plugins/modules/swdepot.py | 214 ++
 .../community/general/plugins/modules/swupd.py | 322 +++
 .../community/general/plugins/modules/syslogger.py | 198 ++
 .../community/general/plugins/modules/syspatch.py | 175 ++
 .../community/general/plugins/modules/sysrc.py | 259 ++
 .../general/plugins/modules/sysupgrade.py | 161 ++
 .../general/plugins/modules/taiga_issue.py | 319 +++
 .../community/general/plugins/modules/telegram.py | 146 ++
 .../community/general/plugins/modules/terraform.py | 659 ++++++
 .../community/general/plugins/modules/timezone.py | 923 ++++++++
 .../community/general/plugins/modules/twilio.py | 182 ++
 .../community/general/plugins/modules/typetalk.py | 136 ++
 .../general/plugins/modules/udm_dns_record.py | 233 ++
 .../general/plugins/modules/udm_dns_zone.py | 248 ++
 .../community/general/plugins/modules/udm_group.py | 184 ++
 .../community/general/plugins/modules/udm_share.py | 579 +++++
 .../community/general/plugins/modules/udm_user.py | 573 +++++
 .../community/general/plugins/modules/ufw.py | 606 +++++
 .../general/plugins/modules/uptimerobot.py | 157 ++
 .../community/general/plugins/modules/urpmi.py | 221 ++
 .../general/plugins/modules/utm_aaa_group.py | 239 ++
 .../general/plugins/modules/utm_aaa_group_info.py | 131 +
 .../plugins/modules/utm_ca_host_key_cert.py | 168 ++
 .../plugins/modules/utm_ca_host_key_cert_info.py | 109 +
 .../general/plugins/modules/utm_dns_host.py | 167 ++
 .../modules/utm_network_interface_address.py | 143 ++
 .../modules/utm_network_interface_address_info.py | 104 +
 .../plugins/modules/utm_proxy_auth_profile.py | 355 +++
 .../general/plugins/modules/utm_proxy_exception.py | 249 ++
 .../general/plugins/modules/utm_proxy_frontend.py | 286 +++
 .../plugins/modules/utm_proxy_frontend_info.py | 149 ++
 .../general/plugins/modules/utm_proxy_location.py | 226 ++
 .../plugins/modules/utm_proxy_location_info.py | 130 +
 .../community/general/plugins/modules/vdo.py | 781 ++++++
 .../plugins/modules/vertica_configuration.py | 204 ++
 .../general/plugins/modules/vertica_info.py | 287 +++
 .../general/plugins/modules/vertica_role.py | 254 ++
 .../general/plugins/modules/vertica_schema.py | 327 +++
 .../general/plugins/modules/vertica_user.py | 393 +++
 .../community/general/plugins/modules/vexata_eg.py | 216 ++
 .../general/plugins/modules/vexata_volume.py | 203 ++
 .../community/general/plugins/modules/vmadm.py | 790 ++++++
 .../community/general/plugins/modules/wakeonlan.py | 139 ++
 .../general/plugins/modules/wdc_redfish_command.py | 345 +++
 .../general/plugins/modules/wdc_redfish_info.py | 208 ++
 .../general/plugins/modules/webfaction_app.py | 207 ++
 .../general/plugins/modules/webfaction_db.py | 203 ++
 .../general/plugins/modules/webfaction_domain.py | 178 ++
 .../general/plugins/modules/webfaction_mailbox.py | 146 ++
 .../general/plugins/modules/webfaction_site.py | 217 ++
 .../community/general/plugins/modules/xattr.py | 247 ++
 .../community/general/plugins/modules/xbps.py | 351 +++
 .../general/plugins/modules/xcc_redfish_command.py | 795 +++++++
 .../general/plugins/modules/xenserver_facts.py | 212 ++
 .../general/plugins/modules/xenserver_guest.py | 2033 ++++++++++++++++
 .../plugins/modules/xenserver_guest_info.py | 224 ++
 .../plugins/modules/xenserver_guest_powerstate.py | 275 +++
 .../community/general/plugins/modules/xfconf.py | 291 +++
 .../general/plugins/modules/xfconf_info.py | 191 ++
 .../community/general/plugins/modules/xfs_quota.py | 504 ++++
 .../community/general/plugins/modules/xml.py | 996 ++++++++
 .../community/general/plugins/modules/yarn.py | 408 ++++
 .../general/plugins/modules/yum_versionlock.py | 180 ++
 .../community/general/plugins/modules/zfs.py | 295 +++
 .../general/plugins/modules/zfs_delegate_admin.py | 276 +++
 .../community/general/plugins/modules/zfs_facts.py | 260 ++
 .../community/general/plugins/modules/znode.py | 301 +++
 .../general/plugins/modules/zpool_facts.py | 199 ++
 .../community/general/plugins/modules/zypper.py | 607 +++++
 .../general/plugins/modules/zypper_repository.py | 474 ++++
 577 files changed, 204814 insertions(+)
 create mode 100644 ansible_collections/community/general/plugins/modules/aerospike_migrations.py
 create mode 100644 ansible_collections/community/general/plugins/modules/airbrake_deployment.py
 create mode 100644 ansible_collections/community/general/plugins/modules/aix_devices.py
 create mode 100644 ansible_collections/community/general/plugins/modules/aix_filesystem.py
 create mode 100644 ansible_collections/community/general/plugins/modules/aix_inittab.py
 create mode 100644 ansible_collections/community/general/plugins/modules/aix_lvg.py
 create mode 100644 ansible_collections/community/general/plugins/modules/aix_lvol.py
 create mode 100644 ansible_collections/community/general/plugins/modules/alerta_customer.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ali_instance.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ali_instance_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/alternatives.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
 create mode 100644 ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/apache2_module.py
 create mode 100644 ansible_collections/community/general/plugins/modules/apk.py
 create mode 100644 ansible_collections/community/general/plugins/modules/apt_repo.py
 create mode 100644 ansible_collections/community/general/plugins/modules/apt_rpm.py
 create mode 100644 ansible_collections/community/general/plugins/modules/archive.py
 create mode 100644 ansible_collections/community/general/plugins/modules/atomic_container.py
 create mode 100644 ansible_collections/community/general/plugins/modules/atomic_host.py
 create mode 100644 ansible_collections/community/general/plugins/modules/atomic_image.py
 create mode 100644 ansible_collections/community/general/plugins/modules/awall.py
 create mode 100644 ansible_collections/community/general/plugins/modules/beadm.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bearychat.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bigpanda.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bower.py
 create mode 100644 ansible_collections/community/general/plugins/modules/btrfs_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bundler.py
 create mode 100644 ansible_collections/community/general/plugins/modules/bzr.py
 create mode 100644 ansible_collections/community/general/plugins/modules/campfire.py
 create mode 100644 ansible_collections/community/general/plugins/modules/capabilities.py
 create mode 100644 ansible_collections/community/general/plugins/modules/cargo.py
 create mode 100644 ansible_collections/community/general/plugins/modules/catapult.py
 create mode 100644 ansible_collections/community/general/plugins/modules/circonus_annotation.py
 create mode 100644 ansible_collections/community/general/plugins/modules/cisco_webex.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_aa_policy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_alert_policy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_group.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_modify_server.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_publicip.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_server.py
 create mode 100644 ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
 create mode 100644 ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
 create mode 100644 ansible_collections/community/general/plugins/modules/cloudflare_dns.py
 create mode 100644 ansible_collections/community/general/plugins/modules/cobbler_sync.py
 create mode 100644 ansible_collections/community/general/plugins/modules/cobbler_system.py
 create mode 100644 ansible_collections/community/general/plugins/modules/composer.py
 create mode 100644 ansible_collections/community/general/plugins/modules/consul.py
 create mode 100644 ansible_collections/community/general/plugins/modules/consul_acl.py
 create mode 100644 ansible_collections/community/general/plugins/modules/consul_kv.py
 create mode 100644 ansible_collections/community/general/plugins/modules/consul_session.py
 create mode 100644 ansible_collections/community/general/plugins/modules/copr.py
 create mode 100644 ansible_collections/community/general/plugins/modules/cpanm.py
 create mode 100644 ansible_collections/community/general/plugins/modules/cronvar.py
 create mode 100644 ansible_collections/community/general/plugins/modules/crypttab.py
 create mode 100644 ansible_collections/community/general/plugins/modules/datadog_downtime.py
 create mode 100644 ansible_collections/community/general/plugins/modules/datadog_event.py
 create mode 100644 ansible_collections/community/general/plugins/modules/datadog_monitor.py
 create mode 100644 ansible_collections/community/general/plugins/modules/dconf.py
 create mode 100644 ansible_collections/community/general/plugins/modules/deploy_helper.py
 create mode 100644 ansible_collections/community/general/plugins/modules/dimensiondata_network.py
 create mode 100644 ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
 create mode 100644 ansible_collections/community/general/plugins/modules/discord.py
 create mode 100644 ansible_collections/community/general/plugins/modules/django_manage.py
 create mode 100644 ansible_collections/community/general/plugins/modules/dnf_versionlock.py
 create mode 100644 ansible_collections/community/general/plugins/modules/dnsimple.py
 create mode 100644 ansible_collections/community/general/plugins/modules/dnsimple_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/dpkg_divert.py
 create mode 100644 ansible_collections/community/general/plugins/modules/easy_install.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ejabberd_user.py
 create mode 100644 ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
 create mode 100644 ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
 create mode 100644 ansible_collections/community/general/plugins/modules/etcd3.py
 create mode 100644 ansible_collections/community/general/plugins/modules/facter.py
 create mode 100644 ansible_collections/community/general/plugins/modules/filesize.py
 create mode 100644 ansible_collections/community/general/plugins/modules/filesystem.py
 create mode 100644 ansible_collections/community/general/plugins/modules/flatpak.py
 create mode 100644 ansible_collections/community/general/plugins/modules/flatpak_remote.py
 create mode 100644 ansible_collections/community/general/plugins/modules/flowdock.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gandi_livedns.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gconftool2.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gconftool2_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gem.py
 create mode 100644 ansible_collections/community/general/plugins/modules/git_config.py
 create mode 100644 ansible_collections/community/general/plugins/modules/github_deploy_key.py
 create mode 100644 ansible_collections/community/general/plugins/modules/github_issue.py
 create mode 100644 ansible_collections/community/general/plugins/modules/github_key.py
 create mode 100644 ansible_collections/community/general/plugins/modules/github_release.py
 create mode 100644 ansible_collections/community/general/plugins/modules/github_repo.py
 create mode 100644 ansible_collections/community/general/plugins/modules/github_webhook.py
 create mode 100644 ansible_collections/community/general/plugins/modules/github_webhook_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_branch.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_group.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_group_members.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_hook.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_project.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_project_badge.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_project_members.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_runner.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gitlab_user.py
 create mode 100644 ansible_collections/community/general/plugins/modules/grove.py
 create mode 100644 ansible_collections/community/general/plugins/modules/gunicorn.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hana_query.py
 create mode 100644 ansible_collections/community/general/plugins/modules/haproxy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/heroku_collaborator.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hg.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hipchat.py
 create mode 100644 ansible_collections/community/general/plugins/modules/homebrew.py
 create mode 100644 ansible_collections/community/general/plugins/modules/homebrew_cask.py
 create mode 100644 ansible_collections/community/general/plugins/modules/homebrew_tap.py
 create mode 100644 ansible_collections/community/general/plugins/modules/homectl.py
 create mode 100644 ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hpilo_boot.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hpilo_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hponcfg.py
 create mode 100644 ansible_collections/community/general/plugins/modules/htpasswd.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_evs_disk.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_network_vpc.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_vpc_port.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
 create mode 100644 ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ibm_sa_domain.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ibm_sa_host.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py
 create mode 100644 ansible_collections/community/general/plugins/modules/icinga2_feature.py
 create mode 100644 ansible_collections/community/general/plugins/modules/icinga2_host.py
 create mode 100644 ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
 create mode 100644 ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
 create mode 100644 ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ilo_redfish_command.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ilo_redfish_config.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ilo_redfish_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/imc_rest.py
 create mode 100644 ansible_collections/community/general/plugins/modules/imgadm.py
 create mode 100644 ansible_collections/community/general/plugins/modules/infinity.py
 create mode 100644 ansible_collections/community/general/plugins/modules/influxdb_database.py
 create mode 100644 ansible_collections/community/general/plugins/modules/influxdb_query.py
 create mode 100644 ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/influxdb_user.py
 create mode 100644 ansible_collections/community/general/plugins/modules/influxdb_write.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ini_file.py
 create mode 100644 ansible_collections/community/general/plugins/modules/installp.py
 create mode 100644 ansible_collections/community/general/plugins/modules/interfaces_file.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ip_netns.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_config.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_dnszone.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_group.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_host.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_otpconfig.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_otptoken.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_role.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_service.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_subca.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_sudocmd.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_sudorule.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_user.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipa_vault.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipify_facts.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipinfoio_facts.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipmi_boot.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipmi_power.py
 create mode 100644 ansible_collections/community/general/plugins/modules/iptables_state.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ipwcli_dns.py
 create mode 100644 ansible_collections/community/general/plugins/modules/irc.py
 create mode 100644 ansible_collections/community/general/plugins/modules/iso_create.py
 create mode 100644 ansible_collections/community/general/plugins/modules/iso_customize.py
 create mode 100644 ansible_collections/community/general/plugins/modules/iso_extract.py
 create mode 100644 ansible_collections/community/general/plugins/modules/jabber.py
 create mode 100644 ansible_collections/community/general/plugins/modules/java_cert.py
 create mode 100644 ansible_collections/community/general/plugins/modules/java_keystore.py
 create mode 100644 ansible_collections/community/general/plugins/modules/jboss.py
 create mode 100644 ansible_collections/community/general/plugins/modules/jenkins_build.py
 create mode 100644 ansible_collections/community/general/plugins/modules/jenkins_job.py
 create mode 100644 ansible_collections/community/general/plugins/modules/jenkins_job_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/jenkins_plugin.py
 create mode 100644 ansible_collections/community/general/plugins/modules/jenkins_script.py
 create mode 100644 ansible_collections/community/general/plugins/modules/jira.py
 create mode 100644 ansible_collections/community/general/plugins/modules/kdeconfig.py
 create mode 100644 ansible_collections/community/general/plugins/modules/kernel_blacklist.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_authentication.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_client.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_clientscope.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_clientsecret_regenerate.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_group.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_realm.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_realm_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_role.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keyring.py
 create mode 100644 ansible_collections/community/general/plugins/modules/keyring_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/kibana_plugin.py
 create mode 100644 ansible_collections/community/general/plugins/modules/launchd.py
 create mode 100644 ansible_collections/community/general/plugins/modules/layman.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lbu.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ldap_attrs.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ldap_entry.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ldap_passwd.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ldap_search.py
 create mode 100644 ansible_collections/community/general/plugins/modules/librato_annotation.py
 create mode 100644 ansible_collections/community/general/plugins/modules/linode.py
 create mode 100644 ansible_collections/community/general/plugins/modules/linode_v4.py
 create mode 100644 ansible_collections/community/general/plugins/modules/listen_ports_facts.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lldp.py
 create mode 100644 ansible_collections/community/general/plugins/modules/locale_gen.py
 create mode 100644 ansible_collections/community/general/plugins/modules/logentries.py
 create mode 100644 ansible_collections/community/general/plugins/modules/logentries_msg.py
 create mode 100644 ansible_collections/community/general/plugins/modules/logstash_plugin.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lvg.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lvol.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lxc_container.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lxca_cmms.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lxca_nodes.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lxd_container.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lxd_profile.py
 create mode 100644 ansible_collections/community/general/plugins/modules/lxd_project.py
 create mode 100644 ansible_collections/community/general/plugins/modules/macports.py
 create mode 100644 ansible_collections/community/general/plugins/modules/mail.py
 create mode 100644 ansible_collections/community/general/plugins/modules/make.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_alerts.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_group.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_policies.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_policies_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_provider.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_tags.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_tags_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_tenant.py
 create mode 100644 ansible_collections/community/general/plugins/modules/manageiq_user.py
 create mode 100644 ansible_collections/community/general/plugins/modules/mas.py
 create mode 100644 ansible_collections/community/general/plugins/modules/matrix.py
 create mode 100644 ansible_collections/community/general/plugins/modules/mattermost.py
 create mode 100644 ansible_collections/community/general/plugins/modules/maven_artifact.py
 create mode 100644 ansible_collections/community/general/plugins/modules/memset_dns_reload.py
 create mode 100644 ansible_collections/community/general/plugins/modules/memset_memstore_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/memset_server_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/memset_zone.py
 create mode 100644 ansible_collections/community/general/plugins/modules/memset_zone_domain.py
 create mode 100644 ansible_collections/community/general/plugins/modules/memset_zone_record.py
 create mode 100644 ansible_collections/community/general/plugins/modules/mksysb.py
 create mode 100644 ansible_collections/community/general/plugins/modules/modprobe.py
 create mode 100644 ansible_collections/community/general/plugins/modules/monit.py
 create mode 100644 ansible_collections/community/general/plugins/modules/mqtt.py
 create mode 100644 ansible_collections/community/general/plugins/modules/mssql_db.py
 create mode 100644 ansible_collections/community/general/plugins/modules/mssql_script.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nagios.py
 create mode 100644 ansible_collections/community/general/plugins/modules/netcup_dns.py
 create mode 100644 ansible_collections/community/general/plugins/modules/newrelic_deployment.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nexmo.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nginx_status_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nictagadm.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nmcli.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nomad_job.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nomad_job_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nosh.py
 create mode 100644 ansible_collections/community/general/plugins/modules/npm.py
 create mode 100644 ansible_collections/community/general/plugins/modules/nsupdate.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ocapi_command.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ocapi_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oci_vcn.py
 create mode 100644 ansible_collections/community/general/plugins/modules/odbc.py
 create mode 100644 ansible_collections/community/general/plugins/modules/office_365_connector_card.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ohai.py
 create mode 100644 ansible_collections/community/general/plugins/modules/omapi_host.py
 create mode 100644 ansible_collections/community/general/plugins/modules/one_host.py
 create mode 100644 ansible_collections/community/general/plugins/modules/one_image.py
 create mode 100644 ansible_collections/community/general/plugins/modules/one_image_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/one_service.py
 create mode 100644 ansible_collections/community/general/plugins/modules/one_template.py
 create mode 100644 ansible_collections/community/general/plugins/modules/one_vm.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneandone_private_network.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneandone_server.py
 create mode 100644 ansible_collections/community/general/plugins/modules/onepassword_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_fc_network.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_network_set.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_san_manager.py
 create mode 100644 ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/online_server_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/online_user_info.py
 create mode 100644 ansible_collections/community/general/plugins/modules/open_iscsi.py
 create mode 100644 ansible_collections/community/general/plugins/modules/openbsd_pkg.py
 create mode 100644 ansible_collections/community/general/plugins/modules/opendj_backendprop.py
 create mode 100644 ansible_collections/community/general/plugins/modules/openwrt_init.py
 create mode 100644 ansible_collections/community/general/plugins/modules/opkg.py
 create mode 100644 ansible_collections/community/general/plugins/modules/osx_defaults.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
 create mode 100644 ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
 create mode 100644 ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
 create
mode 100644 ansible_collections/community/general/plugins/modules/packet_device.py create mode 100644 ansible_collections/community/general/plugins/modules/packet_ip_subnet.py create mode 100644 ansible_collections/community/general/plugins/modules/packet_project.py create mode 100644 ansible_collections/community/general/plugins/modules/packet_sshkey.py create mode 100644 ansible_collections/community/general/plugins/modules/packet_volume.py create mode 100644 ansible_collections/community/general/plugins/modules/packet_volume_attachment.py create mode 100644 ansible_collections/community/general/plugins/modules/pacman.py create mode 100644 ansible_collections/community/general/plugins/modules/pacman_key.py create mode 100644 ansible_collections/community/general/plugins/modules/pagerduty.py create mode 100644 ansible_collections/community/general/plugins/modules/pagerduty_alert.py create mode 100644 ansible_collections/community/general/plugins/modules/pagerduty_change.py create mode 100644 ansible_collections/community/general/plugins/modules/pagerduty_user.py create mode 100644 ansible_collections/community/general/plugins/modules/pam_limits.py create mode 100644 ansible_collections/community/general/plugins/modules/pamd.py create mode 100644 ansible_collections/community/general/plugins/modules/parted.py create mode 100644 ansible_collections/community/general/plugins/modules/pear.py create mode 100644 ansible_collections/community/general/plugins/modules/pids.py create mode 100644 ansible_collections/community/general/plugins/modules/pingdom.py create mode 100644 ansible_collections/community/general/plugins/modules/pip_package_info.py create mode 100644 ansible_collections/community/general/plugins/modules/pipx.py create mode 100644 ansible_collections/community/general/plugins/modules/pipx_info.py create mode 100644 ansible_collections/community/general/plugins/modules/pkg5.py create mode 100644 ansible_collections/community/general/plugins/modules/pkg5_publisher.py create mode 100644 ansible_collections/community/general/plugins/modules/pkgin.py create mode 100644 ansible_collections/community/general/plugins/modules/pkgng.py create mode 100644 ansible_collections/community/general/plugins/modules/pkgutil.py create mode 100644 ansible_collections/community/general/plugins/modules/pmem.py create mode 100644 ansible_collections/community/general/plugins/modules/portage.py create mode 100644 ansible_collections/community/general/plugins/modules/portinstall.py create mode 100644 ansible_collections/community/general/plugins/modules/pritunl_org.py create mode 100644 ansible_collections/community/general/plugins/modules/pritunl_org_info.py create mode 100644 ansible_collections/community/general/plugins/modules/pritunl_user.py create mode 100644 ansible_collections/community/general/plugins/modules/pritunl_user_info.py create mode 100644 ansible_collections/community/general/plugins/modules/profitbricks.py create mode 100644 ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py create mode 100644 ansible_collections/community/general/plugins/modules/profitbricks_nic.py create mode 100644 ansible_collections/community/general/plugins/modules/profitbricks_volume.py create mode 100644 ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_disk.py create mode 100644 
ansible_collections/community/general/plugins/modules/proxmox_domain_info.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_group_info.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_kvm.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_nic.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_snap.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_storage_info.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_template.py create mode 100644 ansible_collections/community/general/plugins/modules/proxmox_user_info.py create mode 100644 ansible_collections/community/general/plugins/modules/pubnub_blocks.py create mode 100644 ansible_collections/community/general/plugins/modules/pulp_repo.py create mode 100644 ansible_collections/community/general/plugins/modules/puppet.py create mode 100644 ansible_collections/community/general/plugins/modules/pushbullet.py create mode 100644 ansible_collections/community/general/plugins/modules/pushover.py create mode 100644 ansible_collections/community/general/plugins/modules/python_requirements_info.py create mode 100644 ansible_collections/community/general/plugins/modules/rax.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_cbs.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_cdb.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_cdb_database.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_cdb_user.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_clb.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_clb_nodes.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_clb_ssl.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_dns.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_dns_record.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_facts.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_files.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_files_objects.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_identity.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_keypair.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_meta.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_mon_alarm.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_mon_check.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_mon_entity.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_mon_notification.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_network.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_queue.py create mode 100644 
ansible_collections/community/general/plugins/modules/rax_scaling_group.py create mode 100644 ansible_collections/community/general/plugins/modules/rax_scaling_policy.py create mode 100644 ansible_collections/community/general/plugins/modules/read_csv.py create mode 100644 ansible_collections/community/general/plugins/modules/redfish_command.py create mode 100644 ansible_collections/community/general/plugins/modules/redfish_config.py create mode 100644 ansible_collections/community/general/plugins/modules/redfish_info.py create mode 100644 ansible_collections/community/general/plugins/modules/redhat_subscription.py create mode 100644 ansible_collections/community/general/plugins/modules/redis.py create mode 100644 ansible_collections/community/general/plugins/modules/redis_data.py create mode 100644 ansible_collections/community/general/plugins/modules/redis_data_incr.py create mode 100644 ansible_collections/community/general/plugins/modules/redis_data_info.py create mode 100644 ansible_collections/community/general/plugins/modules/redis_info.py create mode 100644 ansible_collections/community/general/plugins/modules/rhevm.py create mode 100644 ansible_collections/community/general/plugins/modules/rhn_channel.py create mode 100644 ansible_collections/community/general/plugins/modules/rhn_register.py create mode 100644 ansible_collections/community/general/plugins/modules/rhsm_release.py create mode 100644 ansible_collections/community/general/plugins/modules/rhsm_repository.py create mode 100644 ansible_collections/community/general/plugins/modules/riak.py create mode 100644 ansible_collections/community/general/plugins/modules/rocketchat.py create mode 100644 ansible_collections/community/general/plugins/modules/rollbar_deployment.py create mode 100644 ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py create mode 100644 ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py create mode 100644 ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py create mode 100644 ansible_collections/community/general/plugins/modules/rundeck_job_run.py create mode 100644 ansible_collections/community/general/plugins/modules/rundeck_project.py create mode 100644 ansible_collections/community/general/plugins/modules/runit.py create mode 100644 ansible_collections/community/general/plugins/modules/sap_task_list_execute.py create mode 100644 ansible_collections/community/general/plugins/modules/sapcar_extract.py create mode 100644 ansible_collections/community/general/plugins/modules/say.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_compute.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_container.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_container_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_container_namespace_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_container_registry.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_container_registry_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_database_backup.py create mode 100644 
ansible_collections/community/general/plugins/modules/scaleway_function.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_function_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_function_namespace_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_image_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_ip.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_ip_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_lb.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_organization_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_private_network.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_security_group.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_server_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_sshkey.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_user_data.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_volume.py create mode 100644 ansible_collections/community/general/plugins/modules/scaleway_volume_info.py create mode 100644 ansible_collections/community/general/plugins/modules/sefcontext.py create mode 100644 ansible_collections/community/general/plugins/modules/selinux_permissive.py create mode 100644 ansible_collections/community/general/plugins/modules/selogin.py create mode 100644 ansible_collections/community/general/plugins/modules/sendgrid.py create mode 100644 ansible_collections/community/general/plugins/modules/sensu_check.py create mode 100644 ansible_collections/community/general/plugins/modules/sensu_client.py create mode 100644 ansible_collections/community/general/plugins/modules/sensu_handler.py create mode 100644 ansible_collections/community/general/plugins/modules/sensu_silence.py create mode 100644 ansible_collections/community/general/plugins/modules/sensu_subscription.py create mode 100644 ansible_collections/community/general/plugins/modules/seport.py create mode 100644 ansible_collections/community/general/plugins/modules/serverless.py create mode 100644 ansible_collections/community/general/plugins/modules/shutdown.py create mode 100644 ansible_collections/community/general/plugins/modules/sl_vm.py create mode 100644 ansible_collections/community/general/plugins/modules/slack.py create mode 100644 ansible_collections/community/general/plugins/modules/slackpkg.py create mode 100644 ansible_collections/community/general/plugins/modules/smartos_image_info.py create mode 100644 ansible_collections/community/general/plugins/modules/snap.py create mode 100644 ansible_collections/community/general/plugins/modules/snap_alias.py create mode 100644 ansible_collections/community/general/plugins/modules/snmp_facts.py create mode 100644 
ansible_collections/community/general/plugins/modules/solaris_zone.py create mode 100644 ansible_collections/community/general/plugins/modules/sorcery.py create mode 100644 ansible_collections/community/general/plugins/modules/spectrum_device.py create mode 100644 ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py create mode 100644 ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py create mode 100644 ansible_collections/community/general/plugins/modules/ss_3par_cpg.py create mode 100644 ansible_collections/community/general/plugins/modules/ssh_config.py create mode 100644 ansible_collections/community/general/plugins/modules/stackdriver.py create mode 100644 ansible_collections/community/general/plugins/modules/stacki_host.py create mode 100644 ansible_collections/community/general/plugins/modules/statsd.py create mode 100644 ansible_collections/community/general/plugins/modules/statusio_maintenance.py create mode 100644 ansible_collections/community/general/plugins/modules/sudoers.py create mode 100644 ansible_collections/community/general/plugins/modules/supervisorctl.py create mode 100644 ansible_collections/community/general/plugins/modules/svc.py create mode 100644 ansible_collections/community/general/plugins/modules/svr4pkg.py create mode 100644 ansible_collections/community/general/plugins/modules/swdepot.py create mode 100644 ansible_collections/community/general/plugins/modules/swupd.py create mode 100644 ansible_collections/community/general/plugins/modules/syslogger.py create mode 100644 ansible_collections/community/general/plugins/modules/syspatch.py create mode 100644 ansible_collections/community/general/plugins/modules/sysrc.py create mode 100644 ansible_collections/community/general/plugins/modules/sysupgrade.py create mode 100644 ansible_collections/community/general/plugins/modules/taiga_issue.py create mode 100644 ansible_collections/community/general/plugins/modules/telegram.py create mode 100644 ansible_collections/community/general/plugins/modules/terraform.py create mode 100644 ansible_collections/community/general/plugins/modules/timezone.py create mode 100644 ansible_collections/community/general/plugins/modules/twilio.py create mode 100644 ansible_collections/community/general/plugins/modules/typetalk.py create mode 100644 ansible_collections/community/general/plugins/modules/udm_dns_record.py create mode 100644 ansible_collections/community/general/plugins/modules/udm_dns_zone.py create mode 100644 ansible_collections/community/general/plugins/modules/udm_group.py create mode 100644 ansible_collections/community/general/plugins/modules/udm_share.py create mode 100644 ansible_collections/community/general/plugins/modules/udm_user.py create mode 100644 ansible_collections/community/general/plugins/modules/ufw.py create mode 100644 ansible_collections/community/general/plugins/modules/uptimerobot.py create mode 100644 ansible_collections/community/general/plugins/modules/urpmi.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_aaa_group.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_dns_host.py create mode 100644 
ansible_collections/community/general/plugins/modules/utm_network_interface_address.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_proxy_exception.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_proxy_location.py create mode 100644 ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py create mode 100644 ansible_collections/community/general/plugins/modules/vdo.py create mode 100644 ansible_collections/community/general/plugins/modules/vertica_configuration.py create mode 100644 ansible_collections/community/general/plugins/modules/vertica_info.py create mode 100644 ansible_collections/community/general/plugins/modules/vertica_role.py create mode 100644 ansible_collections/community/general/plugins/modules/vertica_schema.py create mode 100644 ansible_collections/community/general/plugins/modules/vertica_user.py create mode 100644 ansible_collections/community/general/plugins/modules/vexata_eg.py create mode 100644 ansible_collections/community/general/plugins/modules/vexata_volume.py create mode 100644 ansible_collections/community/general/plugins/modules/vmadm.py create mode 100644 ansible_collections/community/general/plugins/modules/wakeonlan.py create mode 100644 ansible_collections/community/general/plugins/modules/wdc_redfish_command.py create mode 100644 ansible_collections/community/general/plugins/modules/wdc_redfish_info.py create mode 100644 ansible_collections/community/general/plugins/modules/webfaction_app.py create mode 100644 ansible_collections/community/general/plugins/modules/webfaction_db.py create mode 100644 ansible_collections/community/general/plugins/modules/webfaction_domain.py create mode 100644 ansible_collections/community/general/plugins/modules/webfaction_mailbox.py create mode 100644 ansible_collections/community/general/plugins/modules/webfaction_site.py create mode 100644 ansible_collections/community/general/plugins/modules/xattr.py create mode 100644 ansible_collections/community/general/plugins/modules/xbps.py create mode 100644 ansible_collections/community/general/plugins/modules/xcc_redfish_command.py create mode 100644 ansible_collections/community/general/plugins/modules/xenserver_facts.py create mode 100644 ansible_collections/community/general/plugins/modules/xenserver_guest.py create mode 100644 ansible_collections/community/general/plugins/modules/xenserver_guest_info.py create mode 100644 ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py create mode 100644 ansible_collections/community/general/plugins/modules/xfconf.py create mode 100644 ansible_collections/community/general/plugins/modules/xfconf_info.py create mode 100644 ansible_collections/community/general/plugins/modules/xfs_quota.py create mode 100644 ansible_collections/community/general/plugins/modules/xml.py create mode 100644 ansible_collections/community/general/plugins/modules/yarn.py create mode 100644 ansible_collections/community/general/plugins/modules/yum_versionlock.py create mode 100644 ansible_collections/community/general/plugins/modules/zfs.py create 
mode 100644 ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py create mode 100644 ansible_collections/community/general/plugins/modules/zfs_facts.py create mode 100644 ansible_collections/community/general/plugins/modules/znode.py create mode 100644 ansible_collections/community/general/plugins/modules/zpool_facts.py create mode 100644 ansible_collections/community/general/plugins/modules/zypper.py create mode 100644 ansible_collections/community/general/plugins/modules/zypper_repository.py
diff --git a/ansible_collections/community/general/plugins/modules/aerospike_migrations.py b/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
new file mode 100644
index 000000000..1eee5b1a2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
@@ -0,0 +1,529 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""short_description: Check or wait for migrations between nodes"""
+
+# Copyright (c) 2018, Albert Autin
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: aerospike_migrations
+short_description: Check or wait for migrations between nodes
+description:
+    - This can be used to check for migrations in a cluster.
+      This makes it easy to do a rolling upgrade/update on Aerospike nodes.
+    - If waiting for migrations is not desired, simply poll until
+      port 3000 is available or C(asinfo -v status) returns ok.
+author: "Albert Autin (@Alb0t)"
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+    host:
+        description:
+            - The host to use as the seed for the info connection.
+        required: false
+        type: str
+        default: localhost
+    port:
+        description:
+            - The port to connect to Aerospike on (service port).
+        required: false
+        type: int
+        default: 3000
+    connect_timeout:
+        description:
+            - How long to try to connect before giving up (milliseconds).
+        required: false
+        type: int
+        default: 1000
+    consecutive_good_checks:
+        description:
+            - How many times the cluster must consecutively report "no migrations"
+              before OK is returned to Ansible.
+        required: false
+        type: int
+        default: 3
+    sleep_between_checks:
+        description:
+            - How long to sleep between each check (seconds).
+        required: false
+        type: int
+        default: 60
+    tries_limit:
+        description:
+            - How many times to poll before giving up and failing.
+        default: 300
+        required: false
+        type: int
+    local_only:
+        description:
+            - Whether to check for migrations only on the local node
+              before returning, or to wait for all nodes in the cluster
+              to finish before returning.
+        required: true
+        type: bool
+    min_cluster_size:
+        description:
+            - The check fails until this cluster size is met
+              or until tries are exhausted.
+        required: false
+        type: int
+        default: 1
+    fail_on_cluster_change:
+        description:
+            - Fail if the cluster key changes;
+              if something else is changing the cluster, we may want to fail.
+        required: false
+        type: bool
+        default: true
+    migrate_tx_key:
+        description:
+            - The metric key used to determine if we have tx migrations
+              remaining. Changeable due to backwards compatibility.
+        required: false
+        type: str
+        default: migrate_tx_partitions_remaining
+    migrate_rx_key:
+        description:
+            - The metric key used to determine if we have rx migrations
+              remaining. Changeable due to backwards compatibility.
+        required: false
+        type: str
+        default: migrate_rx_partitions_remaining
+    target_cluster_size:
+        description:
+            - When all Aerospike builds in the cluster are version 4.3 or
+              greater, the C(cluster-stable) info command will be used.
+              Inside this command, you can optionally specify what the target
+              cluster size is, but it is not necessary. You can still rely on
+              min_cluster_size if you don't want to use this option.
+            - If this option is specified on a cluster that has at least one
+              host below version 4.3, it will be ignored until the minimum
+              version reaches 4.3.
+        required: false
+        type: int
+'''
+EXAMPLES = '''
+# check for migrations on local node
+- name: Wait for migrations on local node before proceeding
+  community.general.aerospike_migrations:
+    host: "localhost"
+    connect_timeout: 2000
+    consecutive_good_checks: 5
+    sleep_between_checks: 15
+    tries_limit: 600
+    local_only: false
+
+# example playbook:
+- name: Upgrade aerospike
+  hosts: all
+  become: true
+  serial: 1
+  tasks:
+    - name: Install dependencies
+      ansible.builtin.apt:
+        name:
+          - python
+          - python-pip
+          - python-setuptools
+        state: latest
+    - name: Setup aerospike
+      ansible.builtin.pip:
+        name: aerospike
+# Check for migrations every (sleep_between_checks) seconds.
+# If at least (consecutive_good_checks) checks come back OK in a row, return OK.
+# Will exit on any exception, which can be caused by bad nodes,
+# nodes not returning data, or other reasons.
+# Maximum runtime before giving up in this case will be roughly:
+# retries * (delay + tries_limit * sleep_between_checks)
+    - name: Wait for aerospike migrations
+      community.general.aerospike_migrations:
+        local_only: true
+        sleep_between_checks: 1
+        tries_limit: 5
+        consecutive_good_checks: 3
+        fail_on_cluster_change: true
+        min_cluster_size: 3
+        target_cluster_size: 4
+      register: migrations_check
+      until: migrations_check is succeeded
+      changed_when: false
+      delay: 60
+      retries: 120
+    - name: Another thing
+      ansible.builtin.shell: |
+        echo foo
+    - name: Reboot
+      ansible.builtin.reboot:
+'''
+
+RETURN = '''
+# Returns only a success/failure result. Changed is always false.
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+LIB_FOUND_ERR = None
+try:
+    import aerospike
+    from time import sleep
+    import re
+except ImportError as ie:
+    LIB_FOUND = False
+    LIB_FOUND_ERR = traceback.format_exc()
+else:
+    LIB_FOUND = True
+
+
+def run_module():
+    """run ansible module"""
+    module_args = dict(
+        host=dict(type='str', required=False, default='localhost'),
+        port=dict(type='int', required=False, default=3000),
+        connect_timeout=dict(type='int', required=False, default=1000),
+        consecutive_good_checks=dict(type='int', required=False, default=3),
+        sleep_between_checks=dict(type='int', required=False, default=60),
+        tries_limit=dict(type='int', required=False, default=300),
+        local_only=dict(type='bool', required=True),
+        min_cluster_size=dict(type='int', required=False, default=1),
+        target_cluster_size=dict(type='int', required=False, default=None),
+        fail_on_cluster_change=dict(type='bool', required=False, default=True),
+        migrate_tx_key=dict(type='str', required=False, no_log=False,
+                            default="migrate_tx_partitions_remaining"),
+        migrate_rx_key=dict(type='str', required=False, no_log=False,
+                            default="migrate_rx_partitions_remaining")
+    )
+
+    result = dict(
+        changed=False,
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True
+    )
+    if not LIB_FOUND:
+        module.fail_json(msg=missing_required_lib('aerospike'),
+                         exception=LIB_FOUND_ERR)
+
+    try:
+        if module.check_mode:
+            has_migrations, skip_reason = False, None
+        else:
+            migrations = Migrations(module)
+            has_migrations, skip_reason = migrations.has_migs(
+                module.params['local_only']
+            )
+
+        if has_migrations:
+            module.fail_json(msg="Failed.", skip_reason=skip_reason)
+    except Exception as e:
+        module.fail_json(msg="Error: {0}".format(e))
+
+    module.exit_json(**result)
+
+
+class Migrations:
+    """ Check or wait for migrations between nodes """
+
+    def __init__(self, module):
+        self.module = module
+        self._client = self._create_client().connect()
+        self._nodes = {}
+        self._update_nodes_list()
+        self._cluster_statistics = {}
+        self._update_cluster_statistics()
+        self._namespaces = set()
+        self._update_cluster_namespace_list()
+        self._build_list = set()
+        self._update_build_list()
+        self._start_cluster_key = \
+            self._cluster_statistics[self._nodes[0]]['cluster_key']
+
+    def _create_client(self):
+        """ TODO: add support for auth, tls, and other special features
+        I won't use those features, so I'll wait until somebody complains
+        or does it for me (Cross fingers)
+        create the client object"""
+        config = {
+            'hosts': [
+                (self.module.params['host'], self.module.params['port'])
+            ],
+            'policies': {
+                'timeout': self.module.params['connect_timeout']
+            }
+        }
+        return aerospike.client(config)
+
+    def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
+        """delimiter separates the individual stats that come back,
+        NOT the kv pairs, which are separated by '='"""
+        if node is None:  # If no node passed, use the first one (local)
+            node = self._nodes[0]
+        data = self._client.info_node(cmd, node)
+        data = data.split("\t")
+        if len(data) != 1 and len(data) != 2:
+            self.module.fail_json(
+                msg="Unexpected number of values returned in info command: " +
+                str(len(data))
+            )
+        # data will be in format 'command\toutput'
+        data = data[-1]
+        data = data.rstrip("\n\r")
+        data_arr = data.split(delimiter)
+
+        # some commands don't return in kv format
+        # so we don't want a dict from those.
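+        # For example (illustrative values, not captured from a live
+        # cluster): 'statistics' yields 'cluster_size=4;cluster_key=ABC;...'
+        # and is parsed into {'cluster_size': '4', 'cluster_key': 'ABC', ...},
+        # while 'build' yields a bare string such as '4.5.0.5'.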
+        if '=' in data:
+            retval = dict(
+                metric.split("=", 1) for metric in data_arr
+            )
+        else:
+            # if only 1 element found, and not kv, return just the value.
+            if len(data_arr) == 1:
+                retval = data_arr[0]
+            else:
+                retval = data_arr
+        return retval
+
+    def _update_build_list(self):
+        """creates self._build_list which is a unique list
+        of build versions."""
+        self._build_list = set()
+        for node in self._nodes:
+            build = self._info_cmd_helper('build', node)
+            self._build_list.add(build)
+
+    # just checks to see if the version is 4.3 or greater
+    def _can_use_cluster_stable(self):
+        # if version <4.3 we can't use cluster-stable info cmd
+        # regex hack to check for versions beginning with 0-3 or
+        # beginning with 4.0,4.1,4.2
+        if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
+            return False
+        return True
+
+    def _update_cluster_namespace_list(self):
+        """ make a unique list of namespaces
+        TODO: does this work on a rolling namespace add/deletion?
+        thankfully if it doesn't, we don't need this on builds >=4.3"""
+        self._namespaces = set()
+        for node in self._nodes:
+            namespaces = self._info_cmd_helper('namespaces', node)
+            for namespace in namespaces:
+                self._namespaces.add(namespace)
+
+    def _update_cluster_statistics(self):
+        """create a dict of nodes with their related stats """
+        self._cluster_statistics = {}
+        for node in self._nodes:
+            self._cluster_statistics[node] = \
+                self._info_cmd_helper('statistics', node)
+
+    def _update_nodes_list(self):
+        """get a fresh list of all the nodes"""
+        self._nodes = self._client.get_nodes()
+        if not self._nodes:
+            self.module.fail_json("Failed to retrieve at least 1 node.")
+
+    def _namespace_has_migs(self, namespace, node=None):
+        """Returns True or False.
+        Does the namespace have migrations for the node passed?
+        If no node passed, uses the local node or the first one in the list"""
+        namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
+        try:
+            namespace_tx = \
+                int(namespace_stats[self.module.params['migrate_tx_key']])
+            namespace_rx = \
+                int(namespace_stats[self.module.params['migrate_rx_key']])
+        except KeyError:
+            self.module.fail_json(
+                msg="Did not find partition remaining key:" +
+                self.module.params['migrate_tx_key'] +
+                " or key:" +
+                self.module.params['migrate_rx_key'] +
+                " in 'namespace/" +
+                namespace +
+                "' output."
+            )
+        except TypeError:
+            self.module.fail_json(
+                msg="namespace stat returned was not numerical"
+            )
+        return namespace_tx != 0 or namespace_rx != 0
+
+    def _node_has_migs(self, node=None):
+        """just calls namespace_has_migs and
+        if any namespace has migs returns true"""
+        migs = 0
+        self._update_cluster_namespace_list()
+        for namespace in self._namespaces:
+            if self._namespace_has_migs(namespace, node):
+                migs += 1
+        return migs != 0
+
+    def _cluster_key_consistent(self):
+        """create a dictionary to store what each node
+        returns the cluster key as. we should end up with only 1 dict key,
+        with the key being the cluster key."""
+        cluster_keys = {}
+        for node in self._nodes:
+            cluster_key = self._cluster_statistics[node][
+                'cluster_key']
+            if cluster_key not in cluster_keys:
+                cluster_keys[cluster_key] = 1
+            else:
+                cluster_keys[cluster_key] += 1
+        if len(cluster_keys.keys()) == 1 and \
+                self._start_cluster_key in cluster_keys:
+            return True
+        return False
+
+    def _cluster_migrates_allowed(self):
+        """ensure all nodes have 'migrate_allowed' in their stats output"""
+        for node in self._nodes:
+            node_stats = self._info_cmd_helper('statistics', node)
+            allowed = node_stats['migrate_allowed']
+            if allowed == "false":
+                return False
+        return True
+
+    def _cluster_has_migs(self):
+        """calls node_has_migs for each node"""
+        migs = 0
+        for node in self._nodes:
+            if self._node_has_migs(node):
+                migs += 1
+        if migs == 0:
+            return False
+        return True
+
+    def _has_migs(self, local):
+        if local:
+            return self._local_node_has_migs()
+        return self._cluster_has_migs()
+
+    def _local_node_has_migs(self):
+        return self._node_has_migs(None)
+
+    def _is_min_cluster_size(self):
+        """checks that all nodes in the cluster report the same cluster size
+        and that it meets the minimum cluster size specified"""
+        sizes = set()
+        for node in self._cluster_statistics:
+            sizes.add(int(self._cluster_statistics[node]['cluster_size']))
+
+        if len(sizes) > 1:  # if we are getting more than 1 size, lets say no
+            return False
+        if min(sizes) >= self.module.params['min_cluster_size']:
+            return True
+        return False
+
+    def _cluster_stable(self):
+        """Added 4.3:
+        cluster-stable:size=<n>;ignore-migrations=<yes/no>;namespace=<name>
+        Returns the current 'cluster_key' when the following are satisfied:
+
+        If 'size' is specified then the target node's 'cluster-size'
+        must match size.
+        If 'ignore-migrations' is either unspecified or 'false' then
+        the target node's migrations counts must be zero for the provided
+        'namespace' or all namespaces if 'namespace' is not provided."""
+        cluster_key = set()
+        cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
+        cmd = "cluster-stable:"
+        target_cluster_size = self.module.params['target_cluster_size']
+        if target_cluster_size is not None:
+            cmd = cmd + "size=" + str(target_cluster_size) + ";"
+        for node in self._nodes:
+            try:
+                cluster_key.add(self._info_cmd_helper(cmd, node))
+            except aerospike.exception.ServerError as e:  # unstable-cluster is returned in form of Exception
+                if 'unstable-cluster' in e.msg:
+                    return False
+                raise e
+        if len(cluster_key) == 1:
+            return True
+        return False
+
+    def _cluster_good_state(self):
+        """checks a few things to make sure we're OK to say the cluster
+        has no migs. It could be in an unhealthy condition that does not
+        allow migs, or a split brain"""
+        if self._cluster_key_consistent() is not True:
+            return False, "Cluster key inconsistent."
+        if self._is_min_cluster_size() is not True:
+            return False, "Cluster min size not reached."
+        if self._cluster_migrates_allowed() is not True:
+            return False, "migrate_allowed is false somewhere."
+        return True, "OK."
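+    # A rough illustration of the cluster-stable check above (values are
+    # assumed, not from a live cluster): with target_cluster_size=4 the
+    # command string becomes "cluster-stable:size=4;", and every node is
+    # expected to return the same cluster key. A single distinct key across
+    # all nodes means the cluster is stable.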
+
+    def has_migs(self, local=True):
+        """returns a (bool, reasons) tuple:
+        False if no migrations, otherwise True"""
+        consecutive_good = 0
+        try_num = 0
+        skip_reason = list()
+        while \
+                try_num < int(self.module.params['tries_limit']) and \
+                consecutive_good < \
+                int(self.module.params['consecutive_good_checks']):
+
+            self._update_nodes_list()
+            self._update_cluster_statistics()
+
+            # These checks do not fail the run immediately;
+            # we probably want to skip & sleep instead of failing entirely
+            stable, reason = self._cluster_good_state()
+            if stable is not True:
+                skip_reason.append(
+                    "Skipping on try#" + str(try_num) +
+                    " for reason:" + reason
+                )
+            else:
+                if self._can_use_cluster_stable():
+                    if self._cluster_stable():
+                        consecutive_good += 1
+                    else:
+                        consecutive_good = 0
+                        skip_reason.append(
+                            "Skipping on try#" + str(try_num) +
+                            " for reason:" + " cluster_stable"
+                        )
+                elif self._has_migs(local):
+                    skip_reason.append(
+                        "Skipping on try#" + str(try_num) +
+                        " for reason:" + " migrations"
+                    )
+                    consecutive_good = 0
+                else:
+                    consecutive_good += 1
+                    if consecutive_good == self.module.params[
+                            'consecutive_good_checks']:
+                        break
+            try_num += 1
+            sleep(self.module.params['sleep_between_checks'])
+        if consecutive_good == self.module.params['consecutive_good_checks']:
+            return False, None
+        return True, skip_reason
+
+
+def main():
+    """main method for ansible module"""
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
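The polling above can also be reproduced by hand when debugging a cluster. A minimal standalone sketch (assuming the aerospike Python client is installed, a node is reachable on localhost:3000, and a namespace named 'test' exists; all three are assumptions, not part of the module):

    import aerospike

    config = {'hosts': [('localhost', 3000)], 'policies': {'timeout': 1000}}
    client = aerospike.client(config).connect()
    node = client.get_nodes()[0]  # first (seed) node
    # info responses look like 'command\tkey=value;key=value;...'
    raw = client.info_node('namespace/test', node).split('\t')[-1].strip()
    stats = dict(kv.split('=', 1) for kv in raw.split(';') if '=' in kv)
    tx = int(stats.get('migrate_tx_partitions_remaining', 0))
    rx = int(stats.get('migrate_rx_partitions_remaining', 0))
    print('migrations pending' if tx or rx else 'no migrations')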
diff --git a/ansible_collections/community/general/plugins/modules/airbrake_deployment.py b/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
new file mode 100644
index 000000000..42ac037e1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Bruce Pennypacker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: airbrake_deployment
+author:
+- "Bruce Pennypacker (@bpennypacker)"
+- "Patrick Humpal (@phumpal)"
+short_description: Notify Airbrake about app deployments
+description:
+  - Notify Airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  project_id:
+    description:
+      - Airbrake PROJECT_ID.
+    required: true
+    type: str
+    version_added: '0.2.0'
+  project_key:
+    description:
+      - Airbrake PROJECT_KEY.
+    required: true
+    type: str
+    version_added: '0.2.0'
+  environment:
+    description:
+      - The Airbrake environment name, typically 'production', 'staging', etc.
+    required: true
+    type: str
+  user:
+    description:
+      - The username of the person doing the deployment.
+    required: false
+    type: str
+  repo:
+    description:
+      - URL of the project repository.
+    required: false
+    type: str
+  revision:
+    description:
+      - A hash, number, tag, or other identifier showing what revision from version control was deployed.
+    required: false
+    type: str
+  version:
+    description:
+      - A string identifying what version was deployed.
+    required: false
+    type: str
+    version_added: '1.0.0'
+  url:
+    description:
+      - Optional URL to submit the notification to.
+        Use to send notifications to Airbrake-compliant tools like Errbit.
+    required: false
+    default: "https://api.airbrake.io/api/v4/projects/"
+    type: str
+  validate_certs:
+    description:
+      - If C(false), SSL certificates for the target url will not be validated.
+        This should only be used on personally controlled sites using self-signed certificates.
+    required: false
+    default: true
+    type: bool
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify Airbrake about an app deployment
+  community.general.airbrake_deployment:
+    project_id: '12345'
+    project_key: 'AAAAAA'
+    environment: staging
+    user: ansible
+    revision: '4.2'
+
+- name: Notify Airbrake about an app deployment, using git hash as revision
+  community.general.airbrake_deployment:
+    project_id: '12345'
+    project_key: 'AAAAAA'
+    environment: staging
+    user: ansible
+    revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
+    version: '0.2.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            project_id=dict(required=True, no_log=True, type='str'),
+            project_key=dict(required=True, no_log=True, type='str'),
+            environment=dict(required=True, type='str'),
+            user=dict(required=False, type='str'),
+            repo=dict(required=False, type='str'),
+            revision=dict(required=False, type='str'),
+            version=dict(required=False, type='str'),
+            url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
+            validate_certs=dict(default=True, type='bool'),
+        ),
+        supports_check_mode=True,
+    )
+
+    # Build list of params
+    params = {}
+
+    # If we're in check mode, just exit pretending like we succeeded
+    if module.check_mode:
+        module.exit_json(changed=True)
+
+    # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
+    if module.params["environment"]:
+        params["environment"] = module.params["environment"]
+
+    if module.params["user"]:
+        params["username"] = module.params["user"]
+
+    if module.params["repo"]:
+        params["repository"] = module.params["repo"]
+
+    if module.params["revision"]:
+        params["revision"] = module.params["revision"]
+
+    if module.params["version"]:
+        params["version"] = module.params["version"]
+
+    # Build deploy url
+    url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
+    json_body = module.jsonify(params)
+
+    # Build header
+    headers = {'Content-Type': 'application/json'}
+
+    # Notify Airbrake of deploy
+    response, info = fetch_url(module, url, data=json_body,
+                               headers=headers, method='POST')
+
+    if info['status'] == 200 or info['status'] == 201:
+        module.exit_json(changed=True)
+    else:
+        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/aix_devices.py b/ansible_collections/community/general/plugins/modules/aix_devices.py
new file mode 100644
index 000000000..ef4ed4961
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aix_devices.py
@@ -0,0 +1,377 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, 2018 Kairo Araujo
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_devices
+short_description: Manages AIX devices
+description:
+- This module discovers, defines, removes and modifies attributes of AIX devices.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  attributes:
+    description:
+    - A list of device attributes.
+    type: dict
+  device:
+    description:
+    - The name of the device.
+    - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
+    type: str
+  force:
+    description:
+    - Forces action.
+    type: bool
+    default: false
+  recursive:
+    description:
+    - Removes or defines a device and children devices.
+    type: bool
+    default: false
+  state:
+    description:
+    - Controls the device state.
+    - C(available) (alias C(present)) rescans a specific device or all devices (when C(device) is not specified).
+    - C(removed) (alias C(absent)) removes a device.
+    - C(defined) changes device to Defined state.
+    type: str
+    choices: [ available, defined, removed ]
+    default: available
+'''
+
+EXAMPLES = r'''
+- name: Scan new devices
+  community.general.aix_devices:
+    device: all
+    state: available
+
+- name: Scan new virtual devices (vio0)
+  community.general.aix_devices:
+    device: vio0
+    state: available
+
+- name: Removing IP alias to en0
+  community.general.aix_devices:
+    device: en0
+    attributes:
+      delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2
+  community.general.aix_devices:
+    device: ent2
+    state: removed
+
+- name: Put device en2 in Defined state
+  community.general.aix_devices:
+    device: en2
+    state: defined
+
+- name: Removes ent4 (nonexistent).
+  community.general.aix_devices:
+    device: ent4
+    state: removed
+
+- name: Put device en4 in Defined state (nonexistent)
+  community.general.aix_devices:
+    device: en4
+    state: defined
+
+- name: Put vscsi1 and children devices in Defined state.
+  community.general.aix_devices:
+    device: vscsi1
+    recursive: true
+    state: defined
+
+- name: Removes vscsi1 and children devices.
+  community.general.aix_devices:
+    device: vscsi1
+    recursive: true
+    state: removed
+
+- name: Changes en1 mtu to 9000 and disables arp.
+  community.general.aix_devices:
+    device: en1
+    attributes:
+      mtu: 9000
+      arp: 'off'
+    state: available
+
+- name: Configure IP, netmask and set en1 up.
+  community.general.aix_devices:
+    device: en1
+    attributes:
+      netaddr: 192.168.0.100
+      netmask: 255.255.255.0
+      state: up
+    state: available
+
+- name: Adding IP alias to en0
+  community.general.aix_devices:
+    device: en0
+    attributes:
+      alias4: 10.0.0.100,255.255.255.0
+    state: available
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_device(module, device):
+    """
+    Check if device already exists and the state.
+    Args:
+        module: Ansible module.
+        device: device to be checked.
+
+    Returns: bool, device state
+
+    """
+    lsdev_cmd = module.get_bin_path('lsdev', True)
+    rc, lsdev_out, err = module.run_command([lsdev_cmd, '-C', '-l', device])
+
+    if rc != 0:
+        module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
+
+    if lsdev_out:
+        device_state = lsdev_out.split()[1]
+        return True, device_state
+
+    device_state = None
+    return False, device_state
+
+
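+# For reference (illustrative output; the actual wording varies by device):
+# "lsdev -C -l en0" prints a line such as
+#   en0 Available  Standard Ethernet Network Interface
+# and the second whitespace-separated field is the device state parsed above.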
+def _check_device_attr(module, device, attr):
+    """
+
+    Args:
+        module: Ansible module.
+        device: device to check attributes.
+        attr: attribute to be checked.
+
+    Returns: the current attribute value; '' for hidden attributes that
+        lsattr cannot display, or None when the attribute does not exist.
+
+    """
+    lsattr_cmd = module.get_bin_path('lsattr', True)
+    rc, lsattr_out, err = module.run_command([lsattr_cmd, '-El', device, '-a', attr])
+
+    hidden_attrs = ['delalias4', 'delalias6']
+
+    if rc == 255:
+
+        if attr in hidden_attrs:
+            current_param = ''
+        else:
+            current_param = None
+
+        return current_param
+
+    elif rc != 0:
+        module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
+
+    current_param = lsattr_out.split()[1]
+    return current_param
+
+
+def discover_device(module, device):
+    """ Discover AIX devices."""
+    cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
+
+    cmd = [cfgmgr_cmd]
+    if device is not None:
+        # flag and value must be separate argv entries for run_command
+        cmd += ['-l', device]
+
+    changed = True
+    msg = ''
+    if not module.check_mode:
+        rc, cfgmgr_out, err = module.run_command(cmd)
+        changed = True
+        msg = cfgmgr_out
+
+    return changed, msg
+
+
+def change_device_attr(module, attributes, device, force):
+    """ Change AIX device attribute. """
+
+    attr_changed = []
+    attr_not_changed = []
+    attr_invalid = []
+    chdev_cmd = module.get_bin_path('chdev', True)
+
+    for attr in list(attributes.keys()):
+        new_param = attributes[attr]
+        current_param = _check_device_attr(module, device, attr)
+
+        if current_param is None:
+            attr_invalid.append(attr)
+
+        elif current_param != new_param:
+            if force:
+                cmd = [chdev_cmd, '-l', device, '-a', "%s=%s" % (attr, attributes[attr]), force]
+            else:
+                cmd = [chdev_cmd, '-l', device, '-a', "%s=%s" % (attr, attributes[attr])]
+
+            if not module.check_mode:
+                rc, chdev_out, err = module.run_command(cmd)
+                if rc != 0:
+                    module.fail_json(msg="Failed to run chdev.", rc=rc, err=err)
+
+            attr_changed.append(attributes[attr])
+        else:
+            attr_not_changed.append(attributes[attr])
+
+    if len(attr_changed) > 0:
+        changed = True
+        attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
+    else:
+        changed = False
+        attr_changed_msg = ''
+
+    if len(attr_not_changed) > 0:
+        attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
+    else:
+        attr_not_changed_msg = ''
+
+    if len(attr_invalid) > 0:
+        attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
+    else:
+        attr_invalid_msg = ''
+
+    msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
+
+    return changed, msg
+
+
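+# The chdev invocation built above amounts to, for example (illustrative
+# device and attribute): chdev -l en1 -a mtu=9000, with -f appended when
+# force is set.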
""" + + state_opt = { + 'removed': '-d', + 'absent': '-d', + 'defined': '' + } + + recursive_opt = { + True: '-R', + False: '' + } + + recursive = recursive_opt[recursive] + state = state_opt[state] + + changed = True + msg = '' + rmdev_cmd = module.get_bin_path('rmdev', True) + + if not module.check_mode: + if state: + rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force]) + else: + rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive]) + + if rc != 0: + module.fail_json(msg="Failed to run rmdev", rc=rc, err=err) + + msg = rmdev_out + + return changed, msg + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + attributes=dict(type='dict'), + device=dict(type='str'), + force=dict(type='bool', default=False), + recursive=dict(type='bool', default=False), + state=dict(type='str', default='available', choices=['available', 'defined', 'removed']), + ), + supports_check_mode=True, + ) + + force_opt = { + True: '-f', + False: '', + } + + attributes = module.params['attributes'] + device = module.params['device'] + force = force_opt[module.params['force']] + recursive = module.params['recursive'] + state = module.params['state'] + + result = dict( + changed=False, + msg='', + ) + + if state == 'available' or state == 'present': + if attributes: + # change attributes on device + device_status, device_state = _check_device(module, device) + if device_status: + result['changed'], result['msg'] = change_device_attr(module, attributes, device, force) + else: + result['msg'] = "Device %s does not exist." % device + + else: + # discovery devices (cfgmgr) + if device and device != 'all': + device_status, device_state = _check_device(module, device) + if device_status: + # run cfgmgr on specific device + result['changed'], result['msg'] = discover_device(module, device) + + else: + result['msg'] = "Device %s does not exist." % device + + else: + result['changed'], result['msg'] = discover_device(module, device) + + elif state == 'removed' or state == 'absent' or state == 'defined': + if not device: + result['msg'] = "device is required to removed or defined state." + + else: + # Remove device + check_device, device_state = _check_device(module, device) + if check_device: + if state == 'defined' and device_state == 'Defined': + result['changed'] = False + result['msg'] = 'Device %s already in Defined' % device + + else: + result['changed'], result['msg'] = remove_device(module, device, force, recursive, state) + + else: + result['msg'] = "Device %s does not exist." % device + + else: + result['msg'] = "Unexpected state %s." 
diff --git a/ansible_collections/community/general/plugins/modules/aix_filesystem.py b/ansible_collections/community/general/plugins/modules/aix_filesystem.py
new file mode 100644
index 000000000..b1f363a93
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aix_filesystem.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Kairo Araujo
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+  - Kairo Araujo (@kairoaraujo)
+module: aix_filesystem
+short_description: Configure LVM and NFS file systems for AIX
+description:
+  - This module creates, removes, mounts and unmounts LVM and NFS file
+    systems for AIX using C(/etc/filesystems).
+  - For LVM file systems it is possible to resize a file system.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  account_subsystem:
+    description:
+      - Specifies whether the file system is to be processed by the accounting subsystem.
+    type: bool
+    default: false
+  attributes:
+    description:
+      - Specifies attributes for the file system, separated by comma.
+    type: list
+    elements: str
+    default:
+      - agblksize='4096'
+      - isnapshot='no'
+  auto_mount:
+    description:
+      - File system is automatically mounted at system restart.
+    type: bool
+    default: true
+  device:
+    description:
+      - Logical volume (LV) device name or remote export device to create a NFS file system.
+      - It is used to create a file system on an already existing logical volume or the exported NFS file system.
+      - If not mentioned a new logical volume name will be created following AIX standards (LVM).
+    type: str
+  fs_type:
+    description:
+      - Specifies the virtual file system type.
+    type: str
+    default: jfs2
+  permissions:
+    description:
+      - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
+    type: str
+    choices: [ ro, rw ]
+    default: rw
+  mount_group:
+    description:
+      - Specifies the mount group.
+    type: str
+  filesystem:
+    description:
+      - Specifies the mount point, which is the directory where the file system will be mounted.
+    type: str
+    required: true
+  nfs_server:
+    description:
+      - Specifies a Network File System (NFS) server.
+    type: str
+  rm_mount_point:
+    description:
+      - Removes the mount point directory when used with state C(absent).
+    type: bool
+    default: false
+  size:
+    description:
+      - Specifies the file system size.
+      - For an already C(present) file system it will be resized.
+      - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified
+        it will be in Megabytes. If the value has G specified it will be in
+        Gigabytes.
+      - If neither M nor G is specified, the value will be in 512-byte blocks.
+      - If "+" is specified at the beginning of the value, the value will be added.
+      - If "-" is specified at the beginning of the value, the value will be removed.
+      - If neither "+" nor "-" is specified, the total size will be the specified value.
+      - Size respects the LVM AIX standards.
+    type: str
+  state:
+    description:
+      - Controls the file system state.
+      - C(present) checks if the file system exists, creates or resizes it.
+      - C(absent) removes the existing file system if already C(unmounted).
+ - C(mounted) checks if the file system is mounted or mount the file system. + - C(unmounted) check if the file system is unmounted or unmount the file system. + type: str + choices: [ absent, mounted, present, unmounted ] + default: present + vg: + description: + - Specifies an existing volume group (VG). + type: str +notes: + - For more C(attributes), please check "crfs" AIX manual. +''' + +EXAMPLES = r''' +- name: Create filesystem in a previously defined logical volume. + community.general.aix_filesystem: + device: testlv + filesystem: /testfs + state: present + +- name: Creating NFS filesystem from nfshost. + community.general.aix_filesystem: + device: /home/ftp + nfs_server: nfshost + filesystem: /home/ftp + state: present + +- name: Creating a new file system without a previously logical volume. + community.general.aix_filesystem: + filesystem: /newfs + size: 1G + state: present + vg: datavg + +- name: Unmounting /testfs. + community.general.aix_filesystem: + filesystem: /testfs + state: unmounted + +- name: Resizing /mksysb to +512M. + community.general.aix_filesystem: + filesystem: /mksysb + size: +512M + state: present + +- name: Resizing /mksysb to 11G. + community.general.aix_filesystem: + filesystem: /mksysb + size: 11G + state: present + +- name: Resizing /mksysb to -2G. + community.general.aix_filesystem: + filesystem: /mksysb + size: -2G + state: present + +- name: Remove NFS filesystem /home/ftp. + community.general.aix_filesystem: + filesystem: /home/ftp + rm_mount_point: true + state: absent + +- name: Remove /newfs. + community.general.aix_filesystem: + filesystem: /newfs + rm_mount_point: true + state: absent +''' + +RETURN = r''' +changed: + description: Return changed for aix_filesystems actions as true or false. + returned: always + type: bool +msg: + description: Return message regarding the action. + returned: always + type: str +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils._mount import ismount +import re + + +def _fs_exists(module, filesystem): + """ + Check if file system already exists on /etc/filesystems. + + :param module: Ansible module. + :param community.general.filesystem: filesystem name. + :return: True or False. + """ + lsfs_cmd = module.get_bin_path('lsfs', True) + rc, lsfs_out, err = module.run_command([lsfs_cmd, "-l", filesystem]) + if rc == 1: + if re.findall("No record matching", err): + return False + + else: + module.fail_json(msg="Failed to run lsfs. Error message: %s" % err) + + else: + + return True + + +def _check_nfs_device(module, nfs_host, device): + """ + Validate if NFS server is exporting the device (remote export). + + :param module: Ansible module. + :param nfs_host: nfs_host parameter, NFS server. + :param device: device parameter, remote export. + :return: True or False. + """ + showmount_cmd = module.get_bin_path('showmount', True) + rc, showmount_out, err = module.run_command([showmount_cmd, "-a", nfs_host]) + if rc != 0: + module.fail_json(msg="Failed to run showmount. Error message: %s" % err) + else: + showmount_data = showmount_out.splitlines() + for line in showmount_data: + if line.split(':')[1] == device: + return True + + return False + + +def _validate_vg(module, vg): + """ + Check the current state of volume group. + + :param module: Ansible module argument spec. + :param vg: Volume Group name. + :return: True (VG in varyon state) or False (VG in varyoff state) or + None (VG does not exist), message. 
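+
+    A minimal usage sketch (mirrors how create_fs() consumes the result;
+    'datavg' is a placeholder name):
+
+        vg_state, msg = _validate_vg(module, 'datavg')
+        if not vg_state:
+            return False, msg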
+ """ + lsvg_cmd = module.get_bin_path('lsvg', True) + rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) + if rc != 0: + module.fail_json(msg="Failed executing %s command." % lsvg_cmd) + + rc, current_all_vgs, err = module.run_command([lsvg_cmd, "%s"]) + if rc != 0: + module.fail_json(msg="Failed executing %s command." % lsvg_cmd) + + if vg in current_all_vgs and vg not in current_active_vgs: + msg = "Volume group %s is in varyoff state." % vg + return False, msg + elif vg in current_active_vgs: + msg = "Volume group %s is in varyon state." % vg + return True, msg + else: + msg = "Volume group %s does not exist." % vg + return None, msg + + +def resize_fs(module, filesystem, size): + """ Resize LVM file system. """ + + chfs_cmd = module.get_bin_path('chfs', True) + if not module.check_mode: + rc, chfs_out, err = module.run_command([chfs_cmd, "-a", "size=%s" % size, filesystem]) + + if rc == 28: + changed = False + return changed, chfs_out + elif rc != 0: + if re.findall('Maximum allocation for logical', err): + changed = False + return changed, err + else: + module.fail_json(msg="Failed to run chfs. Error message: %s" % err) + + else: + if re.findall('The filesystem size is already', chfs_out): + changed = False + else: + changed = True + + return changed, chfs_out + else: + changed = True + msg = '' + + return changed, msg + + +def create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, + account_subsystem, permissions, nfs_server, attributes): + """ Create LVM file system or NFS remote mount point. """ + + attributes = ' -a '.join(attributes) + + # Parameters definition. + account_subsys_opt = { + True: '-t yes', + False: '-t no' + } + + if nfs_server is not None: + auto_mount_opt = { + True: '-A', + False: '-a' + } + + else: + auto_mount_opt = { + True: '-A yes', + False: '-A no' + } + + if size is None: + size = '' + else: + size = "-a size=%s" % size + + if device is None: + device = '' + else: + device = "-d %s" % device + + if vg is None: + vg = '' + else: + vg_state, msg = _validate_vg(module, vg) + if vg_state: + vg = "-g %s" % vg + else: + changed = False + + return changed, msg + + if mount_group is None: + mount_group = '' + + else: + mount_group = "-u %s" % mount_group + + auto_mount = auto_mount_opt[auto_mount] + account_subsystem = account_subsys_opt[account_subsystem] + + if nfs_server is not None: + # Creates a NFS file system. + mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True) + if not module.check_mode: + rc, mknfsmnt_out, err = module.run_command([mknfsmnt_cmd, "-f", filesystem, device, "-h", nfs_server, "-t", permissions, auto_mount, "-w", "bg"]) + if rc != 0: + module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err) + else: + changed = True + msg = "NFS file system %s created." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + else: + # Creates a LVM file system. + crfs_cmd = module.get_bin_path('crfs', True) + if not module.check_mode: + cmd = [crfs_cmd, "-v", fs_type, "-m", filesystem, vg, device, mount_group, auto_mount, account_subsystem, "-p", permissions, size, "-a", attributes] + rc, crfs_out, err = module.run_command(cmd) + + if rc == 10: + module.exit_json( + msg="Using a existent previously defined logical volume, " + "volume group needs to be empty. %s" % err) + + elif rc != 0: + module.fail_json(msg="Failed to run %s. 
Error message: %s" % (cmd, err)) + + else: + changed = True + return changed, crfs_out + else: + changed = True + msg = '' + + return changed, msg + + +def remove_fs(module, filesystem, rm_mount_point): + """ Remove an LVM file system or NFS entry. """ + + # Command parameters. + rm_mount_point_opt = { + True: '-r', + False: '' + } + + rm_mount_point = rm_mount_point_opt[rm_mount_point] + + rmfs_cmd = module.get_bin_path('rmfs', True) + if not module.check_mode: + cmd = [rmfs_cmd, "-r", rm_mount_point, filesystem] + rc, rmfs_out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) + else: + changed = True + msg = rmfs_out + if not rmfs_out: + msg = "File system %s removed." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def mount_fs(module, filesystem): + """ Mount a file system. """ + mount_cmd = module.get_bin_path('mount', True) + + if not module.check_mode: + rc, mount_out, err = module.run_command([mount_cmd, filesystem]) + if rc != 0: + module.fail_json(msg="Failed to run mount. Error message: %s" % err) + else: + changed = True + msg = "File system %s mounted." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def unmount_fs(module, filesystem): + """ Unmount a file system.""" + unmount_cmd = module.get_bin_path('unmount', True) + + if not module.check_mode: + rc, unmount_out, err = module.run_command([unmount_cmd, filesystem]) + if rc != 0: + module.fail_json(msg="Failed to run unmount. Error message: %s" % err) + else: + changed = True + msg = "File system %s unmounted." % filesystem + + return changed, msg + else: + changed = True + msg = '' + + return changed, msg + + +def main(): + module = AnsibleModule( + argument_spec=dict( + account_subsystem=dict(type='bool', default=False), + attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]), + auto_mount=dict(type='bool', default=True), + device=dict(type='str'), + filesystem=dict(type='str', required=True), + fs_type=dict(type='str', default='jfs2'), + permissions=dict(type='str', default='rw', choices=['rw', 'ro']), + mount_group=dict(type='str'), + nfs_server=dict(type='str'), + rm_mount_point=dict(type='bool', default=False), + size=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']), + vg=dict(type='str'), + ), + supports_check_mode=True, + ) + + account_subsystem = module.params['account_subsystem'] + attributes = module.params['attributes'] + auto_mount = module.params['auto_mount'] + device = module.params['device'] + fs_type = module.params['fs_type'] + permissions = module.params['permissions'] + mount_group = module.params['mount_group'] + filesystem = module.params['filesystem'] + nfs_server = module.params['nfs_server'] + rm_mount_point = module.params['rm_mount_point'] + size = module.params['size'] + state = module.params['state'] + vg = module.params['vg'] + + result = dict( + changed=False, + msg='', + ) + + if state == 'present': + fs_mounted = ismount(filesystem) + fs_exists = _fs_exists(module, filesystem) + + # Check if fs is mounted or exists. + if fs_mounted or fs_exists: + result['msg'] = "File system %s already exists." % filesystem + result['changed'] = False + + # If parameter size was passed, resize fs. 
+ if size is not None: + result['changed'], result['msg'] = resize_fs(module, filesystem, size) + + # If fs doesn't exist, create it. + else: + # Check if fs will be a NFS device. + if nfs_server is not None: + if device is None: + result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.' + module.fail_json(**result) + else: + # Create a fs from NFS export. + if _check_nfs_device(module, nfs_server, device): + result['changed'], result['msg'] = create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) + + if device is None: + if vg is None: + result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.' + module.fail_json(**result) + else: + # Create a fs from + result['changed'], result['msg'] = create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) + + if device is not None and nfs_server is None: + # Create a fs from a previously lv device. + result['changed'], result['msg'] = create_fs( + module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes) + + elif state == 'absent': + if ismount(filesystem): + result['msg'] = "File system %s mounted." % filesystem + + else: + fs_status = _fs_exists(module, filesystem) + if not fs_status: + result['msg'] = "File system %s does not exist." % filesystem + else: + result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point) + + elif state == 'mounted': + if ismount(filesystem): + result['changed'] = False + result['msg'] = "File system %s already mounted." % filesystem + else: + result['changed'], result['msg'] = mount_fs(module, filesystem) + + elif state == 'unmounted': + if not ismount(filesystem): + result['changed'] = False + result['msg'] = "File system %s already unmounted." % filesystem + else: + result['changed'], result['msg'] = unmount_fs(module, filesystem) + + else: + # Unreachable codeblock + result['msg'] = "Unexpected state %s." % state + module.fail_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/aix_inittab.py b/ansible_collections/community/general/plugins/modules/aix_inittab.py new file mode 100644 index 000000000..c2c968189 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/aix_inittab.py @@ -0,0 +1,255 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Joris Weijters +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +author: + - Joris Weijters (@molekuul) +module: aix_inittab +short_description: Manages the inittab on AIX +description: + - Manages the inittab on AIX. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the inittab entry. + type: str + required: true + aliases: [ service ] + runlevel: + description: + - Runlevel of the entry. + type: str + required: true + action: + description: + - Action what the init has to do with this entry. 
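+      - For example, C(once) runs the command one time while C(respawn) restarts the command whenever it terminates.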
+ type: str + choices: + - boot + - bootwait + - hold + - initdefault + - 'off' + - once + - ondemand + - powerfail + - powerwait + - respawn + - sysinit + - wait + command: + description: + - What command has to run. + type: str + required: true + insertafter: + description: + - After which inittabline should the new entry inserted. + type: str + state: + description: + - Whether the entry should be present or absent in the inittab file. + type: str + choices: [ absent, present ] + default: present +notes: + - The changes are persistent across reboots. + - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands. + - Tested on AIX 7.1. +requirements: +- itertools +''' + +EXAMPLES = ''' +# Add service startmyservice to the inittab, directly after service existingservice. +- name: Add startmyservice to inittab + community.general.aix_inittab: + name: startmyservice + runlevel: 4 + action: once + command: echo hello + insertafter: existingservice + state: present + become: true + +# Change inittab entry startmyservice to runlevel "2" and processaction "wait". +- name: Change startmyservice to inittab + community.general.aix_inittab: + name: startmyservice + runlevel: 2 + action: wait + command: echo hello + state: present + become: true + +- name: Remove startmyservice from inittab + community.general.aix_inittab: + name: startmyservice + runlevel: 2 + action: wait + command: echo hello + state: absent + become: true +''' + +RETURN = ''' +name: + description: Name of the adjusted inittab entry + returned: always + type: str + sample: startmyservice +msg: + description: Action done with the inittab entry + returned: changed + type: str + sample: changed inittab entry startmyservice +changed: + description: Whether the inittab changed or not + returned: always + type: bool + sample: true +''' + +# Import necessary libraries +try: + # python 2 + from itertools import izip +except ImportError: + izip = zip + +from ansible.module_utils.basic import AnsibleModule + +# end import modules +# start defining the functions + + +def check_current_entry(module): + # Check if entry exists, if not return False in exists in return dict, + # if true return True and the entry in return dict + existsdict = {'exist': False} + lsitab = module.get_bin_path('lsitab') + (rc, out, err) = module.run_command([lsitab, module.params['name']]) + if rc == 0: + keys = ('name', 'runlevel', 'action', 'command') + values = out.split(":") + # strip non readable characters as \n + values = map(lambda s: s.strip(), values) + existsdict = dict(izip(keys, values)) + existsdict.update({'exist': True}) + return existsdict + + +def main(): + # initialize + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True, aliases=['service']), + runlevel=dict(type='str', required=True), + action=dict(type='str', choices=[ + 'boot', + 'bootwait', + 'hold', + 'initdefault', + 'off', + 'once', + 'ondemand', + 'powerfail', + 'powerwait', + 'respawn', + 'sysinit', + 'wait', + ]), + command=dict(type='str', required=True), + insertafter=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + result = { + 'name': module.params['name'], + 'changed': False, + 'msg': "" + } + + # Find commandline strings + mkitab = module.get_bin_path('mkitab') + rmitab = module.get_bin_path('rmitab') + chitab = module.get_bin_path('chitab') + rc = 0 + + # check if the new entry exists + current_entry = 
check_current_entry(module)
+
+    # If state is present, create the entry or change it when fields differ.
+    if module.params['state'] == 'present':
+
+        # Create the new entry string.
+        new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
+            ":" + module.params['action'] + ":" + module.params['command']
+
+        # If the entry does not exist it will be created; if it exists but
+        # any field differs, it will be changed.
+        if (not current_entry['exist']) or (
+                module.params['runlevel'] != current_entry['runlevel'] or
+                module.params['action'] != current_entry['action'] or
+                module.params['command'] != current_entry['command']):
+
+            # If the entry does exist then change the entry
+            if current_entry['exist']:
+                if not module.check_mode:
+                    (rc, out, err) = module.run_command([chitab, new_entry])
+                if rc != 0:
+                    module.fail_json(
+                        msg="could not change inittab", rc=rc, err=err)
+                result['msg'] = "changed inittab entry" + " " + current_entry['name']
+                result['changed'] = True
+
+            # If the entry does not exist create the entry
+            elif not current_entry['exist']:
+                if module.params['insertafter']:
+                    if not module.check_mode:
+                        (rc, out, err) = module.run_command(
+                            [mkitab, '-i', module.params['insertafter'], new_entry])
+                else:
+                    if not module.check_mode:
+                        (rc, out, err) = module.run_command(
+                            [mkitab, new_entry])
+
+                if rc != 0:
+                    module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
+                result['msg'] = "added inittab entry" + " " + module.params['name']
+                result['changed'] = True
+
+    elif module.params['state'] == 'absent':
+        # If the state is absent and the entry exists then remove the entry.
+        if current_entry['exist']:
+            if not module.check_mode:
+                (rc, out, err) = module.run_command(
+                    [rmitab, module.params['name']])
+                if rc != 0:
+                    module.fail_json(
+                        msg="could not remove entry from inittab", rc=rc, err=err)
+            result['msg'] = "removed inittab entry" + " " + current_entry['name']
+            result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/aix_lvg.py b/ansible_collections/community/general/plugins/modules/aix_lvg.py
new file mode 100644
index 000000000..d89c43de4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aix_lvg.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Kairo Araujo
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+  - Kairo Araujo (@kairoaraujo)
+module: aix_lvg
+short_description: Manage LVM volume groups on AIX
+description:
+  - This module creates, removes, or resizes volume groups on AIX LVM.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  force:
+    description:
+      - Force volume group creation.
+    type: bool
+    default: false
+  pp_size:
+    description:
+      - The size of the physical partition in megabytes.
+    type: int
+  pvs:
+    description:
+      - List of physical devices to use in this volume group.
+      - Required when creating or extending (C(present) state) the volume group.
+      - If not provided when reducing (C(absent) state), the whole volume group will be removed.
+ type: list + elements: str + state: + description: + - Control if the volume group exists and volume group AIX state varyonvg C(varyon) or varyoffvg C(varyoff). + type: str + choices: [ absent, present, varyoff, varyon ] + default: present + vg: + description: + - The name of the volume group. + type: str + required: true + vg_type: + description: + - The type of the volume group. + type: str + choices: [ big, normal, scalable ] + default: normal +notes: +- AIX will permit remove VG only if all LV/Filesystems are not busy. +- Module does not modify PP size for already present volume group. +''' + +EXAMPLES = r''' +- name: Create a volume group datavg + community.general.aix_lvg: + vg: datavg + pp_size: 128 + vg_type: scalable + state: present + +- name: Removing a volume group datavg + community.general.aix_lvg: + vg: datavg + state: absent + +- name: Extending rootvg + community.general.aix_lvg: + vg: rootvg + pvs: hdisk1 + state: present + +- name: Reducing rootvg + community.general.aix_lvg: + vg: rootvg + pvs: hdisk1 + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule + + +def _validate_pv(module, vg, pvs): + """ + Function to validate if the physical volume (PV) is not already in use by + another volume group or Oracle ASM. + + :param module: Ansible module argument spec. + :param vg: Volume group name. + :param pvs: Physical volume list. + :return: [bool, message] or module.fail_json for errors. + """ + + lspv_cmd = module.get_bin_path('lspv', True) + rc, current_lspv, stderr = module.run_command([lspv_cmd]) + if rc != 0: + module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr) + + for pv in pvs: + # Get pv list. + lspv_list = {} + for line in current_lspv.splitlines(): + pv_data = line.split() + lspv_list[pv_data[0]] = pv_data[2] + + # Check if pv exists and is free. + if pv not in lspv_list.keys(): + module.fail_json(msg="Physical volume '%s' doesn't exist." % pv) + + if lspv_list[pv] == 'None': + # Disk None, looks free. + # Check if PV is not already in use by Oracle ASM. + lquerypv_cmd = module.get_bin_path('lquerypv', True) + rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", "/dev/%s" % pv, "20", "10"]) + if rc != 0: + module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr) + + if 'ORCLDISK' in current_lquerypv: + module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv) + + msg = "Physical volume '%s' is ok to be used." % pv + return True, msg + + # Check if PV is already in use for the same vg. + elif vg != lspv_list[pv]: + module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv])) + + msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv]) + return False, msg + + +def _validate_vg(module, vg): + """ + Check the current state of volume group. + + :param module: Ansible module argument spec. + :param vg: Volume Group name. + :return: True (VG in varyon state) or False (VG in varyoff state) or + None (VG does not exist), message. + """ + lsvg_cmd = module.get_bin_path('lsvg', True) + rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) + if rc != 0: + module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) + + rc, current_all_vgs, err = module.run_command([lsvg_cmd]) + if rc != 0: + module.fail_json(msg="Failed executing '%s' command." 
% lsvg_cmd) + + if vg in current_all_vgs and vg not in current_active_vgs: + msg = "Volume group '%s' is in varyoff state." % vg + return False, msg + + if vg in current_active_vgs: + msg = "Volume group '%s' is in varyon state." % vg + return True, msg + + msg = "Volume group '%s' does not exist." % vg + return None, msg + + +def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): + """ Creates or extend a volume group. """ + + # Command option parameters. + force_opt = { + True: '-f', + False: '' + } + + vg_opt = { + 'normal': '', + 'big': '-B', + 'scalable': '-S', + } + + # Validate if PV are not already in use. + pv_state, msg = _validate_pv(module, vg, pvs) + if not pv_state: + changed = False + return changed, msg + + vg_state, msg = vg_validation + if vg_state is False: + changed = False + return changed, msg + + elif vg_state is True: + # Volume group extension. + changed = True + msg = "" + + if not module.check_mode: + extendvg_cmd = module.get_bin_path('extendvg', True) + rc, output, err = module.run_command([extendvg_cmd, vg] + pvs) + if rc != 0: + changed = False + msg = "Extending volume group '%s' has failed." % vg + return changed, msg + + msg = "Volume group '%s' extended." % vg + return changed, msg + + elif vg_state is None: + # Volume group creation. + changed = True + msg = '' + + if not module.check_mode: + mkvg_cmd = module.get_bin_path('mkvg', True) + rc, output, err = module.run_command([mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], "-y", vg] + pvs) + if rc != 0: + changed = False + msg = "Creating volume group '%s' failed." % vg + return changed, msg + + msg = "Volume group '%s' created." % vg + return changed, msg + + +def reduce_vg(module, vg, pvs, vg_validation): + vg_state, msg = vg_validation + + if vg_state is False: + changed = False + return changed, msg + + elif vg_state is None: + changed = False + return changed, msg + + # Define pvs_to_remove (list of physical volumes to be removed). + if pvs is None: + # Remove VG if pvs are note informed. + # Remark: AIX will permit remove only if the VG has not LVs. + lsvg_cmd = module.get_bin_path('lsvg', True) + rc, current_pvs, err = module.run_command([lsvg_cmd, "-p", vg]) + if rc != 0: + module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd) + + pvs_to_remove = [] + for line in current_pvs.splitlines()[2:]: + pvs_to_remove.append(line.split()[0]) + + reduce_msg = "Volume group '%s' removed." % vg + else: + pvs_to_remove = pvs + reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg)) + + # Reduce volume group. + if len(pvs_to_remove) <= 0: + changed = False + msg = "No physical volumes to remove." + return changed, msg + + changed = True + msg = '' + + if not module.check_mode: + reducevg_cmd = module.get_bin_path('reducevg', True) + rc, stdout, stderr = module.run_command([reducevg_cmd, "-df", vg] + pvs_to_remove) + if rc != 0: + module.fail_json(msg="Unable to remove '%s'." 
% vg, rc=rc, stdout=stdout, stderr=stderr) + + msg = reduce_msg + return changed, msg + + +def state_vg(module, vg, state, vg_validation): + vg_state, msg = vg_validation + + if vg_state is None: + module.fail_json(msg=msg) + + if state == 'varyon': + if vg_state is True: + changed = False + return changed, msg + + changed = True + msg = '' + if not module.check_mode: + varyonvg_cmd = module.get_bin_path('varyonvg', True) + rc, varyonvg_out, err = module.run_command([varyonvg_cmd, vg]) + if rc != 0: + module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err) + + msg = "Varyon volume group %s completed." % vg + return changed, msg + + elif state == 'varyoff': + if vg_state is False: + changed = False + return changed, msg + + changed = True + msg = '' + + if not module.check_mode: + varyonvg_cmd = module.get_bin_path('varyoffvg', True) + rc, varyonvg_out, stderr = module.run_command([varyonvg_cmd, vg]) + if rc != 0: + module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr) + + msg = "Varyoff volume group %s completed." % vg + return changed, msg + + +def main(): + module = AnsibleModule( + argument_spec=dict( + force=dict(type='bool', default=False), + pp_size=dict(type='int'), + pvs=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']), + vg=dict(type='str', required=True), + vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable']) + ), + supports_check_mode=True, + ) + + force = module.params['force'] + pp_size = module.params['pp_size'] + pvs = module.params['pvs'] + state = module.params['state'] + vg = module.params['vg'] + vg_type = module.params['vg_type'] + + if pp_size is None: + pp_size = '' + else: + pp_size = "-s %s" % pp_size + + vg_validation = _validate_vg(module, vg) + + if state == 'present': + if not pvs: + changed = False + msg = "pvs is required to state 'present'." + module.fail_json(msg=msg) + else: + changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation) + + elif state == 'absent': + changed, msg = reduce_vg(module, vg, pvs, vg_validation) + + elif state == 'varyon' or state == 'varyoff': + changed, msg = state_vg(module, vg, state, vg_validation) + + else: + changed = False + msg = "Unexpected state" + + module.exit_json(changed=changed, msg=msg, state=state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/aix_lvol.py b/ansible_collections/community/general/plugins/modules/aix_lvol.py new file mode 100644 index 000000000..0a4a6eff5 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/aix_lvol.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Alain Dejoux +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +author: + - Alain Dejoux (@adejoux) +module: aix_lvol +short_description: Configure AIX LVM logical volumes +description: + - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + vg: + description: + - The volume group this logical volume is part of. 
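+      - The volume group must already exist; the module fails if it does not.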
+ type: str + required: true + lv: + description: + - The name of the logical volume. + type: str + required: true + lv_type: + description: + - The type of the logical volume. + type: str + default: jfs2 + size: + description: + - The size of the logical volume with one of the [MGT] units. + type: str + copies: + description: + - The number of copies of the logical volume. + - Maximum copies are 3. + type: int + default: 1 + policy: + description: + - Sets the interphysical volume allocation policy. + - C(maximum) allocates logical partitions across the maximum number of physical volumes. + - C(minimum) allocates logical partitions across the minimum number of physical volumes. + type: str + choices: [ maximum, minimum ] + default: maximum + state: + description: + - Control if the logical volume exists. If C(present) and the + volume does not already exist then the C(size) option is required. + type: str + choices: [ absent, present ] + default: present + opts: + description: + - Free-form options to be passed to the mklv command. + type: str + default: '' + pvs: + description: + - A list of physical volumes e.g. C(hdisk1,hdisk2). + type: list + elements: str + default: [] +''' + +EXAMPLES = r''' +- name: Create a logical volume of 512M + community.general.aix_lvol: + vg: testvg + lv: testlv + size: 512M + +- name: Create a logical volume of 512M with disks hdisk1 and hdisk2 + community.general.aix_lvol: + vg: testvg + lv: test2lv + size: 512M + pvs: [ hdisk1, hdisk2 ] + +- name: Create a logical volume of 512M mirrored + community.general.aix_lvol: + vg: testvg + lv: test3lv + size: 512M + copies: 2 + +- name: Create a logical volume of 1G with a minimum placement policy + community.general.aix_lvol: + vg: rootvg + lv: test4lv + size: 1G + policy: minimum + +- name: Create a logical volume with special options like mirror pool + community.general.aix_lvol: + vg: testvg + lv: testlv + size: 512M + opts: -p copy1=poolA -p copy2=poolB + +- name: Extend the logical volume to 1200M + community.general.aix_lvol: + vg: testvg + lv: test4lv + size: 1200M + +- name: Remove the logical volume + community.general.aix_lvol: + vg: testvg + lv: testlv + state: absent +''' + +RETURN = r''' +msg: + type: str + description: A friendly message describing the task result. + returned: always + sample: Logical volume testlv created. 
+''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def convert_size(module, size): + unit = size[-1].upper() + units = ['M', 'G', 'T'] + try: + multiplier = 1024 ** units.index(unit) + except ValueError: + module.fail_json(msg="No valid size unit specified.") + + return int(size[:-1]) * multiplier + + +def round_ppsize(x, base=16): + new_size = int(base * round(float(x) / base)) + if new_size < x: + new_size += base + return new_size + + +def parse_lv(data): + name = None + + for line in data.splitlines(): + match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line) + if match is not None: + name = match.group(1) + vg = match.group(2) + continue + match = re.search(r"LPs:\s+(\d+).*PPs", line) + if match is not None: + lps = int(match.group(1)) + continue + match = re.search(r"PP SIZE:\s+(\d+)", line) + if match is not None: + pp_size = int(match.group(1)) + continue + match = re.search(r"INTER-POLICY:\s+(\w+)", line) + if match is not None: + policy = match.group(1) + continue + + if not name: + return None + + size = lps * pp_size + + return {'name': name, 'vg': vg, 'size': size, 'policy': policy} + + +def parse_vg(data): + + for line in data.splitlines(): + + match = re.search(r"VOLUME GROUP:\s+(\w+)", line) + if match is not None: + name = match.group(1) + continue + + match = re.search(r"TOTAL PP.*\((\d+)", line) + if match is not None: + size = int(match.group(1)) + continue + + match = re.search(r"PP SIZE:\s+(\d+)", line) + if match is not None: + pp_size = int(match.group(1)) + continue + + match = re.search(r"FREE PP.*\((\d+)", line) + if match is not None: + free = int(match.group(1)) + continue + + return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + lv=dict(type='str', required=True), + lv_type=dict(type='str', default='jfs2'), + size=dict(type='str'), + opts=dict(type='str', default=''), + copies=dict(type='int', default=1), + state=dict(type='str', default='present', choices=['absent', 'present']), + policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']), + pvs=dict(type='list', elements='str', default=list()) + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + lv = module.params['lv'] + lv_type = module.params['lv_type'] + size = module.params['size'] + opts = module.params['opts'] + copies = module.params['copies'] + policy = module.params['policy'] + state = module.params['state'] + pvs = module.params['pvs'] + + pv_list = ' '.join(pvs) + + if policy == 'maximum': + lv_policy = 'x' + else: + lv_policy = 'm' + + # Add echo command when running in check-mode + if module.check_mode: + test_opt = 'echo ' + else: + test_opt = '' + + # check if system commands are available + lsvg_cmd = module.get_bin_path("lsvg", required=True) + lslv_cmd = module.get_bin_path("lslv", required=True) + + # Get information on volume group requested + rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, msg="Volume group %s does not exist." % vg) + else: + module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err) + + this_vg = parse_vg(vg_info) + + if size is not None: + # Calculate pp size and round it up based on pp size. 
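+        # round_ppsize() rounds the converted size up to the next multiple of
+        # the volume group's physical partition (PP) size, because AIX
+        # allocates logical volumes in whole PPs. Worked example with assumed
+        # values: convert_size(module, '500M') -> 500, and with a 64 MB PP
+        # size round_ppsize(500, base=64) -> 512.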
+        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
+
+    # Get information on the requested logical volume.
+    rc, lv_info, err = module.run_command(
+        "%s %s" % (lslv_cmd, lv))
+
+    if rc != 0:
+        if state == 'absent':
+            module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)
+
+    changed = False
+
+    this_lv = parse_lv(lv_info)
+
+    if state == 'present' and not size:
+        if this_lv is None:
+            module.fail_json(msg="No size given.")
+
+    if this_lv is None:
+        if state == 'present':
+            if lv_size > this_vg['free']:
+                module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
+
+            # Create the logical volume.
+            mklv_cmd = module.get_bin_path("mklv", required=True)
+
+            cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
+            rc, out, err = module.run_command(cmd)
+            if rc == 0:
+                module.exit_json(changed=True, msg="Logical volume %s created." % lv)
+            else:
+                module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
+    else:
+        if state == 'absent':
+            # Remove the logical volume.
+            rmlv_cmd = module.get_bin_path("rmlv", required=True)
+            rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
+            if rc == 0:
+                module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
+            else:
+                module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
+        else:
+            if this_lv['policy'] != policy:
+                # Change the logical volume allocation policy.
+                chlv_cmd = module.get_bin_path("chlv", required=True)
+                rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
+                if rc == 0:
+                    module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
+                else:
+                    module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
+
+            if vg != this_lv['vg']:
+                module.fail_json(msg="Logical volume %s already exists in volume group %s." % (lv, this_lv['vg']))
+
+            # From here the only remaining action is resizing; if no size parameter was passed, do nothing.
+            if not size:
+                module.exit_json(changed=False, msg="Logical volume %s already exists." % lv)
+
+            # Resize the logical volume based on the absolute value.
+            if int(lv_size) > this_lv['size']:
+                extendlv_cmd = module.get_bin_path("extendlv", required=True)
+                cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
+                rc, out, err = module.run_command(cmd)
+                if rc == 0:
+                    module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
+                else:
+                    module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
+            elif lv_size < this_lv['size']:
+                module.fail_json(msg="Shrinking logical volume %s is not supported. Current size: %s MB." % (lv, this_lv['size']))
+            else:
+                module.exit_json(changed=False, msg="Logical volume %s size is already %sMB."
% (lv, lv_size)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/alerta_customer.py b/ansible_collections/community/general/plugins/modules/alerta_customer.py new file mode 100644 index 000000000..120d98932 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/alerta_customer.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Christian Wollinger <@cwollinger> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: alerta_customer +short_description: Manage customers in Alerta +version_added: 4.8.0 +description: + - Create or delete customers in Alerta with the REST API. +author: Christian Wollinger (@cwollinger) +seealso: + - name: API documentation + description: Documentation for Alerta API + link: https://docs.alerta.io/api/reference.html#customers +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + customer: + description: + - Name of the customer. + required: true + type: str + match: + description: + - The matching logged in user for the customer. + required: true + type: str + alerta_url: + description: + - The Alerta API endpoint. + required: true + type: str + api_username: + description: + - The username for the API using basic auth. + type: str + api_password: + description: + - The password for the API using basic auth. + type: str + api_key: + description: + - The access token for the API. + type: str + state: + description: + - Whether the customer should exist or not. + - Both I(customer) and I(match) identify a customer that should be added or removed. + type: str + choices: [ absent, present ] + default: present +''' + +EXAMPLES = """ +- name: Create customer + community.general.alerta_customer: + alerta_url: https://alerta.example.com + api_username: admin@example.com + api_password: password + customer: Developer + match: dev@example.com + +- name: Delete customer + community.general.alerta_customer: + alerta_url: https://alerta.example.com + api_username: admin@example.com + api_password: password + customer: Developer + match: dev@example.com + state: absent +""" + +RETURN = """ +msg: + description: + - Success or failure message. + returned: always + type: str + sample: Customer customer1 created +response: + description: + - The response from the API. 
+  returned: always
+  type: dict
+"""

+from ansible.module_utils.urls import fetch_url, basic_auth_header
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AlertaInterface(object):
+
+    def __init__(self, module):
+        self.module = module
+        self.state = module.params['state']
+        self.customer = module.params['customer']
+        self.match = module.params['match']
+        self.alerta_url = module.params['alerta_url']
+        self.headers = {"Content-Type": "application/json"}
+
+        if module.params.get('api_key', None):
+            self.headers["Authorization"] = "Key %s" % module.params['api_key']
+        else:
+            self.headers["Authorization"] = basic_auth_header(module.params['api_username'], module.params['api_password'])
+
+    def send_request(self, url, data=None, method="GET"):
+        response, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
+
+        status_code = info["status"]
+        if status_code == 401:
+            self.module.fail_json(failed=True, response=info, msg="Unauthorized to request '%s' on '%s'" % (method, url))
+        elif status_code == 403:
+            self.module.fail_json(failed=True, response=info, msg="Permission Denied for '%s' on '%s'" % (method, url))
+        elif status_code == 404:
+            self.module.fail_json(failed=True, response=info, msg="Not found for request '%s' on '%s'" % (method, url))
+        elif status_code in (200, 201):
+            return self.module.from_json(response.read())
+        self.module.fail_json(failed=True, response=info, msg="Alerta API error with HTTP %d for %s" % (status_code, url))
+
+    def get_customers(self):
+        url = "%s/api/customers" % self.alerta_url
+        response = self.send_request(url)
+        pages = response["pages"]
+        if pages > 1:
+            for page in range(2, pages + 1):
+                page_url = url + '?page=' + str(page)
+                new_results = self.send_request(page_url)
+                # Append each additional page's customers to the first page's
+                # list, so every page is kept in the combined response.
+                response['customers'].extend(new_results.get('customers', []))
+        return response
+
+    def create_customer(self):
+        url = "%s/api/customer" % self.alerta_url
+
+        payload = {
+            'customer': self.customer,
+            'match': self.match,
+        }
+
+        payload = self.module.jsonify(payload)
+        response = self.send_request(url, payload, 'POST')
+        return response
+
+    def delete_customer(self, id):
+        url = "%s/api/customer/%s" % (self.alerta_url, id)
+
+        response = self.send_request(url, None, 'DELETE')
+        return response
+
+    def find_customer_id(self, customer):
+        for i in customer['customers']:
+            if self.customer == i['customer'] and self.match == i['match']:
+                return i['id']
+        return None
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(choices=['present', 'absent'], default='present'),
+            customer=dict(type='str', required=True),
+            match=dict(type='str', required=True),
+            alerta_url=dict(type='str', required=True),
+            api_username=dict(type='str'),
+            api_password=dict(type='str', no_log=True),
+            api_key=dict(type='str', no_log=True),
+        ),
+        required_together=[['api_username', 'api_password']],
+        mutually_exclusive=[['api_username', 'api_key']],
+        supports_check_mode=True
+    )
+
+    alerta_iface = AlertaInterface(module)
+
+    if alerta_iface.state == 'present':
+        response = alerta_iface.get_customers()
+        if alerta_iface.find_customer_id(response):
+            module.exit_json(changed=False, response=response, msg="Customer %s already exists" % alerta_iface.customer)
+        else:
+            if not module.check_mode:
+                response = alerta_iface.create_customer()
+            module.exit_json(changed=True, response=response, msg="Customer %s created" % alerta_iface.customer)
+    else:
+        response = alerta_iface.get_customers()
+        id = alerta_iface.find_customer_id(response)
+        if id:
+            if not module.check_mode:
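+                # In check mode the DELETE request is skipped, but the module
+                # still reports changed=True for the customer it would remove.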
alerta_iface.delete_customer(id) + module.exit_json(changed=True, response=response, msg="Customer %s with id %s deleted" % (alerta_iface.customer, id)) + else: + module.exit_json(changed=False, response=response, msg="Customer %s does not exists" % alerta_iface.customer) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/ali_instance.py b/ansible_collections/community/general/plugins/modules/ali_instance.py new file mode 100644 index 000000000..232c21ee0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ali_instance.py @@ -0,0 +1,1012 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see http://www.gnu.org/licenses/. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ali_instance +short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security Group +description: + - Create, start, stop, restart, modify or terminate ecs instances. + - Add or remove ecs instances to/from security group. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - The state of the instance after operating. + default: 'present' + choices: ['present', 'running', 'stopped', 'restarted', 'absent'] + type: str + availability_zone: + description: + - Aliyun availability zone ID in which to launch the instance. + If it is not specified, it will be allocated by system automatically. + aliases: ['alicloud_zone', 'zone_id'] + type: str + image_id: + description: + - Image ID used to launch instances. Required when I(state=present) and creating new ECS instances. + aliases: ['image'] + type: str + instance_type: + description: + - Instance type used to launch instances. Required when I(state=present) and creating new ECS instances. + aliases: ['type'] + type: str + security_groups: + description: + - A list of security group IDs. + aliases: ['group_ids'] + type: list + elements: str + vswitch_id: + description: + - The subnet ID in which to launch the instances (VPC). + aliases: ['subnet_id'] + type: str + instance_name: + description: + - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an + uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-". + It cannot begin with http:// or https://. + aliases: ['name'] + type: str + description: + description: + - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://. 
+ type: str + internet_charge_type: + description: + - Internet charge type of ECS instance. + default: 'PayByBandwidth' + choices: ['PayByBandwidth', 'PayByTraffic'] + type: str + max_bandwidth_in: + description: + - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second). + default: 200 + type: int + max_bandwidth_out: + description: + - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). + Required when I(allocate_public_ip=true). Ignored when I(allocate_public_ip=false). + default: 0 + type: int + host_name: + description: + - Instance host name. Ordered hostname is not supported. + type: str + unique_suffix: + description: + - Specifies whether to add sequential suffixes to the host_name. + The sequential suffix ranges from 001 to 999. + default: false + type: bool + version_added: '0.2.0' + password: + description: + - The password to login instance. After rebooting instances, modified password will take effect. + type: str + system_disk_category: + description: + - Category of the system disk. + default: 'cloud_efficiency' + choices: ['cloud_efficiency', 'cloud_ssd'] + type: str + system_disk_size: + description: + - Size of the system disk, in GB. The valid values are 40~500. + default: 40 + type: int + system_disk_name: + description: + - Name of the system disk. + type: str + system_disk_description: + description: + - Description of the system disk. + type: str + count: + description: + - The number of the new instance. An integer value which indicates how many instances that match I(count_tag) + should be running. Instances are either created or terminated based on this value. + default: 1 + type: int + count_tag: + description: + - I(count) determines how many instances based on a specific tag criteria should be present. + This can be expressed in multiple ways and is shown in the EXAMPLES section. + The specified count_tag must already exist or be passed in as the I(tags) option. + If it is not specified, it will be replaced by I(instance_name). + type: str + allocate_public_ip: + description: + - Whether allocate a public ip for the new instance. + default: false + aliases: [ 'assign_public_ip' ] + type: bool + instance_charge_type: + description: + - The charge type of the instance. + choices: ['PrePaid', 'PostPaid'] + default: 'PostPaid' + type: str + period: + description: + - The charge duration of the instance, in months. Required when I(instance_charge_type=PrePaid). + - The valid value are [1-9, 12, 24, 36]. + default: 1 + type: int + auto_renew: + description: + - Whether automate renew the charge of the instance. + type: bool + default: false + auto_renew_period: + description: + - The duration of the automatic renew the charge of the instance. Required when I(auto_renew=true). + choices: [1, 2, 3, 6, 12] + type: int + instance_ids: + description: + - A list of instance ids. It is required when need to operate existing instances. + If it is specified, I(count) will lose efficacy. + type: list + elements: str + force: + description: + - Whether the current operation needs to be execute forcibly. + default: false + type: bool + tags: + description: + - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. C({"key":"value"}) + aliases: ["instance_tags"] + type: dict + version_added: '0.2.0' + purge_tags: + description: + - Delete any tags not specified in the task that are on the instance. 
+ If True, it means you have to specify all the desired tags on each task affecting an instance. + default: false + type: bool + version_added: '0.2.0' + key_name: + description: + - The name of key pair which is used to access ECS instance in SSH. + required: false + type: str + aliases: ['keypair'] + user_data: + description: + - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. + It only will take effect when launching the new ECS instances. + required: false + type: str + ram_role_name: + description: + - The name of the instance RAM role. + type: str + version_added: '0.2.0' + spot_price_limit: + description: + - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal + places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit. + type: float + version_added: '0.2.0' + spot_strategy: + description: + - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid. + choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo'] + default: 'NoSpot' + type: str + version_added: '0.2.0' + period_unit: + description: + - The duration unit that you will buy the resource. It is valid when I(instance_charge_type=PrePaid). + choices: ['Month', 'Week'] + default: 'Month' + type: str + version_added: '0.2.0' + dry_run: + description: + - Specifies whether to send a dry-run request. + - If I(dry_run=true), Only a dry-run request is sent and no instance is created. The system checks whether the + required parameters are set, and validates the request format, service permissions, and available ECS instances. + If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned. + - If I(dry_run=false), A request is sent. If the validation succeeds, the instance is created. + default: false + type: bool + version_added: '0.2.0' + include_data_disks: + description: + - Whether to change instance disks charge type when changing instance charge type. 
+ default: true + type: bool + version_added: '0.2.0' +author: + - "He Guimin (@xiaozhu36)" +requirements: + - "python >= 3.6" + - "footmark >= 1.19.0" +extends_documentation_fragment: + - community.general.alicloud + - community.general.attributes +''' + +EXAMPLES = ''' +# basic provisioning example vpc network +- name: Basic provisioning example + hosts: localhost + vars: + alicloud_access_key: + alicloud_secret_key: + alicloud_region: cn-beijing + image: ubuntu1404_64_40G_cloudinit_20160727.raw + instance_type: ecs.n4.small + vswitch_id: vsw-abcd1234 + assign_public_ip: true + max_bandwidth_out: 10 + host_name: myhost + password: mypassword + system_disk_category: cloud_efficiency + system_disk_size: 100 + internet_charge_type: PayByBandwidth + security_groups: ["sg-f2rwnfh23r"] + + instance_ids: ["i-abcd12346", "i-abcd12345"] + force: true + + tasks: + - name: Launch ECS instance in VPC network + community.general.ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + image: '{{ image }}' + system_disk_category: '{{ system_disk_category }}' + system_disk_size: '{{ system_disk_size }}' + instance_type: '{{ instance_type }}' + vswitch_id: '{{ vswitch_id }}' + assign_public_ip: '{{ assign_public_ip }}' + internet_charge_type: '{{ internet_charge_type }}' + max_bandwidth_out: '{{ max_bandwidth_out }}' + tags: + Name: created_one + host_name: '{{ host_name }}' + password: '{{ password }}' + + - name: With count and count_tag to create a number of instances + community.general.ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + image: '{{ image }}' + system_disk_category: '{{ system_disk_category }}' + system_disk_size: '{{ system_disk_size }}' + instance_type: '{{ instance_type }}' + assign_public_ip: '{{ assign_public_ip }}' + security_groups: '{{ security_groups }}' + internet_charge_type: '{{ internet_charge_type }}' + max_bandwidth_out: '{{ max_bandwidth_out }}' + tags: + Name: created_one + Version: 0.1 + count: 2 + count_tag: + Name: created_one + host_name: '{{ host_name }}' + password: '{{ password }}' + + - name: Start instance + community.general.ali_instance: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + state: 'running' + + - name: Reboot instance forcibly + ecs: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + state: 'restarted' + force: '{{ force }}' + + - name: Add instances to an security group + ecs: + alicloud_access_key: '{{ alicloud_access_key }}' + alicloud_secret_key: '{{ alicloud_secret_key }}' + alicloud_region: '{{ alicloud_region }}' + instance_ids: '{{ instance_ids }}' + security_groups: '{{ security_groups }}' +''' + +RETURN = ''' +instances: + description: List of ECS instances + returned: always + type: complex + contains: + availability_zone: + description: The availability zone of the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. 
+ returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/xvda). + returned: always + type: str + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. + returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. + returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attribution of EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance will expire. + returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attribution of instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. + returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. + returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. + returned: always + type: str + sample: my-ecs + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + instance_type_family: + description: The instance type family of the instance belongs. + returned: always + type: str + sample: ecs.sn1ne + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. 
+ returned: always + type: bool + sample: false + memory: + description: Memory size of the instance. + returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch. + returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operation system name of the instance owned. + returned: always + type: str + sample: CentOS + ostype: + description: The operation system type of the instance owned. + returned: always + type: str + sample: linux + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + public_ip_address: + description: The public IPv4 address assigned to the instance or eip address + returned: always + type: str + sample: 43.0.0.1 + resource_group_id: + description: The id of the resource group to which the instance belongs. + returned: always + type: str + sample: my-ecs-group + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + status: + description: The current status of the instance. + returned: always + type: str + sample: running + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + user_data: + description: User-defined data. + returned: always + type: dict + sample: + vswitch_id: + description: The ID of the vswitch in which the instance is running. + returned: always + type: str + sample: vsw-dew00abcdef + vpc_id: + description: The ID of the VPC the instance is in. + returned: always + type: str + sample: vpc-0011223344 + spot_price_limit: + description: + - The maximum hourly price for the preemptible instance. + returned: always + type: float + sample: 0.97 + spot_strategy: + description: + - The bidding mode of the pay-as-you-go instance. 
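+              - One of C(NoSpot), C(SpotWithPriceLimit), or C(SpotAsPriceGo).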
+ returned: always + type: str + sample: NoSpot +ids: + description: List of ECS instance IDs + returned: always + type: list + sample: [i-12345er, i-3245fs] +''' + +import re +import time +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ( + ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK +) + + +def get_instances_info(connection, ids): + result = [] + instances = connection.describe_instances(instance_ids=ids) + if len(instances) > 0: + for inst in instances: + volumes = connection.describe_disks(instance_id=inst.id) + setattr(inst, 'block_device_mappings', volumes) + setattr(inst, 'user_data', inst.describe_user_data()) + result.append(inst.read()) + return result + + +def run_instance(module, ecs, exact_count): + if exact_count <= 0: + return None + zone_id = module.params['availability_zone'] + image_id = module.params['image_id'] + instance_type = module.params['instance_type'] + security_groups = module.params['security_groups'] + vswitch_id = module.params['vswitch_id'] + instance_name = module.params['instance_name'] + description = module.params['description'] + internet_charge_type = module.params['internet_charge_type'] + max_bandwidth_out = module.params['max_bandwidth_out'] + max_bandwidth_in = module.params['max_bandwidth_in'] + host_name = module.params['host_name'] + password = module.params['password'] + system_disk_category = module.params['system_disk_category'] + system_disk_size = module.params['system_disk_size'] + system_disk_name = module.params['system_disk_name'] + system_disk_description = module.params['system_disk_description'] + allocate_public_ip = module.params['allocate_public_ip'] + period = module.params['period'] + auto_renew = module.params['auto_renew'] + instance_charge_type = module.params['instance_charge_type'] + auto_renew_period = module.params['auto_renew_period'] + user_data = module.params['user_data'] + key_name = module.params['key_name'] + ram_role_name = module.params['ram_role_name'] + spot_price_limit = module.params['spot_price_limit'] + spot_strategy = module.params['spot_strategy'] + unique_suffix = module.params['unique_suffix'] + # check whether the required parameter passed or not + if not image_id: + module.fail_json(msg='image_id is required for new instance') + if not instance_type: + module.fail_json(msg='instance_type is required for new instance') + if not isinstance(security_groups, list): + module.fail_json(msg='The parameter security_groups should be a list, aborting') + if len(security_groups) <= 0: + module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting') + + client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time())) + + try: + # call to create_instance method from footmark + instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0], + zone_id=zone_id, instance_name=instance_name, description=description, + internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out, + internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password, + io_optimized='optimized', system_disk_category=system_disk_category, + system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name, + system_disk_description=system_disk_description, vswitch_id=vswitch_id, + amount=exact_count, 
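+                                      # period_unit is hard-coded to "Month" in this call; the module's
+                                      # period_unit parameter is only used later, when the instance
+                                      # charge type is modified.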
instance_charge_type=instance_charge_type, period=period, period_unit="Month", + auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name, + user_data=user_data, client_token=client_token, ram_role_name=ram_role_name, + spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix) + + except Exception as e: + module.fail_json(msg='Unable to create instance, error: {0}'.format(e)) + + return instances + + +def modify_instance(module, instance): + # According to state to modify instance's some special attribute + state = module.params["state"] + name = module.params['instance_name'] + unique_suffix = module.params['unique_suffix'] + if not name: + name = instance.name + + description = module.params['description'] + if not description: + description = instance.description + + host_name = module.params['host_name'] + if unique_suffix and host_name: + suffix = instance.host_name[-3:] + host_name = host_name + suffix + + if not host_name: + host_name = instance.host_name + + # password can be modified only when restart instance + password = "" + if state == "restarted": + password = module.params['password'] + + # userdata can be modified only when instance is stopped + setattr(instance, "user_data", instance.describe_user_data()) + user_data = instance.user_data + if state == "stopped": + user_data = module.params['user_data'].encode() + + try: + return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data) + except Exception as e: + module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e)) + + +def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300): + """ + To verify instance charge type has become expected after modify instance charge type + """ + try: + while True: + instances = ecs.describe_instances(instance_ids=instance_ids) + flag = True + for inst in instances: + if inst and inst.instance_charge_type != charge_type: + flag = False + if flag: + return + timeout -= delay + time.sleep(delay) + if timeout <= 0: + raise Exception("Timeout Error: Waiting for instance to {0}. 
".format(charge_type)) + except Exception as e: + raise e + + +def main(): + argument_spec = ecs_argument_spec() + argument_spec.update(dict( + security_groups=dict(type='list', elements='str', aliases=['group_ids']), + availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']), + instance_type=dict(type='str', aliases=['type']), + image_id=dict(type='str', aliases=['image']), + count=dict(type='int', default=1), + count_tag=dict(type='str'), + vswitch_id=dict(type='str', aliases=['subnet_id']), + instance_name=dict(type='str', aliases=['name']), + host_name=dict(type='str'), + password=dict(type='str', no_log=True), + internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']), + max_bandwidth_in=dict(type='int', default=200), + max_bandwidth_out=dict(type='int', default=0), + system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']), + system_disk_size=dict(type='int', default=40), + system_disk_name=dict(type='str'), + system_disk_description=dict(type='str'), + force=dict(type='bool', default=False), + tags=dict(type='dict', aliases=['instance_tags']), + purge_tags=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']), + description=dict(type='str'), + allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False), + instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']), + period=dict(type='int', default=1), + auto_renew=dict(type='bool', default=False), + instance_ids=dict(type='list', elements='str'), + auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]), + key_name=dict(type='str', aliases=['keypair']), + user_data=dict(type='str'), + ram_role_name=dict(type='str'), + spot_price_limit=dict(type='float'), + spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']), + unique_suffix=dict(type='bool', default=False), + period_unit=dict(type='str', default='Month', choices=['Month', 'Week']), + dry_run=dict(type='bool', default=False), + include_data_disks=dict(type='bool', default=True) + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if HAS_FOOTMARK is False: + module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) + + ecs = ecs_connect(module) + host_name = module.params['host_name'] + state = module.params['state'] + instance_ids = module.params['instance_ids'] + count_tag = module.params['count_tag'] + count = module.params['count'] + instance_name = module.params['instance_name'] + force = module.params['force'] + zone_id = module.params['availability_zone'] + key_name = module.params['key_name'] + tags = module.params['tags'] + max_bandwidth_out = module.params['max_bandwidth_out'] + instance_charge_type = module.params['instance_charge_type'] + if instance_charge_type == "PrePaid": + module.params['spot_strategy'] = '' + changed = False + + instances = [] + if instance_ids: + if not isinstance(instance_ids, list): + module.fail_json(msg='The parameter instance_ids should be a list, aborting') + instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids) + if not instances: + module.fail_json(msg="There are no instances in our record based on instance_ids {0}. 
" + "Please check it and try again.".format(instance_ids)) + elif count_tag: + instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag)) + elif instance_name: + instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name) + + ids = [] + if state == 'absent': + if len(instances) < 1: + module.fail_json(msg='Please specify ECS instances that you want to operate by using ' + 'parameters instance_ids, tags or instance_name, aborting') + try: + targets = [] + for inst in instances: + if inst.status != 'stopped' and not force: + module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.") + targets.append(inst.id) + if ecs.delete_instances(instance_ids=targets, force=force): + changed = True + ids.extend(targets) + + module.exit_json(changed=changed, ids=ids, instances=[]) + except Exception as e: + module.fail_json(msg='Delete instance got an error: {0}'.format(e)) + + if module.params['allocate_public_ip'] and max_bandwidth_out < 0: + module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.") + if not module.params['allocate_public_ip']: + module.params['max_bandwidth_out'] = 0 + + if state == 'present': + if not instance_ids: + if len(instances) > count: + for i in range(0, len(instances) - count): + inst = instances[len(instances) - 1] + if inst.status != 'stopped' and not force: + module.fail_json(msg="That to delete instance {0} is failed results from it is running, " + "and please stop it or set 'force' as True.".format(inst.id)) + try: + if inst.terminate(force=force): + changed = True + except Exception as e: + module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e)) + instances.pop(len(instances) - 1) + else: + try: + if re.search(r"-\[\d+,\d+\]-", host_name): + module.fail_json(msg='Ordered hostname is not supported, If you want to add an ordered ' + 'suffix to the hostname, you can set unique_suffix to True') + new_instances = run_instance(module, ecs, count - len(instances)) + if new_instances: + changed = True + instances.extend(new_instances) + except Exception as e: + module.fail_json(msg="Create new instances got an error: {0}".format(e)) + + # Security Group join/leave begin + security_groups = module.params['security_groups'] + if security_groups: + if not isinstance(security_groups, list): + module.fail_json(msg='The parameter security_groups should be a list, aborting') + for inst in instances: + existing = inst.security_group_ids['security_group_id'] + remove = list(set(existing).difference(set(security_groups))) + add = list(set(security_groups).difference(set(existing))) + for sg in remove: + if inst.leave_security_group(sg): + changed = True + for sg in add: + if inst.join_security_group(sg): + changed = True + # Security Group join/leave ends here + + # Attach/Detach key pair + inst_ids = [] + for inst in instances: + if key_name is not None and key_name != inst.key_name: + if key_name == "": + if inst.detach_key_pair(): + changed = True + else: + inst_ids.append(inst.id) + if inst_ids: + changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name) + + # Modify instance attribute + for inst in instances: + if modify_instance(module, inst): + changed = True + if inst.id not in ids: + ids.append(inst.id) + + # Modify instance charge type + ids = [] + for inst in instances: + if inst.instance_charge_type != instance_charge_type: + ids.append(inst.id) + if ids: + params = {"instance_ids": ids, "instance_charge_type": 
instance_charge_type, + "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'], + "auto_pay": True} + if instance_charge_type == 'PrePaid': + params['period'] = module.params['period'] + params['period_unit'] = module.params['period_unit'] + + if ecs.modify_instance_charge_type(**params): + changed = True + wait_for_instance_modify_charge(ecs, ids, instance_charge_type) + + else: + if len(instances) < 1: + module.fail_json(msg='Please specify ECS instances that you want to operate by using ' + 'parameters instance_ids, tags or instance_name, aborting') + if state == 'running': + try: + targets = [] + for inst in instances: + if modify_instance(module, inst): + changed = True + if inst.status != "running": + targets.append(inst.id) + ids.append(inst.id) + if targets and ecs.start_instances(instance_ids=targets): + changed = True + ids.extend(targets) + except Exception as e: + module.fail_json(msg='Start instances got an error: {0}'.format(e)) + elif state == 'stopped': + try: + targets = [] + for inst in instances: + if inst.status != "stopped": + targets.append(inst.id) + if targets and ecs.stop_instances(instance_ids=targets, force_stop=force): + changed = True + ids.extend(targets) + for inst in instances: + if modify_instance(module, inst): + changed = True + except Exception as e: + module.fail_json(msg='Stop instances got an error: {0}'.format(e)) + elif state == 'restarted': + try: + targets = [] + for inst in instances: + if modify_instance(module, inst): + changed = True + targets.append(inst.id) + if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']): + changed = True + ids.extend(targets) + except Exception as e: + module.fail_json(msg='Reboot instances got an error: {0}'.format(e)) + + tags = module.params['tags'] + if module.params['purge_tags']: + for inst in instances: + if not tags: + tags = inst.tags + try: + if inst.remove_tags(tags): + changed = True + except Exception as e: + module.fail_json(msg="{0}".format(e)) + module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) + + if tags: + for inst in instances: + try: + if inst.add_tags(tags): + changed = True + except Exception as e: + module.fail_json(msg="{0}".format(e)) + module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ali_instance_info.py b/ansible_collections/community/general/plugins/modules/ali_instance_info.py new file mode 100644 index 000000000..e7ec7f395 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ali_instance_info.py @@ -0,0 +1,407 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see http://www.gnu.org/licenses/. + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ali_instance_info +short_description: Gather information on instances of Alibaba Cloud ECS +description: + - This module fetches data from the Open API in Alicloud. + The module must be called from within the ECS instance itself. + - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change. + +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + +options: + name_prefix: + description: + - Use a instance name prefix to filter ecs instances. + type: str + version_added: '0.2.0' + tags: + description: + - A hash/dictionaries of instance tags. C({"key":"value"}) + aliases: ["instance_tags"] + type: dict + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be + all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. + Filter keys can be same as request parameter name or be lower case and use underscore ("_") or dash ("-") to + connect different words in one parameter. 'InstanceIds' should be a list. + 'Tag.n.Key' and 'Tag.n.Value' should be a dict and using I(tags) instead. + type: dict + version_added: '0.2.0' +author: + - "He Guimin (@xiaozhu36)" +requirements: + - "python >= 3.6" + - "footmark >= 1.13.0" +extends_documentation_fragment: + - community.general.alicloud + - community.general.attributes + - community.general.attributes.info_module +''' + +EXAMPLES = ''' +# Fetch instances details according to setting different filters + +- name: Find all instances in the specified region + community.general.ali_instance_info: + register: all_instances + +- name: Find all instances based on the specified ids + community.general.ali_instance_info: + instance_ids: + - "i-35b333d9" + - "i-ddav43kd" + register: instances_by_ids + +- name: Find all instances based on the specified name_prefix + community.general.ali_instance_info: + name_prefix: "ecs_instance_" + register: instances_by_name_prefix + +- name: Find instances based on tags + community.general.ali_instance_info: + tags: + Test: "add" +''' + +RETURN = ''' +instances: + description: List of ECS instances + returned: always + type: complex + contains: + availability_zone: + description: The availability zone of the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/xvda). + returned: always + type: str + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. + returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. 
+ returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attribution of EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance will expire. + returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attribution of instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. + returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. + returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. + returned: always + type: str + sample: my-ecs + instance_type_family: + description: The instance type family of the instance belongs. + returned: always + type: str + sample: ecs.sn1ne + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + memory: + description: Memory size of the instance. + returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch. + returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operation system name of the instance owned. 
+                    returned: always
+                    type: str
+                    sample: CentOS
+                ostype:
+                    description: The operating system type of the instance.
+                    returned: always
+                    type: str
+                    sample: linux
+                private_ip_address:
+                    description: The IPv4 address of the network interface within the subnet.
+                    returned: always
+                    type: str
+                    sample: 10.0.0.1
+                public_ip_address:
+                    description: The public IPv4 address assigned to the instance, or the EIP address.
+                    returned: always
+                    type: str
+                    sample: 43.0.0.1
+                resource_group_id:
+                    description: The ID of the resource group to which the instance belongs.
+                    returned: always
+                    type: str
+                    sample: my-ecs-group
+                security_groups:
+                    description: One or more security groups for the instance.
+                    returned: always
+                    type: list
+                    elements: dict
+                    contains:
+                        group_id:
+                            description: The ID of the security group.
+                            returned: always
+                            type: str
+                            sample: sg-0123456
+                        group_name:
+                            description: The name of the security group.
+                            returned: always
+                            type: str
+                            sample: my-security-group
+                status:
+                    description: The current status of the instance.
+                    returned: always
+                    type: str
+                    sample: running
+                tags:
+                    description: Any tags assigned to the instance.
+                    returned: always
+                    type: dict
+                    sample:
+                vswitch_id:
+                    description: The ID of the vswitch in which the instance is running.
+                    returned: always
+                    type: str
+                    sample: vsw-dew00abcdef
+                vpc_id:
+                    description: The ID of the VPC the instance is in.
+                    returned: always
+                    type: str
+                    sample: vpc-0011223344
+ids:
+    description: List of ECS instance IDs
+    returned: always
+    type: list
+    sample: [i-12345er, i-3245fs]
+'''

+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
+    ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK
+)
+
+
+def main():
+    argument_spec = ecs_argument_spec()
+    argument_spec.update(dict(
+        name_prefix=dict(type='str'),
+        tags=dict(type='dict', aliases=['instance_tags']),
+        filters=dict(type='dict')
+    )
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    if HAS_FOOTMARK is False:
+        module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+    ecs = ecs_connect(module)
+
+    instances = []
+    instance_ids = []
+    ids = []
+    name_prefix = module.params['name_prefix']
+
+    filters = module.params['filters']
+    if not filters:
+        filters = {}
+    # Collect any instance-ID style filter keys into a single instance_ids list.
+    for key, value in list(filters.items()):
+        if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(value, list):
+            for id in value:
+                if id not in ids:
+                    ids.append(id)
+    if ids:
+        filters['instance_ids'] = ids
+    if module.params['tags']:
+        filters['tags'] = module.params['tags']
+
+    for inst in ecs.describe_instances(**filters):
+        if name_prefix:
+            if not str(inst.instance_name).startswith(name_prefix):
+                continue
+        volumes = ecs.describe_disks(instance_id=inst.id)
+        setattr(inst, 'block_device_mappings', volumes)
+        setattr(inst, 'user_data', inst.describe_user_data())
+        instances.append(inst.read())
+        instance_ids.append(inst.id)
+
+    module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/alternatives.py b/ansible_collections/community/general/plugins/modules/alternatives.py
new file mode 100644
index 000000000..97d4f51fb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/alternatives.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-

+# 
Copyright (c) 2014, Gabe Mulley +# Copyright (c) 2015, David Wittman +# Copyright (c) 2022, Marius Rieder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: alternatives +short_description: Manages alternative programs for common commands +description: + - Manages symbolic links using the 'update-alternatives' tool. + - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). +author: + - Marius Rieder (@jiuka) + - David Wittman (@DavidWittman) + - Gabe Mulley (@mulby) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + name: + description: + - The generic name of the link. + type: str + required: true + path: + description: + - The path to the real executable that the link should point to. + type: path + required: true + link: + description: + - The path to the symbolic link that should point to the real executable. + - This option is always required on RHEL-based distributions. On Debian-based distributions this option is + required when the alternative I(name) is unknown to the system. + type: path + priority: + description: + - The priority of the alternative. If no priority is given for creation C(50) is used as a fallback. + type: int + state: + description: + - C(present) - install the alternative (if not already installed), but do + not set it as the currently selected alternative for the group. + - C(selected) - install the alternative (if not already installed), and + set it as the currently selected alternative for the group. + - C(auto) - install the alternative (if not already installed), and + set the group to auto mode. Added in community.general 5.1.0. + - C(absent) - removes the alternative. Added in community.general 5.1.0. + choices: [ present, selected, auto, absent ] + default: selected + type: str + version_added: 4.8.0 + subcommands: + description: + - A list of subcommands. + - Each subcommand needs a name, a link and a path parameter. + - Subcommands are also named 'slaves' or 'followers', depending on the version + of alternatives. + type: list + elements: dict + aliases: ['slaves'] + suboptions: + name: + description: + - The generic name of the subcommand. + type: str + required: true + path: + description: + - The path to the real executable that the subcommand should point to. + type: path + required: true + link: + description: + - The path to the symbolic link that should point to the real subcommand executable. 
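+          - Unlike the top-level I(link), this option is always required.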
+ type: path + required: true + version_added: 5.1.0 +requirements: [ update-alternatives ] +''' + +EXAMPLES = r''' +- name: Correct java version selected + community.general.alternatives: + name: java + path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + +- name: Alternatives link created + community.general.alternatives: + name: hadoop-conf + link: /etc/hadoop/conf + path: /etc/hadoop/conf.ansible + +- name: Make java 32 bit an alternative with low priority + community.general.alternatives: + name: java + path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java + priority: -10 + +- name: Install Python 3.5 but do not select it + community.general.alternatives: + name: python + path: /usr/bin/python3.5 + link: /usr/bin/python + state: present + +- name: Install Python 3.5 and reset selection to auto + community.general.alternatives: + name: python + path: /usr/bin/python3.5 + link: /usr/bin/python + state: auto + +- name: keytool is a subcommand of java + community.general.alternatives: + name: java + link: /usr/bin/java + path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + subcommands: + - name: keytool + link: /usr/bin/keytool + path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/keytool +''' + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + + +class AlternativeState: + PRESENT = "present" + SELECTED = "selected" + ABSENT = "absent" + AUTO = "auto" + + @classmethod + def to_list(cls): + return [cls.PRESENT, cls.SELECTED, cls.ABSENT, cls.AUTO] + + +class AlternativesModule(object): + _UPDATE_ALTERNATIVES = None + + def __init__(self, module): + self.module = module + self.result = dict(changed=False, diff=dict(before=dict(), after=dict())) + self.module.run_command_environ_update = {'LC_ALL': 'C'} + self.messages = [] + self.run() + + @property + def mode_present(self): + return self.module.params.get('state') in [AlternativeState.PRESENT, AlternativeState.SELECTED, AlternativeState.AUTO] + + @property + def mode_selected(self): + return self.module.params.get('state') == AlternativeState.SELECTED + + @property + def mode_auto(self): + return self.module.params.get('state') == AlternativeState.AUTO + + def run(self): + self.parse() + + if self.mode_present: + # Check if we need to (re)install + subcommands_parameter = self.module.params['subcommands'] + priority_parameter = self.module.params['priority'] + if ( + self.path not in self.current_alternatives or + (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or + (subcommands_parameter is not None and ( + not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or + not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter) + )) + ): + self.install() + + # Check if we need to set the preference + if self.mode_selected and self.current_path != self.path: + self.set() + + # Check if we need to reset to auto + if self.mode_auto and self.current_mode == 'manual': + self.auto() + else: + # Check if we need to uninstall + if self.path in self.current_alternatives: + self.remove() + + self.result['msg'] = ' '.join(self.messages) + self.module.exit_json(**self.result) + + def install(self): + if not os.path.exists(self.path): + self.module.fail_json(msg="Specified path %s does not exist" % self.path) + if not self.link: + self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link') + + cmd = [self.UPDATE_ALTERNATIVES, 
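+               # Argument order matches: update-alternatives --install <link> <name> <path> <priority>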
'--install', self.link, self.name, self.path, str(self.priority)] + + if self.module.params['subcommands'] is not None: + subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands] + cmd += [item for sublist in subcommands for item in sublist] + + self.result['changed'] = True + self.messages.append("Install alternative '%s' for '%s'." % (self.path, self.name)) + + if not self.module.check_mode: + self.module.run_command(cmd, check_rc=True) + + if self.module._diff: + self.result['diff']['after'] = dict( + state=AlternativeState.PRESENT, + path=self.path, + priority=self.priority, + link=self.link, + ) + if self.subcommands: + self.result['diff']['after'].update(dict( + subcommands=self.subcommands + )) + + def remove(self): + cmd = [self.UPDATE_ALTERNATIVES, '--remove', self.name, self.path] + self.result['changed'] = True + self.messages.append("Remove alternative '%s' from '%s'." % (self.path, self.name)) + + if not self.module.check_mode: + self.module.run_command(cmd, check_rc=True) + + if self.module._diff: + self.result['diff']['after'] = dict(state=AlternativeState.ABSENT) + + def set(self): + cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, self.path] + self.result['changed'] = True + self.messages.append("Set alternative '%s' for '%s'." % (self.path, self.name)) + + if not self.module.check_mode: + self.module.run_command(cmd, check_rc=True) + + if self.module._diff: + self.result['diff']['after']['state'] = AlternativeState.SELECTED + + def auto(self): + cmd = [self.UPDATE_ALTERNATIVES, '--auto', self.name] + self.messages.append("Set alternative to auto for '%s'." % (self.name)) + self.result['changed'] = True + + if not self.module.check_mode: + self.module.run_command(cmd, check_rc=True) + + if self.module._diff: + self.result['diff']['after']['state'] = AlternativeState.PRESENT + + @property + def name(self): + return self.module.params.get('name') + + @property + def path(self): + return self.module.params.get('path') + + @property + def link(self): + return self.module.params.get('link') or self.current_link + + @property + def priority(self): + if self.module.params.get('priority') is not None: + return self.module.params.get('priority') + return self.current_alternatives.get(self.path, {}).get('priority', 50) + + @property + def subcommands(self): + if self.module.params.get('subcommands') is not None: + return self.module.params.get('subcommands') + elif self.path in self.current_alternatives and self.current_alternatives[self.path].get('subcommands'): + return self.current_alternatives[self.path].get('subcommands') + return None + + @property + def UPDATE_ALTERNATIVES(self): + if self._UPDATE_ALTERNATIVES is None: + self._UPDATE_ALTERNATIVES = self.module.get_bin_path('update-alternatives', True) + return self._UPDATE_ALTERNATIVES + + def parse(self): + self.current_mode = None + self.current_path = None + self.current_link = None + self.current_alternatives = {} + + # Run `update-alternatives --display ` to find existing alternatives + (rc, display_output, dummy) = self.module.run_command( + [self.UPDATE_ALTERNATIVES, '--display', self.name] + ) + + if rc != 0: + self.module.debug("No current alternative found. 
'%s' exited with %s" % (self.UPDATE_ALTERNATIVES, rc)) + return + + current_mode_regex = re.compile(r'\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$', re.MULTILINE) + current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE) + current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE) + subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE) + + alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE) + subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE) + + match = current_mode_regex.search(display_output) + if not match: + self.module.debug("No current mode found in output") + return + self.current_mode = match.group(1) + + match = current_path_regex.search(display_output) + if not match: + self.module.debug("No current path found in output") + else: + self.current_path = match.group(1) + + match = current_link_regex.search(display_output) + if not match: + self.module.debug("No current link found in output") + else: + self.current_link = match.group(1) + + subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output)) + if not subcmd_path_map and self.subcommands: + subcmd_path_map = dict((s['name'], s['link']) for s in self.subcommands) + + for path, prio, subcmd in alternative_regex.findall(display_output): + self.current_alternatives[path] = dict( + priority=int(prio), + subcommands=[dict( + name=name, + path=spath, + link=subcmd_path_map.get(name) + ) for name, spath in subcmd_regex.findall(subcmd) if spath != '(null)'] + ) + + if self.module._diff: + if self.path in self.current_alternatives: + self.result['diff']['before'].update(dict( + state=AlternativeState.PRESENT, + path=self.path, + priority=self.current_alternatives[self.path].get('priority'), + link=self.current_link, + )) + if self.current_alternatives[self.path].get('subcommands'): + self.result['diff']['before'].update(dict( + subcommands=self.current_alternatives[self.path].get('subcommands') + )) + if self.current_mode == 'manual' and self.current_path != self.path: + self.result['diff']['before'].update(dict( + state=AlternativeState.SELECTED + )) + else: + self.result['diff']['before'].update(dict( + state=AlternativeState.ABSENT + )) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + path=dict(type='path', required=True), + link=dict(type='path'), + priority=dict(type='int'), + state=dict( + type='str', + choices=AlternativeState.to_list(), + default=AlternativeState.SELECTED, + ), + subcommands=dict(type='list', elements='dict', aliases=['slaves'], options=dict( + name=dict(type='str', required=True), + path=dict(type='path', required=True), + link=dict(type='path', required=True), + )), + ), + supports_check_mode=True, + ) + + AlternativesModule(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py new file mode 100644 index 000000000..0f38eabdf --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Alexei Znamensky +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from 
__future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: ansible_galaxy_install +author: + - "Alexei Znamensky (@russoz)" +short_description: Install Ansible roles or collections using ansible-galaxy +version_added: 3.5.0 +description: + - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). +notes: + - > + B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and + ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters. + - > + The module will try and run using the C(C.UTF-8) locale. + If that fails, it will try C(en_US.UTF-8). + If that one also fails, the module will fail. +requirements: + - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + type: + description: + - The type of installation performed by C(ansible-galaxy). + - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections. + - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices." + - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)." + type: str + choices: [collection, role, both] + required: true + name: + description: + - Name of the collection or role being installed. + - > + Versions can be specified with C(ansible-galaxy) usual formats. + For example, the collection C(community.docker:1.6.1) or the role C(ansistrano.deploy,3.8.0). + - I(name) and I(requirements_file) are mutually exclusive. + type: str + requirements_file: + description: + - Path to a file containing a list of requirements to be installed. + - It works for I(type) equals to C(collection) and C(role). + - I(name) and I(requirements_file) are mutually exclusive. + - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run." + type: path + dest: + description: + - The path to the directory containing your collections or roles, according to the value of I(type). + - > + Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file) + contains both roles and collections and I(dest) is specified. + type: path + no_deps: + description: + - Refrain from installing dependencies. + version_added: 4.5.0 + type: bool + default: false + force: + description: + - Force overwriting an existing role or collection. + - Using I(force=true) is mandatory when downgrading. + - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections." + type: bool + default: false + ack_ansible29: + description: + - Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them. + - This option is completely ignored if using a version of Ansible greater than C(2.9.x). + - Note that this option will be removed without any further deprecation warning once support + for Ansible 2.9 is removed from this module. + type: bool + default: false + ack_min_ansiblecore211: + description: + - Acknowledge the module is deprecating support for Ansible 2.9 and ansible-base 2.10. + - Support for those versions will be removed in community.general 8.0.0. + At the same time, this option will be removed without any deprecation warning! 
+ - This option is completely ignored if using a version of ansible-core/ansible-base/Ansible greater than C(2.11). + - For the sake of conciseness, setting this parameter to C(true) implies I(ack_ansible29=true). + type: bool + default: false +""" + +EXAMPLES = """ +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: community.network + +- name: Install role at specific path + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + dest: /ansible/roles + +- name: Install collections and roles together + community.general.ansible_galaxy_install: + type: both + requirements_file: requirements.yml + +- name: Force-install collection community.network at specific version + community.general.ansible_galaxy_install: + type: collection + name: community.network:3.0.2 + force: true + +""" + +RETURN = """ + type: + description: The value of the I(type) parameter. + type: str + returned: always + name: + description: The value of the I(name) parameter. + type: str + returned: always + dest: + description: The value of the I(dest) parameter. + type: str + returned: always + requirements_file: + description: The value of the I(requirements_file) parameter. + type: str + returned: always + force: + description: The value of the I(force) parameter. + type: bool + returned: always + installed_roles: + description: + - If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path. + - If I(name) is specified, returns that role name and the version installed per path. + - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." + type: dict + returned: always when installing roles + contains: + "": + description: Roles and versions for that path. + type: dict + sample: + /home/user42/.ansible/roles: + ansistrano.deploy: 3.9.0 + baztian.xfce: v0.0.3 + /custom/ansible/roles: + ansistrano.deploy: 3.8.0 + installed_collections: + description: + - If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path. + - If I(name) is specified, returns that collection name and the version installed per path. + - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." + type: dict + returned: always when installing collections + contains: + "": + description: Collections and versions for that path + type: dict + sample: + /home/az/.ansible/collections/ansible_collections: + community.docker: 1.6.0 + community.general: 3.0.2 + /custom/ansible/ansible_collections: + community.general: 3.1.0 + new_collections: + description: New collections installed by this module. + returned: success + type: dict + sample: + community.general: 3.1.0 + community.docker: 1.6.1 + new_roles: + description: New roles installed by this module. + returned: success + type: dict + sample: + ansistrano.deploy: 3.8.0 + baztian.xfce: v0.0.3 +""" + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException + + +class AnsibleGalaxyInstall(ModuleHelper): + _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? 
 (?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?')
+    _RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$')
+    _RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$')
+    _RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
+    _RE_INSTALL_OUTPUT = None  # Set after determining ansible version, see __init_module__()
+    ansible_version = None
+    is_ansible29 = None
+
+    output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps')
+    module = dict(
+        argument_spec=dict(
+            type=dict(type='str', choices=('collection', 'role', 'both'), required=True),
+            name=dict(type='str'),
+            requirements_file=dict(type='path'),
+            dest=dict(type='path'),
+            force=dict(type='bool', default=False),
+            no_deps=dict(type='bool', default=False),
+            ack_ansible29=dict(type='bool', default=False),
+            ack_min_ansiblecore211=dict(type='bool', default=False),
+        ),
+        mutually_exclusive=[('name', 'requirements_file')],
+        required_one_of=[('name', 'requirements_file')],
+        required_if=[('type', 'both', ['requirements_file'])],
+        supports_check_mode=False,
+    )
+
+    command = 'ansible-galaxy'
+    command_args_formats = dict(
+        type=fmt.as_func(lambda v: [] if v == 'both' else [v]),
+        galaxy_cmd=fmt.as_list(),
+        requirements_file=fmt.as_opt_val('-r'),
+        dest=fmt.as_opt_val('-p'),
+        force=fmt.as_bool("--force"),
+        no_deps=fmt.as_bool("--no-deps"),
+        version=fmt.as_bool("--version"),
+        name=fmt.as_list(),
+    )
+
+    def _make_runner(self, lang):
+        return CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=lang, check_rc=True)
+
+    def _get_ansible_galaxy_version(self):
+        class UnsupportedLocale(ModuleHelperException):
+            pass
+
+        def process(rc, out, err):
+            if (rc != 0 and "unsupported locale setting" in err) or (rc == 0 and "cannot change locale" in err):
+                raise UnsupportedLocale(msg=err)
+            line = out.splitlines()[0]
+            match = self._RE_GALAXY_VERSION.match(line)
+            if not match:
+                self.do_raise("Unable to determine ansible-galaxy version from: {0}".format(line))
+            version = match.group("version")
+            version = tuple(int(x) for x in version.split('.')[:3])
+            return version
+
+        try:
+            runner = self._make_runner("C.UTF-8")
+            with runner("version", check_rc=False, output_process=process) as ctx:
+                return runner, ctx.run(version=True)
+        except UnsupportedLocale:
+            runner = self._make_runner("en_US.UTF-8")
+            with runner("version", check_rc=True, output_process=process) as ctx:
+                return runner, ctx.run(version=True)
+
+    def __init_module__(self):
+        self.runner, self.ansible_version = self._get_ansible_galaxy_version()
+        if self.ansible_version < (2, 11) and not self.vars.ack_min_ansiblecore211:
+            self.module.deprecate(
+                "Support for Ansible 2.9 and ansible-base 2.10 is being deprecated. "
+                "When that support ends, the ack_ansible29 option will also be removed. "
+                "Upgrading is strongly recommended, or set 'ack_min_ansiblecore211' to suppress this message.",
+                version="8.0.0",
+                collection_name="community.general",
+            )
+        self.is_ansible29 = self.ansible_version < (2, 10)
+        if self.is_ansible29:
+            self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing '(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)'.*"
+                                                 r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)'
+                                                 r' was installed successfully)$')
+        else:
+            # Collection install output changed:
+            # ansible-base 2.10:  "coll.name (x.y.z)"
+            # ansible-core 2.11+: "coll.name:x.y.z"
+            self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P<collection>\w+\.\w+)(?: \(|:)(?P<cversion>[\d\.]+)\)?'
+                                                 r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\))'
+                                                 r' was installed successfully$')
+
+    def _list_element(self, _type, path_re, elem_re):
+        def process(rc, out, err):
+            return [] if "None of the provided paths were usable" in out else out.splitlines()
+
+        with self.runner('type galaxy_cmd dest', output_process=process, check_rc=False) as ctx:
+            elems = ctx.run(type=_type, galaxy_cmd='list')
+
+        elems_dict = {}
+        current_path = None
+        for line in elems:
+            if line.startswith("#"):
+                match = path_re.match(line)
+                if not match:
+                    continue
+                if self.vars.dest is not None and match.group('path') != self.vars.dest:
+                    current_path = None
+                    continue
+                current_path = match.group('path') if match else None
+                elems_dict[current_path] = {}
+
+            elif current_path is not None:
+                match = elem_re.match(line)
+                if not match or (self.vars.name is not None and match.group('elem') != self.vars.name):
+                    continue
+                elems_dict[current_path][match.group('elem')] = match.group('version')
+        return elems_dict
+
+    def _list_collections(self):
+        return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL)
+
+    def _list_roles(self):
+        return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE)
+
+    def _setup29(self):
+        self.vars.set("new_collections", {})
+        self.vars.set("new_roles", {})
+        self.vars.set("ansible29_change", False, change=True, output=False)
+        if not (self.vars.ack_ansible29 or self.vars.ack_min_ansiblecore211):
+            self.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed")
+            if self.vars.requirements_file is not None and self.vars.type == 'both':
+                self.warn("Ansible 2.9 or older: will install only roles from requirement files")
+
+    def _setup210plus(self):
+        self.vars.set("new_collections", {}, change=True)
+        self.vars.set("new_roles", {}, change=True)
+        if self.vars.type != "collection":
+            self.vars.installed_roles = self._list_roles()
+        if self.vars.type != "role":
+            self.vars.installed_collections = self._list_collections()
+
+    def __run__(self):
+        def process(rc, out, err):
+            for line in out.splitlines():
+                match = self._RE_INSTALL_OUTPUT.match(line)
+                if not match:
+                    continue
+                if match.group("collection"):
+                    self.vars.new_collections[match.group("collection")] = match.group("cversion")
+                    if self.is_ansible29:
+                        self.vars.ansible29_change = True
+                elif match.group("role"):
+                    self.vars.new_roles[match.group("role")] = match.group("rversion")
+                    if self.is_ansible29:
+                        self.vars.ansible29_change = True
+
+        if self.is_ansible29:
+            if self.vars.type == 'both':
+                raise ValueError("Type 'both' not supported in Ansible 2.9")
+            self._setup29()
+        else:
+            self._setup210plus()
+        with self.runner("type galaxy_cmd force no_deps dest requirements_file name", output_process=process) as ctx:
+            ctx.run(galaxy_cmd="install")
+            if self.verbosity > 2:
+                self.vars.set("run_info", ctx.run_info)
+
+
+def main():
+    AnsibleGalaxyInstall.execute()
+
+
+if
__name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py b/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py new file mode 100644 index 000000000..8f561e8ae --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py @@ -0,0 +1,452 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Olivier Boukili +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: apache2_mod_proxy +author: Olivier Boukili (@oboukili) +short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool +description: + - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer + pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member + status page has to be enabled and accessible, as this module relies on parsing + this page. This module supports ansible check_mode, and requires BeautifulSoup + python module. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + balancer_url_suffix: + type: str + description: + - Suffix of the balancer pool url required to access the balancer pool + status page (e.g. balancer_vhost[:port]/balancer_url_suffix). + default: /balancer-manager/ + balancer_vhost: + type: str + description: + - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool. + required: true + member_host: + type: str + description: + - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to. + Port number is autodetected and should not be specified here. + If undefined, apache2_mod_proxy module will return a members list of + dictionaries of all the current balancer pool members' attributes. + state: + type: str + description: + - Desired state of the member host. + (absent|disabled),drained,hot_standby,ignore_errors can be + simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors). + - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]' + tls: + description: + - Use https to access balancer management page. + type: bool + default: false + validate_certs: + description: + - Validate ssl/tls certificates. 
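As a quick illustration of the comma-separated I(state) handling described above: the requested states are expanded into four boolean member flags before being applied, mirroring the mapping done in this module's main() function further down. A simplified standalone sketch of that idea, with hypothetical input:

# Simplified sketch (not the module's code verbatim) of expanding a
# comma-separated state value into member status flags; 'absent' is
# realized by disabling the member, as main() below does.
def states_to_flags(state):
    flags = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
    for s in state.split(','):
        if s == 'absent':
            flags['disabled'] = True
        elif s in flags:
            flags[s] = True
    return flags

print(states_to_flags('drained,ignore_errors'))
# {'disabled': False, 'drained': True, 'hot_standby': False, 'ignore_errors': True}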
+ type: bool + default: true +''' + +EXAMPLES = ''' +- name: Get all current balancer pool members' attributes + community.general.apache2_mod_proxy: + balancer_vhost: 10.0.0.2 + +- name: Get a specific member's attributes + community.general.apache2_mod_proxy: + balancer_vhost: myws.mydomain.org + balancer_url_suffix: /lb/ + member_host: node1.myws.mydomain.org + +# Enable all balancer pool members: +- name: Get attributes + community.general.apache2_mod_proxy: + balancer_vhost: '{{ myloadbalancer_host }}' + register: result + +- name: Enable all balancer pool members + community.general.apache2_mod_proxy: + balancer_vhost: '{{ myloadbalancer_host }}' + member_host: '{{ item.host }}' + state: present + with_items: '{{ result.members }}' + +# Gracefully disable a member from a loadbalancer node: +- name: Step 1 + community.general.apache2_mod_proxy: + balancer_vhost: '{{ vhost_host }}' + member_host: '{{ member.host }}' + state: drained + delegate_to: myloadbalancernode + +- name: Step 2 + ansible.builtin.wait_for: + host: '{{ member.host }}' + port: '{{ member.port }}' + state: drained + delegate_to: myloadbalancernode + +- name: Step 3 + community.general.apache2_mod_proxy: + balancer_vhost: '{{ vhost_host }}' + member_host: '{{ member.host }}' + state: absent + delegate_to: myloadbalancernode +''' + +RETURN = ''' +member: + description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter. + type: dict + returned: success + sample: + {"attributes": + {"Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + } +members: + description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success + type: list + sample: + [{"attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + }, + {"attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.21", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false} + } + ] +''' + +import re +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six import iteritems + +BEAUTIFUL_SOUP_IMP_ERR = None +try: + from BeautifulSoup import BeautifulSoup +except ImportError: + BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc() + HAS_BEAUTIFULSOUP = False +else: + HAS_BEAUTIFULSOUP = True + +# balancer member attributes extraction regexp: +EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)" +# Apache2 server version extraction regexp: +APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)" + + +def regexp_extraction(string, _regexp, groups=1): + """ Returns the capture group (default=1) specified in the regexp, applied to the string """ + regexp_search = re.search(string=str(string), pattern=str(_regexp)) + if regexp_search: + if regexp_search.group(groups) != '': + return str(regexp_search.group(groups)) + return None + + +class BalancerMember(object): + """ Apache 2.4 mod_proxy LB balancer member. + attributes: + read-only: + host -> member host (string), + management_url -> member management url (string), + protocol -> member protocol (string) + port -> member port (string), + path -> member location (string), + balancer_url -> url of this member's parent balancer (string), + attributes -> whole member attributes (dictionary) + module -> ansible module instance (AnsibleModule object). 
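To make the EXPRESSION pattern above more concrete, here is a small standalone run against a made-up management URL shaped like the samples in RETURN; capture groups 2 to 6 carry the balancer name, protocol, host, port, and path, which is exactly how BalancerMember consumes them below:

# Standalone illustration of the EXPRESSION regexp defined above; the URL
# is hypothetical but follows the balancer-manager link format.
import re

EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"

url = "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c"
match = re.search(EXPRESSION, url)
print(match.group(2))  # balancer name: mywsbalancer
print(match.group(3))  # protocol:      http
print(match.group(4))  # host:          10.10.0.20
print(match.group(5))  # port:          8080
print(match.group(6))  # path:          /ws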
+ writable: + status -> status of the member (dictionary) + """ + + def __init__(self, management_url, balancer_url, module): + self.host = regexp_extraction(management_url, str(EXPRESSION), 4) + self.management_url = str(management_url) + self.protocol = regexp_extraction(management_url, EXPRESSION, 3) + self.port = regexp_extraction(management_url, EXPRESSION, 5) + self.path = regexp_extraction(management_url, EXPRESSION, 6) + self.balancer_url = str(balancer_url) + self.module = module + + def get_member_attributes(self): + """ Returns a dictionary of a balancer member's attributes.""" + + balancer_member_page = fetch_url(self.module, self.management_url) + + if balancer_member_page[1]['status'] != 200: + self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + str(balancer_member_page[1])) + else: + try: + soup = BeautifulSoup(balancer_member_page[0]) + except TypeError as exc: + self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(exc)) + else: + subsoup = soup.findAll('table')[1].findAll('tr') + keys = subsoup[0].findAll('th') + for valuesset in subsoup[1::1]: + if re.search(pattern=self.host, string=str(valuesset)): + values = valuesset.findAll('td') + return dict((keys[x].string, values[x].string) for x in range(0, len(keys))) + + def get_member_status(self): + """ Returns a dictionary of a balancer member's status attributes.""" + status_mapping = {'disabled': 'Dis', + 'drained': 'Drn', + 'hot_standby': 'Stby', + 'ignore_errors': 'Ign'} + actual_status = str(self.attributes['Status']) + status = dict((mode, patt in actual_status) for mode, patt in iteritems(status_mapping)) + return status + + def set_member_status(self, values): + """ Sets a balancer member's status attributes amongst pre-mapped values.""" + values_mapping = {'disabled': '&w_status_D', + 'drained': '&w_status_N', + 'hot_standby': '&w_status_H', + 'ignore_errors': '&w_status_I'} + + request_body = regexp_extraction(self.management_url, EXPRESSION, 1) + values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in iteritems(values_mapping)) + request_body = "{0}{1}".format(request_body, values_url) + + response = fetch_url(self.module, self.management_url, data=request_body) + if response[1]['status'] != 200: + self.module.fail_json(msg="Could not set the member status! " + self.host + " " + str(response[1]['status'])) + + attributes = property(get_member_attributes) + status = property(get_member_status, set_member_status) + + +class Balancer(object): + """ Apache httpd 2.4 mod_proxy balancer object""" + + def __init__(self, host, suffix, module, members=None, tls=False): + if tls: + self.base_url = 'https://' + str(host) + self.url = 'https://' + str(host) + str(suffix) + else: + self.base_url = 'http://' + str(host) + self.url = 'http://' + str(host) + str(suffix) + self.module = module + self.page = self.fetch_balancer_page() + if members is None: + self._members = [] + + def fetch_balancer_page(self): + """ Returns the balancer management html page as a string for later parsing.""" + page = fetch_url(self.module, str(self.url)) + if page[1]['status'] != 200: + self.module.fail_json(msg="Could not get balancer page!
HTTP status response: " + str(page[1]['status'])) + else: + content = page[0].read() + apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) + if apache_version: + if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): + self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version)) + return content + else: + self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager") + + def get_balancer_members(self): + """ Returns members of the balancer as a generator object for later iteration.""" + try: + soup = BeautifulSoup(self.page) + except TypeError: + self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page)) + else: + for element in soup.findAll('a')[1::1]: + balancer_member_suffix = str(element.get('href')) + if not balancer_member_suffix: + self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!") + else: + yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module) + + members = property(get_balancer_members) + + +def main(): + """ Initiates module.""" + module = AnsibleModule( + argument_spec=dict( + balancer_vhost=dict(required=True, type='str'), + balancer_url_suffix=dict(default="/balancer-manager/", type='str'), + member_host=dict(type='str'), + state=dict(type='str'), + tls=dict(default=False, type='bool'), + validate_certs=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + if HAS_BEAUTIFULSOUP is False: + module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR) + + if module.params['state'] is not None: + states = module.params['state'].split(',') + if (len(states) > 1) and (("present" in states) or ("enabled" in states)): + module.fail_json(msg="state present/enabled is mutually exclusive with other states!") + else: + for _state in states: + if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']: + module.fail_json( + msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'." 
+ ) + else: + states = ['None'] + + mybalancer = Balancer(module.params['balancer_vhost'], + module.params['balancer_url_suffix'], + module=module, + tls=module.params['tls']) + + if module.params['member_host'] is None: + json_output_list = [] + for member in mybalancer.members: + json_output_list.append({ + "host": member.host, + "status": member.status, + "protocol": member.protocol, + "port": member.port, + "path": member.path, + "attributes": member.attributes, + "management_url": member.management_url, + "balancer_url": member.balancer_url + }) + module.exit_json( + changed=False, + members=json_output_list + ) + else: + changed = False + member_exists = False + member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} + for mode in member_status.keys(): + for state in states: + if mode == state: + member_status[mode] = True + elif mode == 'disabled' and state == 'absent': + member_status[mode] = True + + for member in mybalancer.members: + if str(member.host) == str(module.params['member_host']): + member_exists = True + if module.params['state'] is not None: + member_status_before = member.status + if not module.check_mode: + member_status_after = member.status = member_status + else: + member_status_after = member_status + if member_status_before != member_status_after: + changed = True + json_output = { + "host": member.host, + "status": member.status, + "protocol": member.protocol, + "port": member.port, + "path": member.path, + "attributes": member.attributes, + "management_url": member.management_url, + "balancer_url": member.balancer_url + } + if member_exists: + module.exit_json( + changed=changed, + member=json_output + ) + else: + module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!') + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/apache2_module.py b/ansible_collections/community/general/plugins/modules/apache2_module.py new file mode 100644 index 000000000..2e2456d74 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/apache2_module.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013-2014, Christian Berendt +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: apache2_module +author: + - Christian Berendt (@berendt) + - Ralf Hertel (@n0trax) + - Robin Roth (@robinro) +short_description: Enables/disables a module of the Apache2 webserver +description: + - Enables or disables a specified module of the Apache2 webserver. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - Name of the module to enable/disable as given to C(a2enmod/a2dismod). + required: true + identifier: + type: str + description: + - Identifier of the module as listed by C(apache2ctl -M). + This is optional and usually determined automatically by the common convention of + appending C(_module) to I(name) as well as custom exception for popular modules. + required: false + force: + description: + - Force disabling of default modules and override Debian warnings. 
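The I(identifier) guessing convention documented above (append C(_module) to the module name, with a few well-known exceptions) is implemented later in this file by create_apache_identifier(); a simplified sketch of the idea:

# Simplified sketch of the identifier-guessing convention used by this
# module's create_apache_identifier() helper further down; the real helper
# uses substring matching for the workaround names, this sketch uses
# exact lookups for brevity.
import re

def guess_identifier(name):
    text_workarounds = {'shib': 'mod_shib', 'shib2': 'mod_shib', 'evasive': 'evasive20_module'}
    if name in text_workarounds:
        return text_workarounds[name]
    php_match = re.search(r'^(php\d)\.', name)
    if php_match:
        return php_match.group(1) + '_module'
    return name + '_module'

print(guess_identifier('wsgi'))     # wsgi_module
print(guess_identifier('php7.4'))   # php7_module
print(guess_identifier('evasive'))  # evasive20_module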
+ required: false + type: bool + default: false + state: + type: str + description: + - Desired state of the module. + choices: ['present', 'absent'] + default: present + ignore_configcheck: + description: + - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules. + type: bool + default: false + warn_mpm_absent: + description: + - Control the behavior of the warning process for MPM modules. + type: bool + default: true + version_added: 6.3.0 +requirements: ["a2enmod","a2dismod"] +notes: + - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. + Whether it works on others depends on whether the C(a2enmod) and C(a2dismod) tools are available or not. +''' + +EXAMPLES = ''' +- name: Enable the Apache2 module wsgi + community.general.apache2_module: + state: present + name: wsgi + +- name: Disable the Apache2 module wsgi + community.general.apache2_module: + state: absent + name: wsgi + +- name: Disable default modules for Debian + community.general.apache2_module: + state: absent + name: autoindex + force: true + +- name: Disable mpm_worker and ignore warnings about missing mpm module + community.general.apache2_module: + state: absent + name: mpm_worker + ignore_configcheck: true + +- name: Disable mpm_event, enable mpm_prefork and ignore warnings about missing mpm module + community.general.apache2_module: + name: "{{ item.module }}" + state: "{{ item.state }}" + warn_mpm_absent: false + ignore_configcheck: true + loop: + - module: mpm_event + state: absent + - module: mpm_prefork + state: present + +- name: Enable dump_io module, which is identified as dumpio_module inside apache2 + community.general.apache2_module: + state: present + name: dump_io + identifier: dumpio_module +''' + +RETURN = ''' +result: + description: message about action taken + returned: always + type: str +warnings: + description: list of warning messages + returned: when needed + type: list +rc: + description: return code of underlying command + returned: failed + type: int +stdout: + description: stdout of underlying command + returned: failed + type: str +stderr: + description: stderr of underlying command + returned: failed + type: str +''' + +import re + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + +_re_threaded = re.compile(r'threaded: *yes') + + +def _run_threaded(module): + control_binary = _get_ctl_binary(module) + result, stdout, stderr = module.run_command([control_binary, "-V"]) + + return bool(_re_threaded.search(stdout)) + + +def _get_ctl_binary(module): + for command in ['apache2ctl', 'apachectl']: + ctl_binary = module.get_bin_path(command) + if ctl_binary is not None: + return ctl_binary + + module.fail_json(msg="Neither apache2ctl nor apachectl found. At least one apache control binary is necessary.") + + +def _module_is_enabled(module): + control_binary = _get_ctl_binary(module) + result, stdout, stderr = module.run_command([control_binary, "-M"]) + + if result != 0: + error_msg = "Error executing %s: %s" % (control_binary, stderr) + if module.params['ignore_configcheck']: + if 'AH00534' in stderr and 'mpm_' in module.params['name']: + if module.params['warn_mpm_absent']: + module.warnings.append( + "No MPM module loaded! apache2 reload AND other module actions" + " will fail if no MPM module is loaded immediately."
+ ) + else: + module.warnings.append(error_msg) + return False + else: + module.fail_json(msg=error_msg) + + searchstring = ' ' + module.params['identifier'] + return searchstring in stdout + + +def create_apache_identifier(name): + """ + By convention if a module is loaded via name, it appears in apache2ctl -M as + name_module. + + Some modules don't follow this convention and we use replacements for those.""" + + # a2enmod name replacement to apache2ctl -M names + text_workarounds = [ + ('shib', 'mod_shib'), + ('shib2', 'mod_shib'), + ('evasive', 'evasive20_module'), + ] + + # re expressions to extract subparts of names + re_workarounds = [ + ('php', re.compile(r'^(php\d)\.')), + ] + + for a2enmod_spelling, module_name in text_workarounds: + if a2enmod_spelling in name: + return module_name + + for search, reexpr in re_workarounds: + if search in name: + try: + rematch = reexpr.search(name) + return rematch.group(1) + '_module' + except AttributeError: + pass + + return name + '_module' + + +def _set_state(module, state): + name = module.params['name'] + force = module.params['force'] + + want_enabled = state == 'present' + state_string = {'present': 'enabled', 'absent': 'disabled'}[state] + a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state] + success_msg = "Module %s %s" % (name, state_string) + + if _module_is_enabled(module) != want_enabled: + if module.check_mode: + module.exit_json(changed=True, + result=success_msg, + warnings=module.warnings) + + a2mod_binary_path = module.get_bin_path(a2mod_binary) + if a2mod_binary_path is None: + module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) + + a2mod_binary_cmd = [a2mod_binary_path] + + if not want_enabled and force: + # force exists only for a2dismod on debian + a2mod_binary_cmd.append('-f') + + result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name]) + + if _module_is_enabled(module) == want_enabled: + module.exit_json(changed=True, + result=success_msg, + warnings=module.warnings) + else: + msg = ( + 'Failed to set module {name} to {state}:\n' + '{stdout}\n' + 'Maybe the module identifier ({identifier}) was guessed incorrectly. ' + 'Consider setting the "identifier" option.' + ).format( + name=name, + state=state_string, + stdout=stdout, + identifier=module.params['identifier'] + ) + module.fail_json(msg=msg, + rc=result, + stdout=stdout, + stderr=stderr) + else: + module.exit_json(changed=False, + result=success_msg, + warnings=module.warnings) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + identifier=dict(type='str'), + force=dict(type='bool', default=False), + state=dict(default='present', choices=['absent', 'present']), + ignore_configcheck=dict(type='bool', default=False), + warn_mpm_absent=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + module.warnings = [] + + name = module.params['name'] + if name == 'cgi' and _run_threaded(module): + module.fail_json(msg="Your MPM seems to be threaded. 
No automatic actions on module cgi possible.") + + if not module.params['identifier']: + module.params['identifier'] = create_apache_identifier(module.params['name']) + + if module.params['state'] in ['present', 'absent']: + _set_state(module, module.params['state']) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/apk.py b/ansible_collections/community/general/plugins/modules/apk.py new file mode 100644 index 000000000..e56b2165d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/apk.py @@ -0,0 +1,378 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Kevin Brebanov +# Based on pacman (Afterburn , Aaron Bull Schaefer ) +# and apt (Matthew Williams ) modules. +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: apk +short_description: Manages apk packages +description: + - Manages I(apk) packages for Alpine Linux. +author: "Kevin Brebanov (@kbrebanov)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + available: + description: + - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them) + if the currently installed package is no longer available from any repository. + type: bool + default: false + name: + description: + - A package name, like C(foo), or multiple packages, like C(foo, bar). + type: list + elements: str + no_cache: + description: + - Do not use any local cache path. + type: bool + default: false + version_added: 1.0.0 + repository: + description: + - A package repository or multiple repositories. + Unlike with the underlying apk command, this list will override the system repositories rather than supplement them. + type: list + elements: str + state: + description: + - Indicates the desired package(s) state. + - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias. + - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias. + - C(latest) ensures the package(s) is/are present and the latest version(s). + default: present + choices: [ "present", "absent", "latest", "installed", "removed" ] + type: str + update_cache: + description: + - Update repository indexes. Can be run with other steps or on it's own. + type: bool + default: false + upgrade: + description: + - Upgrade all installed packages to their latest version. + type: bool + default: false + world: + description: + - Use a custom world file when checking for explicitly installed packages. + type: str + default: /etc/apk/world + version_added: 5.4.0 +notes: + - 'I(name) and I(upgrade) are mutually exclusive.' + - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option. 
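The changed-package list this module returns is recovered by scanning apk's stdout; a toy run of the same regular expression used by the parse_for_packages() helper defined below, against an invented transcript:

# Toy demonstration of the stdout-parsing approach used by
# parse_for_packages() below; the transcript lines are invented, but
# follow apk's '(N/M) Action pkgname (version)' progress format.
import re

regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')

stdout = """(1/2) Installing foo (1.2.3-r0)
(2/2) Installing bar (2.0.0-r1)
OK: 42 MiB in 77 packages"""

packages = []
for line in stdout.split('\n'):
    m = regex.search(line)
    if m:
        packages.append(m.group(1))
print(packages)  # ['foo', 'bar']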
+''' + +EXAMPLES = ''' +- name: Update repositories and install foo package + community.general.apk: + name: foo + update_cache: true + +- name: Update repositories and install foo and bar packages + community.general.apk: + name: foo,bar + update_cache: true + +- name: Remove foo package + community.general.apk: + name: foo + state: absent + +- name: Remove foo and bar packages + community.general.apk: + name: foo,bar + state: absent + +- name: Install the package foo + community.general.apk: + name: foo + state: present + +- name: Install the packages foo and bar + community.general.apk: + name: foo,bar + state: present + +- name: Update repositories and update package foo to latest version + community.general.apk: + name: foo + state: latest + update_cache: true + +- name: Update repositories and update packages foo and bar to latest versions + community.general.apk: + name: foo,bar + state: latest + update_cache: true + +- name: Update all installed packages to the latest versions + community.general.apk: + upgrade: true + +- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available + community.general.apk: + available: true + upgrade: true + +- name: Update repositories as a separate step + community.general.apk: + update_cache: true + +- name: Install package from a specific repository + community.general.apk: + name: foo + state: latest + update_cache: true + repository: http://dl-3.alpinelinux.org/alpine/edge/main + +- name: Install package without using cache + community.general.apk: + name: foo + state: latest + no_cache: true + +- name: Install package checking a custom world + community.general.apk: + name: foo + state: latest + world: /etc/apk/world.custom +''' + +RETURN = ''' +packages: + description: a list of packages that have been changed + returned: when packages have changed + type: list + sample: ['package', 'other-package'] +''' + +import re +# Import module snippets. +from ansible.module_utils.basic import AnsibleModule + + +def parse_for_packages(stdout): + packages = [] + data = stdout.split('\n') + regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)') + for l in data: + p = regex.search(l) + if p: + packages.append(p.group(1)) + return packages + + +def update_package_db(module, exit): + cmd = "%s update" % (APK_PATH) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc != 0: + module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr) + elif exit: + module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr) + else: + return True + + +def query_toplevel(module, name, world): + # world contains a list of top-level packages separated by ' ' or \n + # packages may contain repository (@) or version (=<>~) separator characters or start with negation ! 
+ regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$') + with open(world) as f: + content = f.read().split() + for p in content: + if regex.search(p): + return True + return False + + +def query_package(module, name): + cmd = "%s -v info --installed %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc == 0: + return True + else: + return False + + +def query_latest(module, name): + cmd = "%s version %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name)) + match = re.search(search_pattern, stdout) + if match and match.group(2) == "<": + return False + return True + + +def query_virtual(module, name): + cmd = "%s -v info --description %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + search_pattern = r"^%s: virtual meta package" % (re.escape(name)) + if re.search(search_pattern, stdout): + return True + return False + + +def get_dependencies(module, name): + cmd = "%s -v info --depends %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + dependencies = stdout.split() + if len(dependencies) > 1: + return dependencies[1:] + else: + return [] + + +def upgrade_packages(module, available): + if module.check_mode: + cmd = "%s upgrade --simulate" % (APK_PATH) + else: + cmd = "%s upgrade" % (APK_PATH) + if available: + cmd = "%s --available" % cmd + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + packagelist = parse_for_packages(stdout) + if rc != 0: + module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist) + if re.search(r'^OK', stdout): + module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist) + + +def install_packages(module, names, state, world): + upgrade = False + to_install = [] + to_upgrade = [] + for name in names: + # Check if virtual package + if query_virtual(module, name): + # Get virtual package dependencies + dependencies = get_dependencies(module, name) + for dependency in dependencies: + if state == 'latest' and not query_latest(module, dependency): + to_upgrade.append(dependency) + else: + if not query_toplevel(module, name, world): + to_install.append(name) + elif state == 'latest' and not query_latest(module, name): + to_upgrade.append(name) + if to_upgrade: + upgrade = True + if not to_install and not upgrade: + module.exit_json(changed=False, msg="package(s) already installed") + packages = " ".join(to_install + to_upgrade) + if upgrade: + if module.check_mode: + cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages) + else: + cmd = "%s add --upgrade %s" % (APK_PATH, packages) + else: + if module.check_mode: + cmd = "%s add --simulate %s" % (APK_PATH, packages) + else: + cmd = "%s add %s" % (APK_PATH, packages) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + packagelist = parse_for_packages(stdout) + if rc != 0: + module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) + + +def remove_packages(module, names): + installed = [] + for name in names: + if query_package(module, name): + 
installed.append(name) + if not installed: + module.exit_json(changed=False, msg="package(s) already removed") + names = " ".join(installed) + if module.check_mode: + cmd = "%s del --purge --simulate %s" % (APK_PATH, names) + else: + cmd = "%s del --purge %s" % (APK_PATH, names) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + packagelist = parse_for_packages(stdout) + # Check to see if packages are still present because of dependencies + for name in installed: + if query_package(module, name): + rc = 1 + break + if rc != 0: + module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist) + +# ========================================== +# Main control flow. + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']), + name=dict(type='list', elements='str'), + no_cache=dict(default=False, type='bool'), + repository=dict(type='list', elements='str'), + update_cache=dict(default=False, type='bool'), + upgrade=dict(default=False, type='bool'), + available=dict(default=False, type='bool'), + world=dict(default='/etc/apk/world', type='str'), + ), + required_one_of=[['name', 'update_cache', 'upgrade']], + mutually_exclusive=[['name', 'upgrade']], + supports_check_mode=True + ) + + # Set LANG env since we parse stdout + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + global APK_PATH + APK_PATH = module.get_bin_path('apk', required=True) + + p = module.params + + if p['no_cache']: + APK_PATH = "%s --no-cache" % (APK_PATH, ) + + # add repositories to the APK_PATH + if p['repository']: + for r in p['repository']: + APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r) + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + if p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p['update_cache']: + update_package_db(module, not p['name'] and not p['upgrade']) + + if p['upgrade']: + upgrade_packages(module, p['available']) + + if p['state'] in ['present', 'latest']: + install_packages(module, p['name'], p['state'], p['world']) + elif p['state'] == 'absent': + remove_packages(module, p['name']) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/apt_repo.py b/ansible_collections/community/general/plugins/modules/apt_repo.py new file mode 100644 index 000000000..556039027 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/apt_repo.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Mikhail Gordeev + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: apt_repo +short_description: Manage APT repositories via apt-repo +description: + - Manages APT repositories using apt-repo tool. + - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo +notes: + - This module works on ALT based distros. + - Does NOT support checkmode, due to a limitation in apt-repo tool. 
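Since apt-repo reports no machine-readable change status, main() below derives C(changed) by comparing the repository listing before and after the requested operation; a minimal standalone sketch of that approach, assuming /usr/bin/apt-repo is present:

# Minimal sketch of the before/after comparison this module's main()
# uses to decide 'changed'; 'apt-repo' with no arguments prints the
# configured repository list.
import subprocess

def list_repos():
    return subprocess.run(['/usr/bin/apt-repo'], capture_output=True, text=True, check=True).stdout

before = list_repos()
subprocess.run(['/usr/bin/apt-repo', 'add', 'Sisyphus'], check=True)
after = list_repos()
print('changed:', before != after)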
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + repo: + description: + - Name of the repository to add or remove. + required: true + type: str + state: + description: + - Indicates the desired repository state. + choices: [ absent, present ] + default: present + type: str + remove_others: + description: + - Remove repositories other than the one being added. + - Used if I(state=present). + type: bool + default: false + update: + description: + - Update the package database after changing repositories. + type: bool + default: false +author: +- Mikhail Gordeev (@obirvalger) +''' + +EXAMPLES = ''' +- name: Remove all repositories + community.general.apt_repo: + repo: all + state: absent + +- name: Add repository `Sisyphus` and remove other repositories + community.general.apt_repo: + repo: Sisyphus + state: present + remove_others: true + +- name: Add local repository `/space/ALT/Sisyphus` and update package cache + community.general.apt_repo: + repo: copy:///space/ALT/Sisyphus + state: present + update: true +''' + +RETURN = ''' # ''' + +import os + +from ansible.module_utils.basic import AnsibleModule + +APT_REPO_PATH = "/usr/bin/apt-repo" + + +def apt_repo(module, *args): + """run apt-repo with args and return its output""" + # make args list to use in concatenation + args = list(args) + rc, out, err = module.run_command([APT_REPO_PATH] + args) + + if rc != 0: + module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err)) + + return out + + +def add_repo(module, repo): + """add a repository""" + apt_repo(module, 'add', repo) + + +def rm_repo(module, repo): + """remove a repository""" + apt_repo(module, 'rm', repo) + + +def set_repo(module, repo): + """add a repository and remove other repositories""" + # first add to validate repository + apt_repo(module, 'add', repo) + apt_repo(module, 'rm', 'all') + apt_repo(module, 'add', repo) + + +def update(module): + """update package cache""" + apt_repo(module, 'update') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repo=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + remove_others=dict(type='bool', default=False), + update=dict(type='bool', default=False), + ), + ) + + if not os.path.exists(APT_REPO_PATH): + module.fail_json(msg='cannot find /usr/bin/apt-repo') + + params = module.params + repo = params['repo'] + state = params['state'] + old_repositories = apt_repo(module) + + if state == 'present': + if params['remove_others']: + set_repo(module, repo) + else: + add_repo(module, repo) + elif state == 'absent': + rm_repo(module, repo) + + if params['update']: + update(module) + + new_repositories = apt_repo(module) + changed = old_repositories != new_repositories + module.exit_json(changed=changed, repo=repo, state=state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/apt_rpm.py b/ansible_collections/community/general/plugins/modules/apt_rpm.py new file mode 100644 index 000000000..8749086bb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/apt_rpm.py @@ -0,0 +1,272 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Evgenii Terechkov +# Written by Evgenii Terechkov +# Based on urpmi module written by Philippe Makowski + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: apt_rpm +short_description: APT-RPM package manager +description: + - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + package: + description: + - List of packages to install, upgrade, or remove. + aliases: [ name, pkg ] + type: list + elements: str + state: + description: + - Indicates the desired package state. + choices: [ absent, present, installed, removed ] + default: present + type: str + update_cache: + description: + - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step. + - Default is not to update the cache. + type: bool + default: false + clean: + description: + - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but + the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/). + - Can be run as part of the package installation (clean runs before install) or as a separate step. + type: bool + default: false + version_added: 6.5.0 + dist_upgrade: + description: + - If true performs an C(apt-get dist-upgrade) to upgrade system. + type: bool + default: false + version_added: 6.5.0 + update_kernel: + description: + - If true performs an C(update-kernel) to upgrade kernel packages. + type: bool + default: false + version_added: 6.5.0 +author: +- Evgenii Terechkov (@evgkrsk) +''' + +EXAMPLES = ''' +- name: Install package foo + community.general.apt_rpm: + pkg: foo + state: present + +- name: Install packages foo and bar + community.general.apt_rpm: + pkg: + - foo + - bar + state: present + +- name: Remove package foo + community.general.apt_rpm: + pkg: foo + state: absent + +- name: Remove packages foo and bar + community.general.apt_rpm: + pkg: foo,bar + state: absent + +# bar will be the updated if a newer version exists +- name: Update the package database and install bar + community.general.apt_rpm: + name: bar + state: present + update_cache: true + +- name: Run the equivalent of "apt-get clean" as a separate step + community.general.apt_rpm: + clean: true + +- name: Perform cache update and complete system upgrade (includes kernel) + community.general.apt_rpm: + update_cache: true + dist_upgrade: true + update_kernel: true +''' + +import os + +from ansible.module_utils.basic import AnsibleModule + +APT_PATH = "/usr/bin/apt-get" +RPM_PATH = "/usr/bin/rpm" +APT_GET_ZERO = "\n0 upgraded, 0 newly installed" +UPDATE_KERNEL_ZERO = "\nTry to install new kernel " + + +def query_package(module, name): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name)) + if rc == 0: + return True + else: + return False + + +def query_package_provides(module, name): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name)) + return rc == 0 + + +def update_package_db(module): + rc, update_out, err = module.run_command([APT_PATH, "update"], check_rc=True, environ_update={"LANG": "C"}) + return (False, update_out) + + +def dir_size(module, path): + 
total_size = 0 + for root, dirs, files in os.walk(path): + for f in files: + total_size += os.path.getsize(os.path.join(root, f)) + return total_size + + +def clean(module): + t = dir_size(module, "/var/cache/apt/archives") + rc, out, err = module.run_command([APT_PATH, "clean"], check_rc=True) + return (t != dir_size(module, "/var/cache/apt/archives"), out) + + +def dist_upgrade(module): + rc, out, err = module.run_command([APT_PATH, "-y", "dist-upgrade"], check_rc=True, environ_update={"LANG": "C"}) + return (APT_GET_ZERO not in out, out) + + +def update_kernel(module): + rc, out, err = module.run_command(["/usr/sbin/update-kernel", "-y"], check_rc=True, environ_update={"LANG": "C"}) + return (UPDATE_KERNEL_ZERO not in out, out) + + +def remove_packages(module, packages): + + if packages is None: + return (False, "Empty package list") + + remove_c = 0 + # Using a for loop so that, in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package), environ_update={"LANG": "C"}) + + if rc != 0: + module.fail_json(msg="failed to remove %s: %s" % (package, err)) + + remove_c += 1 + + if remove_c > 0: + return (True, "removed %s package(s)" % remove_c) + + return (False, "package(s) already absent") + + +def install_packages(module, pkgspec): + + if pkgspec is None: + return (False, "Empty package list") + + packages = "" + for package in pkgspec: + if not query_package_provides(module, package): + packages += "'%s' " % package + + if len(packages) != 0: + + rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages), environ_update={"LANG": "C"}) + + installed = True + for package in pkgspec: + if not query_package_provides(module, package): + installed = False + + # apt-rpm always has 0 for exit code if --force is used + if rc or not installed: + module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err)) + else: + return (True, "%s present(s)" % packages) + else: + return (False, "Nothing to install") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']), + update_cache=dict(type='bool', default=False), + clean=dict(type='bool', default=False), + dist_upgrade=dict(type='bool', default=False), + update_kernel=dict(type='bool', default=False), + package=dict(type='list', elements='str', aliases=['name', 'pkg']), + ), + ) + + if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH): + module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") + + p = module.params + modified = False + output = "" + + if p['update_cache']: + update_package_db(module) + + if p['clean']: + (m, out) = clean(module) + modified = modified or m + + if p['dist_upgrade']: + (m, out) = dist_upgrade(module) + modified = modified or m + output += out + + if p['update_kernel']: + (m, out) = update_kernel(module) + modified = modified or m + output += out + + packages = p['package'] + if p['state'] in ['installed', 'present']: + (m, out) = install_packages(module, packages) + modified = modified or m + output += out + + if p['state'] in ['absent', 'removed']: + (m, out) = remove_packages(module, packages) + modified = modified or m + output += out + + # Return total modification status and output of all commands + module.exit_json(changed=modified, msg=output) + + 
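Because C(apt-get clean) gives no indication of whether anything was removed, clean() above infers a change from the cache directory shrinking; the same size walk in isolation, for reference:

# Standalone version of the directory-size walk used by dir_size()/clean()
# above: a change is inferred when the archive cache shrinks.
import os

def dir_size(path):
    total = 0
    for root, dirs, files in os.walk(path):
        for name in files:
            total += os.path.getsize(os.path.join(root, name))
    return total

before = dir_size('/var/cache/apt/archives')
# ... run 'apt-get clean' here ...
changed = dir_size('/var/cache/apt/archives') != before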
+if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/archive.py b/ansible_collections/community/general/plugins/modules/archive.py new file mode 100644 index 000000000..8748fb8a3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/archive.py @@ -0,0 +1,686 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Ben Doherty +# Sponsored by Oomph, Inc. http://www.oomphinc.com +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: archive +short_description: Creates a compressed archive of one or more files or trees +extends_documentation_fragment: + - files + - community.general.attributes +description: + - Creates or extends an archive. + - The source and archive are on the remote host, and the archive I(is not) copied to the local host. + - Source files can be deleted after archival by specifying I(remove=True). +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive. + type: list + elements: path + required: true + format: + description: + - The type of compression to use. + - Support for xz was added in Ansible 2.5. + type: str + choices: [ bz2, gz, tar, xz, zip ] + default: gz + dest: + description: + - The file name of the destination archive. The parent directory must exists on the remote host. + - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. + - If the destination archive already exists, it will be truncated and overwritten. + type: path + exclude_path: + description: + - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion. + - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. + type: list + elements: path + default: [] + exclusion_patterns: + description: + - Glob style patterns to exclude files or directories from the resulting archive. + - This differs from I(exclude_path) which applies only to the source paths from I(path). + type: list + elements: path + version_added: 3.2.0 + force_archive: + description: + - Allows you to force the module to treat this as an archive even if only a single file is specified. + - By default when a single file is specified it is compressed only (not archived). + - Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module. + type: bool + default: false + remove: + description: + - Remove any added source files and trees after adding to archive. + type: bool + default: false +notes: + - Can produce I(gzip), I(bzip2), I(lzma), and I(zip) compressed files or archives. + - This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives. + These are part of the Python standard library for Python 2 and 3. +requirements: + - Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python 2) if using C(xz) format. 
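The C(xz) requirement above is satisfied differently per interpreter; the module's import block (shown later in the file) probes the Python 3 standard library first and falls back to the Python 2 backport:

# The lzma import fallback this module uses (see its import block below):
# stdlib lzma on Python 3, the backports.lzma package on Python 2.
try:
    import lzma  # Python 3 standard library
except ImportError:
    from backports import lzma  # Python 2: provided by backports.lzma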
+seealso: + - module: ansible.builtin.unarchive +author: + - Ben Doherty (@bendoh) +''' + +EXAMPLES = r''' +- name: Compress directory /path/to/foo/ into /path/to/foo.tgz + community.general.archive: + path: /path/to/foo + dest: /path/to/foo.tgz + +- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it + community.general.archive: + path: /path/to/foo + remove: true + +- name: Create a zip archive of /path/to/foo + community.general.archive: + path: /path/to/foo + format: zip + +- name: Create a bz2 archive of multiple files, rooted at /path + community.general.archive: + path: + - /path/to/foo + - /path/wong/foo + dest: /path/file.tar.bz2 + format: bz2 + +- name: Create a bz2 archive of a globbed path, while excluding specific dirnames + community.general.archive: + path: + - /path/to/foo/* + dest: /path/file.tar.bz2 + exclude_path: + - /path/to/foo/bar + - /path/to/foo/baz + format: bz2 + +- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames + community.general.archive: + path: + - /path/to/foo/* + dest: /path/file.tar.bz2 + exclude_path: + - /path/to/foo/ba* + format: bz2 + +- name: Use gzip to compress a single archive (i.e don't archive it first with tar) + community.general.archive: + path: /path/to/foo/single.file + dest: /path/file.gz + format: gz + +- name: Create a tar.gz archive of a single file. + community.general.archive: + path: /path/to/foo/single.file + dest: /path/file.tar.gz + format: gz + force_archive: true +''' + +RETURN = r''' +state: + description: + The state of the input C(path). + type: str + returned: always +dest_state: + description: + - The state of the I(dest) file. + - C(absent) when the file does not exist. + - C(archive) when the file is an archive. + - C(compress) when the file is compressed, but not an archive. + - C(incomplete) when the file is an archive, but some files under I(path) were not found. + type: str + returned: success + version_added: 3.4.0 +missing: + description: Any files that were missing from the source. + type: list + returned: success +archived: + description: Any files that were compressed or added to the archive. + type: list + returned: success +arcroot: + description: The archive root. + type: str + returned: always +expanded_paths: + description: The list of matching paths from paths argument. + type: list + returned: always +expanded_exclude_paths: + description: The list of matching exclude paths from the exclude_path argument. 
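The C(arcroot) value documented above is the longest shared parent directory of all source paths, computed by the common_path() helper defined below; a quick standalone check with invented paths:

# Illustration of how 'arcroot' is derived by common_path() below; the
# input paths are invented.
import os

def common_path(paths):
    return os.path.join(
        os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), '') for p in paths])), ''
    )

print(common_path(['/path/to/foo', '/path/wong/foo']))  # /path/
print(common_path(['/data/a/x.txt', '/data/a/y.txt']))  # /data/a/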
+ type: list + returned: always +''' + +import abc +import bz2 +import glob +import gzip +import io +import os +import re +import shutil +import tarfile +import zipfile +from fnmatch import fnmatch +from sys import version_info +from traceback import format_exc +from zlib import crc32 + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils import six + +try: # python 3.2+ + from zipfile import BadZipFile # type: ignore[attr-defined] +except ImportError: # older python + from zipfile import BadZipfile as BadZipFile + +LZMA_IMP_ERR = None +if six.PY3: + try: + import lzma + HAS_LZMA = True + except ImportError: + LZMA_IMP_ERR = format_exc() + HAS_LZMA = False +else: + try: + from backports import lzma + HAS_LZMA = True + except ImportError: + LZMA_IMP_ERR = format_exc() + HAS_LZMA = False + +PY27 = version_info[0:2] >= (2, 7) + +STATE_ABSENT = 'absent' +STATE_ARCHIVED = 'archive' +STATE_COMPRESSED = 'compress' +STATE_INCOMPLETE = 'incomplete' + + +def common_path(paths): + empty = b'' if paths and isinstance(paths[0], six.binary_type) else '' + + return os.path.join( + os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty + ) + + +def expand_paths(paths): + expanded_path = [] + is_globby = False + for path in paths: + b_path = _to_bytes(path) + if b'*' in b_path or b'?' in b_path: + e_paths = glob.glob(b_path) + is_globby = True + else: + e_paths = [b_path] + expanded_path.extend(e_paths) + return expanded_path, is_globby + + +def matches_exclusion_patterns(path, exclusion_patterns): + return any(fnmatch(path, p) for p in exclusion_patterns) + + +def is_archive(path): + return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) + + +def strip_prefix(prefix, string): + return string[len(prefix):] if string.startswith(prefix) else string + + +def _to_bytes(s): + return to_bytes(s, errors='surrogate_or_strict') + + +def _to_native(s): + return to_native(s, errors='surrogate_or_strict') + + +def _to_native_ascii(s): + return to_native(s, errors='surrogate_or_strict', encoding='ascii') + + +@six.add_metaclass(abc.ABCMeta) +class Archive(object): + def __init__(self, module): + self.module = module + + self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None + self.exclusion_patterns = module.params['exclusion_patterns'] or [] + self.format = module.params['format'] + self.must_archive = module.params['force_archive'] + self.remove = module.params['remove'] + + self.changed = False + self.destination_state = STATE_ABSENT + self.errors = [] + self.file = None + self.successes = [] + self.targets = [] + self.not_found = [] + + paths = module.params['path'] + self.expanded_paths, has_globs = expand_paths(paths) + self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] + + self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths)) + + if not self.paths: + module.fail_json( + path=', '.join(paths), + expanded_paths=_to_native(b', '.join(self.expanded_paths)), + expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)), + msg='Error, no source paths were found' + ) + + self.root = common_path(self.paths) + + if not self.must_archive: + self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) + + if not self.destination and not self.must_archive: + self.destination = 
b'%s.%s' % (self.paths[0], _to_bytes(self.format)) + + if self.must_archive and not self.destination: + module.fail_json( + dest=_to_native(self.destination), + path=', '.join(paths), + msg='Error, must specify "dest" when archiving multiple files or trees' + ) + + if self.remove: + self._check_removal_safety() + + self.original_checksums = self.destination_checksums() + self.original_size = self.destination_size() + + def add(self, path, archive_name): + try: + self._add(_to_native_ascii(path), _to_native(archive_name)) + if self.contains(_to_native(archive_name)): + self.successes.append(path) + except Exception as e: + self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e))) + + def add_single_target(self, path): + if self.format in ('zip', 'tar'): + self.open() + self.add(path, strip_prefix(self.root, path)) + self.close() + self.destination_state = STATE_ARCHIVED + else: + try: + f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb') + with open(path, 'rb') as f_in: + shutil.copyfileobj(f_in, f_out) + f_out.close() + self.successes.append(path) + self.destination_state = STATE_COMPRESSED + except (IOError, OSError) as e: + self.module.fail_json( + path=_to_native(path), + dest=_to_native(self.destination), + msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc() + ) + + def add_targets(self): + self.open() + try: + for target in self.targets: + if os.path.isdir(target): + for directory_path, directory_names, file_names in os.walk(target, topdown=True): + for directory_name in directory_names: + full_path = os.path.join(directory_path, directory_name) + self.add(full_path, strip_prefix(self.root, full_path)) + + for file_name in file_names: + full_path = os.path.join(directory_path, file_name) + self.add(full_path, strip_prefix(self.root, full_path)) + else: + self.add(target, strip_prefix(self.root, target)) + except Exception as e: + if self.format in ('zip', 'tar'): + archive_format = self.format + else: + archive_format = 'tar.' 
+ self.format + self.module.fail_json( + msg='Error when writing %s archive at %s: %s' % ( + archive_format, _to_native(self.destination), _to_native(e) + ), + exception=format_exc() + ) + self.close() + + if self.errors: + self.module.fail_json( + msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) + ) + + def is_different_from_original(self): + if self.original_checksums is None: + return self.original_size != self.destination_size() + else: + return self.original_checksums != self.destination_checksums() + + def destination_checksums(self): + if self.destination_exists() and self.destination_readable(): + return self._get_checksums(self.destination) + return None + + def destination_exists(self): + return self.destination and os.path.exists(self.destination) + + def destination_readable(self): + return self.destination and os.access(self.destination, os.R_OK) + + def destination_size(self): + return os.path.getsize(self.destination) if self.destination_exists() else 0 + + def find_targets(self): + for path in self.paths: + if not os.path.lexists(path): + self.not_found.append(path) + else: + self.targets.append(path) + + def has_targets(self): + return bool(self.targets) + + def has_unfound_targets(self): + return bool(self.not_found) + + def remove_single_target(self, path): + try: + os.remove(path) + except OSError as e: + self.module.fail_json( + path=_to_native(path), + msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() + ) + + def remove_targets(self): + for path in self.successes: + if os.path.exists(path): + try: + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + except OSError: + self.errors.append(_to_native(path)) + for path in self.paths: + try: + if os.path.isdir(path): + shutil.rmtree(path) + except OSError: + self.errors.append(_to_native(path)) + + if self.errors: + self.module.fail_json( + dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors + ) + + def update_permissions(self): + file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination) + self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) + + @property + def result(self): + return { + 'archived': [_to_native(p) for p in self.successes], + 'dest': _to_native(self.destination), + 'dest_state': self.destination_state, + 'changed': self.changed, + 'arcroot': _to_native(self.root), + 'missing': [_to_native(p) for p in self.not_found], + 'expanded_paths': [_to_native(p) for p in self.expanded_paths], + 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], + } + + def _check_removal_safety(self): + for path in self.paths: + if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')): + self.module.fail_json( + path=b', '.join(self.paths), + msg='Error, created archive can not be contained in source paths when remove=true' + ) + + def _open_compressed_file(self, path, mode): + f = None + if self.format == 'gz': + f = gzip.open(path, mode) + elif self.format == 'bz2': + f = bz2.BZ2File(path, mode) + elif self.format == 'xz': + f = lzma.LZMAFile(path, mode) + else: + self.module.fail_json(msg="%s is not a valid format" % self.format) + + return f + + @abc.abstractmethod + def close(self): + pass + + @abc.abstractmethod + def contains(self, name): + pass + + @abc.abstractmethod + def open(self): + pass + + @abc.abstractmethod + def _add(self, path, archive_name): + pass + + 
@abc.abstractmethod + def _get_checksums(self, path): + pass + + +class ZipArchive(Archive): + def __init__(self, module): + super(ZipArchive, self).__init__(module) + + def close(self): + self.file.close() + + def contains(self, name): + try: + self.file.getinfo(name) + except KeyError: + return False + return True + + def open(self): + self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) + + def _add(self, path, archive_name): + if not matches_exclusion_patterns(path, self.exclusion_patterns): + self.file.write(path, archive_name) + + def _get_checksums(self, path): + try: + archive = zipfile.ZipFile(_to_native_ascii(path), 'r') + checksums = set((info.filename, info.CRC) for info in archive.infolist()) + archive.close() + except BadZipFile: + checksums = set() + return checksums + + +class TarArchive(Archive): + def __init__(self, module): + super(TarArchive, self).__init__(module) + self.fileIO = None + + def close(self): + self.file.close() + if self.format == 'xz': + with lzma.open(_to_native(self.destination), 'wb') as f: + f.write(self.fileIO.getvalue()) + self.fileIO.close() + + def contains(self, name): + try: + self.file.getmember(name) + except KeyError: + return False + return True + + def open(self): + if self.format in ('gz', 'bz2'): + self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format) + # python3 tarfile module allows xz format but for python2 we have to create the tarfile + # in memory and then compress it with lzma. + elif self.format == 'xz': + self.fileIO = io.BytesIO() + self.file = tarfile.open(fileobj=self.fileIO, mode='w') + elif self.format == 'tar': + self.file = tarfile.open(_to_native_ascii(self.destination), 'w') + else: + self.module.fail_json(msg="%s is not a valid archive format" % self.format) + + def _add(self, path, archive_name): + def py27_filter(tarinfo): + return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo + + def py26_filter(path): + return matches_exclusion_patterns(path, self.exclusion_patterns) + + if PY27: + self.file.add(path, archive_name, recursive=False, filter=py27_filter) + else: + self.file.add(path, archive_name, recursive=False, exclude=py26_filter) + + def _get_checksums(self, path): + if HAS_LZMA: + LZMAError = lzma.LZMAError + else: + # Just picking another exception that's also listed below + LZMAError = tarfile.ReadError + try: + if self.format == 'xz': + with lzma.open(_to_native_ascii(path), 'r') as f: + archive = tarfile.open(fileobj=f) + checksums = set((info.name, info.chksum) for info in archive.getmembers()) + archive.close() + else: + archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format) + checksums = set((info.name, info.chksum) for info in archive.getmembers()) + archive.close() + except (LZMAError, tarfile.ReadError, tarfile.CompressionError): + try: + # The python implementations of gzip, bz2, and lzma do not support restoring compressed files + # to their original names so only file checksum is returned + f = self._open_compressed_file(_to_native_ascii(path), 'r') + checksum = 0 + while True: + chunk = f.read(16 * 1024 * 1024) + if not chunk: + break + checksum = crc32(chunk, checksum) + checksums = set([(b'', checksum)]) + f.close() + except Exception: + checksums = set() + return checksums + + +def get_archive(module): + if module.params['format'] == 'zip': + return ZipArchive(module) + else: + return TarArchive(module) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + 
path=dict(type='list', elements='path', required=True),
+            format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
+            dest=dict(type='path'),
+            exclude_path=dict(type='list', elements='path', default=[]),
+            exclusion_patterns=dict(type='list', elements='path'),
+            force_archive=dict(type='bool', default=False),
+            remove=dict(type='bool', default=False),
+        ),
+        add_file_common_args=True,
+        supports_check_mode=True,
+    )
+
+    if not HAS_LZMA and module.params['format'] == 'xz':
+        module.fail_json(
+            msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR
+        )
+
+    check_mode = module.check_mode
+
+    archive = get_archive(module)
+    archive.find_targets()
+
+    if not archive.has_targets():
+        if archive.destination_exists():
+            archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED
+    elif archive.has_targets() and archive.must_archive:
+        if check_mode:
+            archive.changed = True
+        else:
+            archive.add_targets()
+            archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED
+            archive.changed |= archive.is_different_from_original()
+            if archive.remove:
+                archive.remove_targets()
+    else:
+        if check_mode:
+            if not archive.destination_exists():
+                archive.changed = True
+        else:
+            path = archive.paths[0]
+            archive.add_single_target(path)
+            archive.changed |= archive.is_different_from_original()
+            if archive.remove:
+                archive.remove_single_target(path)
+
+    if archive.destination_exists():
+        archive.update_permissions()
+
+    module.exit_json(**archive.result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/atomic_container.py b/ansible_collections/community/general/plugins/modules/atomic_container.py
new file mode 100644
index 000000000..c26510296
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/atomic_container.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: atomic_container
+short_description: Manage the containers on the atomic host platform
+description:
+  - Manage the containers on the atomic host platform.
+  - Allows managing the lifecycle of a container on the atomic host platform.
+author: "Giuseppe Scrivano (@giuseppe)"
+notes:
+  - Host should support the C(atomic) command.
+requirements:
+  - atomic
+  - "python >= 2.6"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  backend:
+    description:
+      - Define the backend to use for the container.
+    required: true
+    choices: ["docker", "ostree"]
+    type: str
+  name:
+    description:
+      - Name of the container.
+    required: true
+    type: str
+  image:
+    description:
+      - The image to use to install the container.
+    required: true
+    type: str
+  rootfs:
+    description:
+      - Define the rootfs of the image.
+    type: str
+  state:
+    description:
+      - State of the container.
+    choices: ["absent", "latest", "present", "rollback"]
+    default: "latest"
+    type: str
+  mode:
+    description:
+      - Define if it is a user or a system container.
+ choices: ["user", "system"] + type: str + values: + description: + - Values for the installation of the container. + - This option is permitted only with mode 'user' or 'system'. + - The values specified here will be used at installation time as --set arguments for atomic install. + type: list + elements: str + default: [] +''' + +EXAMPLES = r''' + +- name: Install the etcd system container + community.general.atomic_container: + name: etcd + image: rhel/etcd + backend: ostree + state: latest + mode: system + values: + - ETCD_NAME=etcd.server + +- name: Uninstall the etcd system container + community.general.atomic_container: + name: etcd + image: rhel/etcd + backend: ostree + state: absent + mode: system +''' + +RETURN = r''' +msg: + description: The command standard output + returned: always + type: str + sample: 'Using default tag: latest ...' +''' + +# import module snippets +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def do_install(module, mode, rootfs, container, image, values_list, backend): + system_list = ["--system"] if mode == 'system' else [] + user_list = ["--user"] if mode == 'user' else [] + rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else [] + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + else: + changed = "Extracting" in out or "Copying blob" in out + module.exit_json(msg=out, changed=changed) + + +def do_update(module, container, image, values_list): + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + else: + changed = "Extracting" in out or "Copying blob" in out + module.exit_json(msg=out, changed=changed) + + +def do_uninstall(module, name, backend): + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + module.exit_json(msg=out, changed=True) + + +def do_rollback(module, name): + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'containers', 'rollback', name] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + else: + changed = "Rolling back" in out + module.exit_json(msg=out, changed=changed) + + +def core(module): + mode = module.params['mode'] + name = module.params['name'] + image = module.params['image'] + rootfs = module.params['rootfs'] + values = module.params['values'] + backend = module.params['backend'] + state = module.params['state'] + + atomic_bin = module.get_bin_path('atomic') + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + + values_list = ["--set=%s" % x for x in values] if values else [] + + args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name] + rc, out, err = module.run_command(args, check_rc=False) + if rc != 0: + module.fail_json(rc=rc, msg=err) + return + present = name in out + + if state == 'present' and present: + module.exit_json(msg=out, 
changed=False)
+    elif (state in ['latest', 'present']) and not present:
+        do_install(module, mode, rootfs, name, image, values_list, backend)
+    elif state == 'latest':
+        do_update(module, name, image, values_list)
+    elif state == 'absent':
+        if not present:
+            module.exit_json(msg="The container is not present", changed=False)
+        else:
+            do_uninstall(module, name, backend)
+    elif state == 'rollback':
+        do_rollback(module, name)
+
+
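+# For orientation, the helpers above map onto the atomic CLI roughly as
+# follows (an illustrative sketch; the flags come from the module parameters,
+# and the etcd / rhel/etcd values are example inputs, not defaults):
+#
+#   atomic containers list --no-trunc -n --all -f backend=ostree -f container=etcd
+#   atomic install --storage=ostree --name=etcd --system --set=ETCD_NAME=etcd.server rhel/etcd
+#   atomic containers update --rebase=rhel/etcd etcd
+#   atomic uninstall --storage=ostree etcd
+#   atomic containers rollback etcd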
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            mode=dict(choices=['user', 'system']),
+            name=dict(required=True),
+            image=dict(required=True),
+            rootfs=dict(),
+            state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+            backend=dict(required=True, choices=['docker', 'ostree']),
+            values=dict(type='list', default=[], elements='str'),
+        ),
+    )
+
+    # 'values' is only meaningful for user or system containers, i.e. when 'mode' is set
+    if module.params['values'] and module.params['mode'] is None:
+        module.fail_json(msg="values is supported only with user or system mode")
+
+    # Verify that the platform supports atomic command
+    dummy = module.get_bin_path('atomic', required=True)
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/atomic_host.py b/ansible_collections/community/general/plugins/modules/atomic_host.py
new file mode 100644
index 000000000..bb44c4489
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/atomic_host.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+  - Manage the atomic host platform.
+  - Rebooting of the Atomic host platform should be done outside this module.
+author:
+- Saravanan KR (@krsacme)
+notes:
+  - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
+requirements:
+  - atomic
+  - python >= 2.6
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  revision:
+    description:
+      - The version number of the atomic host to be deployed.
+      - Providing C(latest) will upgrade to the latest available version.
+    default: 'latest'
+    aliases: [ version ]
+    type: str
+'''
+
+EXAMPLES = r'''
+- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
+  community.general.atomic_host:
+    revision: latest
+
+- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+  community.general.atomic_host:
+    revision: 23.130
+'''
+
+RETURN = r'''
+msg:
+    description: The command standard output
+    returned: always
+    type: str
+    sample: 'Already on latest'
+'''
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def core(module):
+    revision = module.params['revision']
+    atomic_bin = module.get_bin_path('atomic', required=True)
+
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+    if revision == 'latest':
+        args = [atomic_bin, 'host', 'upgrade']
+    else:
+        args = [atomic_bin, 'host', 'deploy', revision]
+
+    rc, out, err = module.run_command(args, check_rc=False)
+
+    if rc == 77 and revision == 'latest':
+        module.exit_json(msg="Already on latest", changed=False)
+    elif rc != 0:
+        module.fail_json(rc=rc, msg=err)
+    else:
+        module.exit_json(msg=out, changed=True)
+
+
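+# Quick reference for the command construction in core() above (illustrative;
+# the revision values are example inputs):
+#
+#   revision: latest  ->  atomic host upgrade        (exit code 77 means
+#                                                     "already on latest" and
+#                                                     is not a failure)
+#   revision: 23.130  ->  atomic host deploy 23.130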
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            revision=dict(type='str', default='latest', aliases=["version"]),
+        ),
+    )
+
+    # Verify that the platform is atomic host
+    if not os.path.exists("/run/ostree-booted"):
+        module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/atomic_image.py b/ansible_collections/community/general/plugins/modules/atomic_image.py
new file mode 100644
index 000000000..65aec1e9d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/atomic_image.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+  - Manage the container images on the atomic host platform.
+  - Allows executing the commands specified by the RUN label in the container image, when present.
+author:
+- Saravanan KR (@krsacme)
+notes:
+  - Host should support the C(atomic) command.
+requirements:
+  - atomic
+  - python >= 2.6
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  backend:
+    description:
+      - Define the backend where the image is pulled.
+    choices: [ 'docker', 'ostree' ]
+    type: str
+  name:
+    description:
+      - Name of the container image.
+    required: true
+    type: str
+  state:
+    description:
+      - The state of the container image.
+      - The state C(latest) will ensure the container image is upgraded to the latest version and forcefully restart the container, if running.
+    choices: [ 'absent', 'latest', 'present' ]
+    default: 'latest'
+    type: str
+  started:
+    description:
+      - Start or stop the container.
+    type: bool
+    default: true
+'''
+
+EXAMPLES = r'''
+- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+  community.general.atomic_image:
+    name: rhel7/rsyslog
+    state: latest
+
+- name: Pull busybox to the OSTree backend
+  community.general.atomic_image:
+    name: busybox
+    state: latest
+    backend: ostree
+'''
+
+RETURN = r'''
+msg:
+    description: The command standard output
+    returned: always
+    type: str
+    sample: 'Using default tag: latest ...'
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def do_upgrade(module, image):
+    atomic_bin = module.get_bin_path('atomic')
+    args = [atomic_bin, 'update', '--force', image]
+    rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:  # something went wrong emit the msg
+        module.fail_json(rc=rc, msg=err)
+    elif 'Image is up to date' in out:
+        return False
+
+    return True
+
+
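+# Change detection note: a successful `atomic update --force <image>` whose
+# output contains "Image is up to date" is reported as unchanged; any other
+# successful run counts as an upgrade. The same stdout-sniffing pattern
+# ("Extracting" / "Copying blob") is used in core() below to decide whether
+# a pull changed anything.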
+def core(module):
+    image = module.params['name']
+    state = module.params['state']
+    started = module.params['started']
+    backend = module.params['backend']
+    is_upgraded = False
+
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+    atomic_bin = module.get_bin_path('atomic')
+    out = {}
+    err = {}
+    rc = 0
+
+    if backend:
+        if state == 'present' or state == 'latest':
+            args = [atomic_bin, 'pull', "--storage=%s" % backend, image]
+            rc, out, err = module.run_command(args, check_rc=False)
+            if rc < 0:
+                module.fail_json(rc=rc, msg=err)
+            else:
+                out_run = ""
+                if started:
+                    args = [atomic_bin, 'run', "--storage=%s" % backend, image]
+                    rc, out_run, err = module.run_command(args, check_rc=False)
+                    if rc < 0:
+                        module.fail_json(rc=rc, msg=err)
+
+                changed = "Extracting" in out or "Copying blob" in out
+                module.exit_json(msg=(out + out_run), changed=changed)
+        elif state == 'absent':
+            args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image]
+            rc, out, err = module.run_command(args, check_rc=False)
+            if rc < 0:
+                module.fail_json(rc=rc, msg=err)
+            else:
+                changed = "Unable to find" not in out
+                module.exit_json(msg=out, changed=changed)
+        return
+
+    if state == 'present' or state == 'latest':
+        if state == 'latest':
+            is_upgraded = do_upgrade(module, image)
+
+        if started:
+            args = [atomic_bin, 'run', image]
+        else:
+            args = [atomic_bin, 'install', image]
+    elif state == 'absent':
+        args = [atomic_bin, 'uninstall', image]
+
+    rc, out, err = module.run_command(args, check_rc=False)
+
+    if rc < 0:
+        module.fail_json(rc=rc, msg=err)
+    elif rc == 1 and 'already present' in err:
+        module.exit_json(result=err, changed=is_upgraded)
+    elif started and 'Container is running' in out:
+        module.exit_json(result=out, changed=is_upgraded)
+    else:
+        module.exit_json(msg=out, changed=True)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            backend=dict(type='str', choices=['docker', 'ostree']),
+            name=dict(type='str', required=True),
+            state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
+            started=dict(type='bool', default=True),
+        ),
+    )
+
+    # Verify that the platform supports atomic command
+    dummy = module.get_bin_path('atomic', required=True)
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/awall.py b/ansible_collections/community/general/plugins/modules/awall.py
new file mode 100644
index 000000000..da1b29f70
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/awall.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ted Trask
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: awall
+short_description: Manage awall policies
+author: Ted Trask (@tdtrask)
+description:
+  - This module allows enabling, disabling, and activating C(awall) policies.
+  - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
+    and activates the configuration on the system.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - One or more policy names.
+    type: list
+    elements: str
+  state:
+    description:
+      - Whether the policies should be enabled or disabled.
+    type: str
+    choices: [ disabled, enabled ]
+    default: enabled
+  activate:
+    description:
+      - Activate the new firewall rules.
+      - Can be run with other steps or on its own.
+      - Idempotency is affected if I(activate=true), as the module will always report a changed state.
+    type: bool
+    default: false
+notes:
+  - At least one of I(name) and I(activate) is required.
'''
+
+EXAMPLES = r'''
+- name: Enable "foo" and "bar" policy
+  community.general.awall:
+    name: [ foo, bar ]
+    state: enabled
+
+- name: Disable "foo" and "bar" policy and activate new rules
+  community.general.awall:
+    name:
+      - foo
+      - bar
+    state: disabled
+    activate: false
+
+- name: Activate currently enabled firewall rules
+  community.general.awall:
+    activate: true
+'''
+
+RETURN = ''' # '''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def activate(module):
+    cmd = "%s activate --force" % (AWALL_PATH)
+    rc, stdout, stderr = module.run_command(cmd)
+    if rc == 0:
+        return True
+    else:
+        module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
+
+
+def is_policy_enabled(module, name):
+    cmd = "%s list" % (AWALL_PATH)
+    rc, stdout, stderr = module.run_command(cmd)
+    if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
+        return True
+    return False
+
+
+def enable_policy(module, names, act):
+    policies = []
+    for name in names:
+        if not is_policy_enabled(module, name):
+            policies.append(name)
+    if not policies:
+        module.exit_json(changed=False, msg="policy(ies) already enabled")
+    names = " ".join(policies)
+    if module.check_mode:
+        cmd = "%s list" % (AWALL_PATH)
+    else:
+        cmd = "%s enable %s" % (AWALL_PATH, names)
+    rc, stdout, stderr = module.run_command(cmd)
+    if rc != 0:
+        module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
+    if act and not module.check_mode:
+        activate(module)
+    module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
+
+
+def disable_policy(module, names, act):
+    policies = []
+    for name in names:
+        if is_policy_enabled(module, name):
+            policies.append(name)
+    if not policies:
+        module.exit_json(changed=False, msg="policy(ies) already disabled")
+    names = " ".join(policies)
+    if module.check_mode:
+        cmd = "%s list" % (AWALL_PATH)
+    else:
+        cmd = "%s disable %s" % (AWALL_PATH, names)
+    rc, stdout, stderr = module.run_command(cmd)
+    if rc != 0:
+        module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
+    if act and not module.check_mode:
+        activate(module)
+    module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
+
+
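+# The helpers above shell out to the awall CLI, roughly as follows
+# (illustrative, with example policy names):
+#
+#   awall list               # used by is_policy_enabled() and in check mode
+#   awall enable foo bar     # enable_policy()
+#   awall disable foo bar    # disable_policy()
+#   awall activate --force   # activate(), applies the generated firewall rules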
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
+            name=dict(type='list', elements='str'),
+            activate=dict(type='bool', default=False),
+        ),
+        required_one_of=[['name', 'activate']],
+        supports_check_mode=True,
+    )
+
+    global AWALL_PATH
+    AWALL_PATH = module.get_bin_path('awall', required=True)
+
+    p = module.params
+
+    if p['name']:
+        if p['state'] == 'enabled':
+            enable_policy(module, p['name'], p['activate'])
+        elif p['state'] == 'disabled':
+            disable_policy(module, p['name'], p['activate'])
+
+    if p['activate']:
+        if not module.check_mode:
+            activate(module)
+        module.exit_json(changed=True, msg="activated awall rules")
+
+    module.fail_json(msg="no action defined")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/beadm.py b/ansible_collections/community/general/plugins/modules/beadm.py
new file mode 100644
index 000000000..8857fd846
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/beadm.py
@@ -0,0 +1,415 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adam Števko
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: beadm
+short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
+description:
+  - Create, delete or activate ZFS boot environments.
+  - Mount and unmount ZFS boot environments.
+author: Adam Števko (@xen0l)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - ZFS boot environment name.
+    type: str
+    required: true
+    aliases: [ "be" ]
+  snapshot:
+    description:
+      - If specified, the new boot environment will be cloned from the given
+        snapshot or inactive boot environment.
+    type: str
+  description:
+    description:
+      - Associate a description with a new boot environment. This option is
+        available only on Solarish platforms.
+    type: str
+  options:
+    description:
+      - Create the datasets for new BE with specific ZFS properties.
+      - Multiple options can be specified.
+      - This option is available only on Solarish platforms.
+    type: str
+  mountpoint:
+    description:
+      - Path where to mount the ZFS boot environment.
+    type: path
+  state:
+    description:
+      - Create or delete ZFS boot environment.
+    type: str
+    choices: [ absent, activated, mounted, present, unmounted ]
+    default: present
+  force:
+    description:
+      - Specifies if the unmount should be forced.
+    type: bool
+    default: false
+'''
+
+EXAMPLES = r'''
+- name: Create ZFS boot environment
+  community.general.beadm:
+    name: upgrade-be
+    state: present
+
+- name: Create ZFS boot environment from existing inactive boot environment
+  community.general.beadm:
+    name: upgrade-be
+    snapshot: be@old
+    state: present
+
+- name: Create ZFS boot environment with compression enabled and description "upgrade"
+  community.general.beadm:
+    name: upgrade-be
+    options: "compression=on"
+    description: upgrade
+    state: present
+
+- name: Delete ZFS boot environment
+  community.general.beadm:
+    name: old-be
+    state: absent
+
+- name: Mount ZFS boot environment on /tmp/be
+  community.general.beadm:
+    name: BE
+    mountpoint: /tmp/be
+    state: mounted
+
+- name: Unmount ZFS boot environment
+  community.general.beadm:
+    name: BE
+    state: unmounted
+
+- name: Activate ZFS boot environment
+  community.general.beadm:
+    name: upgrade-be
+    state: activated
+'''
+
+RETURN = r'''
+name:
+    description: BE name
+    returned: always
+    type: str
+    sample: pre-upgrade
+snapshot:
+    description: ZFS snapshot to create BE from
+    returned: always
+    type: str
+    sample: rpool/ROOT/oi-hipster@fresh
+description:
+    description: BE description
+    returned: always
+    type: str
+    sample: Upgrade from 9.0 to 10.0
+options:
+    description: BE additional options
+    returned: always
+    type: str
+    sample: compression=on
+mountpoint:
+    description: BE mountpoint
+    returned: always
+    type: str
+    sample: /mnt/be
+state:
+    description: state of the target
+    returned: always
+    type: str
+    sample: present
+force:
+    description: If forced action is wanted
+    returned: always
+    type: bool
+    sample: false
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BE(object):
+    def __init__(self, module):
+        self.module = module
+
+        self.name = module.params['name']
+        self.snapshot = module.params['snapshot']
+        self.description = module.params['description']
+        self.options = module.params['options']
+        self.mountpoint = module.params['mountpoint']
+        self.state = module.params['state']
+        self.force = module.params['force']
+        self.is_freebsd = os.uname()[0] == 'FreeBSD'
+
+    def _beadm_list(self):
+        cmd = [self.module.get_bin_path('beadm'), 'list', '-H']
+        if '@' in self.name:
+            cmd.append('-s')
+        return self.module.run_command(cmd)
+
+    def _find_be_by_name(self, out):
+        if '@' in self.name:
+            for line in out.splitlines():
+                if self.is_freebsd:
+                    check = line.split()
+                    if check == []:
+                        continue
+                    full_name = check[0].split('/')
+                    if full_name == []:
+                        continue
+                    check[0] = full_name[-1]
+                    if check[0] == self.name:
+                        return check
+                else:
+                    check = line.split(';')
+                    if check[0] == self.name:
+                        return check
+        else:
+            for line in out.splitlines():
+                if self.is_freebsd:
+                    check = line.split()
+                    if check[0] == self.name:
+                        return check
+                else:
+                    check = line.split(';')
+                    if check[0] == self.name:
+                        return check
+        return None
+
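+    # Parsing note: `beadm list -H` prints one boot environment per line, with
+    # fields separated by whitespace on FreeBSD and by ';' on Solarish
+    # platforms, which is why _find_be_by_name() above splits on whitespace or
+    # ';' depending on the detected platform.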
+    def exists(self):
+        (rc, out, dummy) = self._beadm_list()
+
+        if rc == 0:
+            if self._find_be_by_name(out):
+                return True
+            else:
+                return False
+        else:
+            return False
+
+    def is_activated(self):
+        (rc, out, dummy) = self._beadm_list()
+
+        if rc == 0:
+            line = self._find_be_by_name(out)
+            if line is None:
+                return False
+            if self.is_freebsd:
+                if 'R' in line[1]:
+                    return True
+            else:
+                if 'R' in line[2]:
+                    return True
+
+        return False
+
+    def activate_be(self):
+        cmd = [self.module.get_bin_path('beadm'), 'activate', self.name]
+        return self.module.run_command(cmd)
+
+    def create_be(self):
+        cmd = [self.module.get_bin_path('beadm'), 'create']
+
+        if self.snapshot:
+            cmd.extend(['-e', self.snapshot])
+        if not self.is_freebsd:
+            if self.description:
+                cmd.extend(['-d', self.description])
+            if self.options:
+                cmd.extend(['-o', self.options])
+
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def destroy_be(self):
+        cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name]
+        return self.module.run_command(cmd)
+
+    def is_mounted(self):
+        (rc, out, dummy) = self._beadm_list()
+
+        if rc == 0:
+            line = self._find_be_by_name(out)
+            if line is None:
+                return False
+            if self.is_freebsd:
+                # On FreeBSD, we exclude currently mounted BE on /, as it is
+                # special and can be activated even if it is mounted. That is not
+                # possible with non-root BEs.
+                if line[2] != '-' and line[2] != '/':
+                    return True
+            else:
+                if line[3]:
+                    return True
+
+        return False
+
+    def mount_be(self):
+        cmd = [self.module.get_bin_path('beadm'), 'mount', self.name]
+
+        if self.mountpoint:
+            cmd.append(self.mountpoint)
+
+        return self.module.run_command(cmd)
+
+    def unmount_be(self):
+        cmd = [self.module.get_bin_path('beadm'), 'unmount']
+        if self.force:
+            cmd.append('-f')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True, aliases=['be']),
+            snapshot=dict(type='str'),
+            description=dict(type='str'),
+            options=dict(type='str'),
+            mountpoint=dict(type='path'),
+            state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
+            force=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True,
+    )
+
+    be = BE(module)
+
+    rc = None
+    out = ''
+    err = ''
+    result = {}
+    result['name'] = be.name
+    result['state'] = be.state
+
+    if be.snapshot:
+        result['snapshot'] = be.snapshot
+
+    if be.description:
+        result['description'] = be.description
+
+    if be.options:
+        result['options'] = be.options
+
+    if be.mountpoint:
+        result['mountpoint'] = be.mountpoint
+
+    if be.state == 'absent':
+        # beadm on FreeBSD and Solarish systems differs in delete behaviour in
+        # that we are not allowed to delete activated BE on FreeBSD while on
+        # Solarish systems we cannot delete BE if it is mounted. We add mount
+        # check for both platforms as BE should be explicitly unmounted before
+        # being deleted. On FreeBSD, we also check if the BE is activated.
+        if be.exists():
+            if not be.is_mounted():
+                if module.check_mode:
+                    module.exit_json(changed=True)
+
+                if be.is_freebsd:
+                    if be.is_activated():
+                        module.fail_json(msg='Unable to remove active BE!')
+
+                (rc, out, err) = be.destroy_be()
+
+                if rc != 0:
+                    module.fail_json(msg='Error while destroying BE: "%s"' % err,
+                                     name=be.name,
+                                     stderr=err,
+                                     rc=rc)
+            else:
+                module.fail_json(msg='Unable to remove BE as it is mounted!')
+
+    elif be.state == 'present':
+        if not be.exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            (rc, out, err) = be.create_be()
+
+            if rc != 0:
+                module.fail_json(msg='Error while creating BE: "%s"' % err,
+                                 name=be.name,
+                                 stderr=err,
+                                 rc=rc)
+
+    elif be.state == 'activated':
+        if not be.is_activated():
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            # On FreeBSD, beadm is unable to activate mounted BEs, so we add
+            # an explicit check for that case.
+ if be.is_freebsd: + if be.is_mounted(): + module.fail_json(msg='Unable to activate mounted BE!') + + (rc, out, err) = be.activate_be() + + if rc != 0: + module.fail_json(msg='Error while activating BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + elif be.state == 'mounted': + if not be.is_mounted(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = be.mount_be() + + if rc != 0: + module.fail_json(msg='Error while mounting BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + + elif be.state == 'unmounted': + if be.is_mounted(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = be.unmount_be() + + if rc != 0: + module.fail_json(msg='Error while unmounting BE: "%s"' % err, + name=be.name, + stderr=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bearychat.py b/ansible_collections/community/general/plugins/modules/bearychat.py new file mode 100644 index 000000000..28f1f8fcd --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bearychat.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016, Jiangge Zhang +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: bearychat +short_description: Send BearyChat notifications +description: + - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com) + via the Incoming Robot integration. +author: "Jiangge Zhang (@tonyseek)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + url: + type: str + description: + - BearyChat WebHook URL. This authenticates you to the bearychat + service. It looks like + C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60). + required: true + text: + type: str + description: + - Message to send. + markdown: + description: + - If C(true), text will be parsed as markdown. + default: true + type: bool + channel: + type: str + description: + - Channel to send the message to. If absent, the message goes to the + default channel selected by the I(url). + attachments: + type: list + elements: dict + description: + - Define a list of attachments. For more information, see + https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments +''' + +EXAMPLES = """ +- name: Send notification message via BearyChat + local_action: + module: bearychat + url: | + https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 + text: "{{ inventory_hostname }} completed" + +- name: Send notification message via BearyChat all options + local_action: + module: bearychat + url: | + https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 + text: "{{ inventory_hostname }} completed" + markdown: false + channel: "#ansible" + attachments: + - title: "Ansible on {{ inventory_hostname }}" + text: "May the Force be with you." 
+ color: "#ffffff" + images: + - http://example.com/index.png +""" + +RETURN = """ +msg: + description: execution result + returned: success + type: str + sample: "OK" +""" + +try: + from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse + HAS_URLPARSE = True +except Exception: + HAS_URLPARSE = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def build_payload_for_bearychat(module, text, markdown, channel, attachments): + payload = {} + if text is not None: + payload['text'] = text + if markdown is not None: + payload['markdown'] = markdown + if channel is not None: + payload['channel'] = channel + if attachments is not None: + payload.setdefault('attachments', []).extend( + build_payload_for_bearychat_attachment( + module, item.get('title'), item.get('text'), item.get('color'), + item.get('images')) + for item in attachments) + payload = 'payload=%s' % module.jsonify(payload) + return payload + + +def build_payload_for_bearychat_attachment(module, title, text, color, images): + attachment = {} + if title is not None: + attachment['title'] = title + if text is not None: + attachment['text'] = text + if color is not None: + attachment['color'] = color + if images is not None: + target_images = attachment.setdefault('images', []) + if not isinstance(images, (list, tuple)): + images = [images] + for image in images: + if isinstance(image, dict) and 'url' in image: + image = {'url': image['url']} + elif hasattr(image, 'startswith') and image.startswith('http'): + image = {'url': image} + else: + module.fail_json( + msg="BearyChat doesn't have support for this kind of " + "attachment image") + target_images.append(image) + return attachment + + +def do_notify_bearychat(module, url, payload): + response, info = fetch_url(module, url, data=payload) + if info['status'] != 200: + url_info = urlparse(url) + obscured_incoming_webhook = urlunparse( + (url_info.scheme, url_info.netloc, '[obscured]', '', '', '')) + module.fail_json( + msg=" failed to send %s to %s: %s" % ( + payload, obscured_incoming_webhook, info['msg'])) + + +def main(): + module = AnsibleModule(argument_spec={ + 'url': dict(type='str', required=True, no_log=True), + 'text': dict(type='str'), + 'markdown': dict(default=True, type='bool'), + 'channel': dict(type='str'), + 'attachments': dict(type='list', elements='dict'), + }) + + if not HAS_URLPARSE: + module.fail_json(msg='urlparse is not installed') + + url = module.params['url'] + text = module.params['text'] + markdown = module.params['markdown'] + channel = module.params['channel'] + attachments = module.params['attachments'] + + payload = build_payload_for_bearychat( + module, text, markdown, channel, attachments) + do_notify_bearychat(module, url, payload) + + module.exit_json(msg="OK") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bigpanda.py b/ansible_collections/community/general/plugins/modules/bigpanda.py new file mode 100644 index 000000000..bab200bc4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bigpanda.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: bigpanda 
+author: "Hagai Kariti (@hkariti)" +short_description: Notify BigPanda about deployments +description: + - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + component: + type: str + description: + - "The name of the component being deployed. Ex: billing" + required: true + aliases: ['name'] + version: + type: str + description: + - The deployment version. + required: true + token: + type: str + description: + - API token. + required: true + state: + type: str + description: + - State of the deployment. + required: true + choices: ['started', 'finished', 'failed'] + hosts: + type: str + description: + - Name of affected host name. Can be a list. + - If not specified, it defaults to the remote system's hostname. + required: false + aliases: ['host'] + env: + type: str + description: + - The environment name, typically 'production', 'staging', etc. + required: false + owner: + type: str + description: + - The person responsible for the deployment. + required: false + description: + type: str + description: + - Free text description of the deployment. + required: false + url: + type: str + description: + - Base URL of the API server. + required: false + default: https://api.bigpanda.io + validate_certs: + description: + - If C(false), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: true + type: bool + deployment_message: + type: str + description: + - Message about the deployment. + version_added: '0.2.0' + source_system: + type: str + description: + - Source system used in the requests to the API + default: ansible + +# informational: requirements for nodes +requirements: [ ] +''' + +EXAMPLES = ''' +- name: Notify BigPanda about a deployment + community.general.bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + state: started + +- name: Notify BigPanda about a deployment + community.general.bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + state: finished + +# If outside servers aren't reachable from your machine, use delegate_to and override hosts: +- name: Notify BigPanda about a deployment + community.general.bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + hosts: '{{ ansible_hostname }}' + state: started + delegate_to: localhost + register: deployment + +- name: Notify BigPanda about a deployment + community.general.bigpanda: + component: '{{ deployment.component }}' + version: '{{ deployment.version }}' + token: '{{ deployment.token }}' + state: finished + delegate_to: localhost +''' + +# =========================================== +# Module execution. 
+# +import json +import socket +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import fetch_url + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + component=dict(required=True, aliases=['name']), + version=dict(required=True), + token=dict(required=True, no_log=True), + state=dict(required=True, choices=['started', 'finished', 'failed']), + hosts=dict(required=False, aliases=['host']), + env=dict(required=False), + owner=dict(required=False), + description=dict(required=False), + deployment_message=dict(required=False), + source_system=dict(required=False, default='ansible'), + validate_certs=dict(default=True, type='bool'), + url=dict(required=False, default='https://api.bigpanda.io'), + ), + supports_check_mode=True, + ) + + token = module.params['token'] + state = module.params['state'] + url = module.params['url'] + + # Build the common request body + body = dict() + for k in ('component', 'version', 'hosts'): + v = module.params[k] + if v is not None: + body[k] = v + if body.get('hosts') is None: + body['hosts'] = [socket.gethostname()] + + if not isinstance(body['hosts'], list): + body['hosts'] = [body['hosts']] + + # Insert state-specific attributes to body + if state == 'started': + for k in ('source_system', 'env', 'owner', 'description'): + v = module.params[k] + if v is not None: + body[k] = v + + request_url = url + '/data/events/deployments/start' + else: + message = module.params['deployment_message'] + if message is not None: + body['errorMessage'] = message + + if state == 'finished': + body['status'] = 'success' + else: + body['status'] = 'failure' + + request_url = url + '/data/events/deployments/end' + + # Build the deployment object we return + deployment = dict(token=token, url=url) + deployment.update(body) + if 'errorMessage' in deployment: + message = deployment.pop('errorMessage') + deployment['message'] = message + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True, **deployment) + + # Send the data to bigpanda + data = json.dumps(body) + headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} + try: + response, info = fetch_url(module, request_url, data=data, headers=headers) + if info['status'] == 200: + module.exit_json(changed=True, **deployment) + else: + module.fail_json(msg=json.dumps(info)) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py b/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py new file mode 100644 index 000000000..5ef199f7a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py @@ -0,0 +1,281 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: bitbucket_access_key +short_description: Manages Bitbucket repository access keys +description: + - Manages Bitbucket repository access keys (also called deploy keys). 
+author: + - Evgeniy Krysanov (@catcombo) +extends_documentation_fragment: + - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repository: + description: + - The repository name. + type: str + required: true + workspace: + description: + - The repository owner. + - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user). + type: str + required: true + key: + description: + - The SSH public key. + type: str + label: + description: + - The key label. + type: str + required: true + state: + description: + - Indicates desired state of the access key. + type: str + required: true + choices: [ absent, present ] +notes: + - Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories. + - Check mode is supported. +''' + +EXAMPLES = r''' +- name: Create access key + community.general.bitbucket_access_key: + repository: 'bitbucket-repo' + workspace: bitbucket_workspace + key: '{{lookup("file", "bitbucket.pub") }}' + label: 'Bitbucket' + state: present + +- name: Delete access key + community.general.bitbucket_access_key: + repository: bitbucket-repo + workspace: bitbucket_workspace + label: Bitbucket + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'required_key': '`key` is required when the `state` is `present`', + 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository', + 'invalid_workspace_or_repo': 'Invalid `repository` or `workspace`', + 'invalid_key': 'Invalid SSH key or key is already in use', +} + +BITBUCKET_API_ENDPOINTS = { + 'deploy-key-list': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL, + 'deploy-key-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_deploy_key(module, bitbucket): + """ + Search for an existing deploy key on Bitbucket + with the label specified in module param `label` + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing deploy key or None if not found + :rtype: dict or None + + Return example:: + + { + "id": 123, + "label": "mykey", + "created_on": "2019-03-23T10:15:21.517377+00:00", + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5", + "type": "deploy_key", + "comment": "", + "last_used": None, + "repository": { + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/mleu/test" + }, + "html": { + "href": "https://bitbucket.org/mleu/test" + }, + "avatar": { + "href": "..." 
+ } + }, + "type": "repository", + "name": "test", + "full_name": "mleu/test", + "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}" + }, + "links": { + "self": { + "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123" + } + }, + } + """ + content = { + 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ) + } + + # Look through the all response pages in search of deploy key we need + while 'next' in content: + info, content = bitbucket.request( + api_url=content['next'], + method='GET', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] != 200: + module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) + + res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None) + + if res is not None: + return res + + return None + + +def create_deploy_key(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ), + method='POST', + data={ + 'key': module.params['key'], + 'label': module.params['label'], + }, + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] == 400: + module.fail_json(msg=error_messages['invalid_key']) + + if info['status'] != 200: + module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format( + label=module.params['label'], + info=info, + )) + + +def delete_deploy_key(module, bitbucket, key_id): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + key_id=key_id, + ), + method='DELETE', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_workspace_or_repo']) + + if info['status'] == 403: + module.fail_json(msg=error_messages['required_permission']) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format( + label=module.params['label'], + info=info, + )) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + workspace=dict( + type='str', required=True, + ), + key=dict(type='str', no_log=False), + label=dict(type='str', required=True), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=BitbucketHelper.bitbucket_required_one_of(), + required_together=BitbucketHelper.bitbucket_required_together(), + ) + + bitbucket = BitbucketHelper(module) + + key = module.params['key'] + state = module.params['state'] + + # Check parameters + if (key is None) and (state == 'present'): + module.fail_json(msg=error_messages['required_key']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing deploy key (if any) + existing_deploy_key = get_existing_deploy_key(module, bitbucket) + changed = False + + # Create new deploy key in case it doesn't exists + if not 
existing_deploy_key and (state == 'present'): + if not module.check_mode: + create_deploy_key(module, bitbucket) + changed = True + + # Update deploy key if the old value does not match the new one + elif existing_deploy_key and (state == 'present'): + if not key.startswith(existing_deploy_key.get('key')): + if not module.check_mode: + # Bitbucket doesn't support update key for the same label, + # so we need to delete the old one first + delete_deploy_key(module, bitbucket, existing_deploy_key['id']) + create_deploy_key(module, bitbucket) + changed = True + + # Delete deploy key + elif existing_deploy_key and (state == 'absent'): + if not module.check_mode: + delete_deploy_key(module, bitbucket, existing_deploy_key['id']) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py new file mode 100644 index 000000000..d39c054b1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: bitbucket_pipeline_key_pair +short_description: Manages Bitbucket pipeline SSH key pair +description: + - Manages Bitbucket pipeline SSH key pair. +author: + - Evgeniy Krysanov (@catcombo) +extends_documentation_fragment: + - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repository: + description: + - The repository name. + type: str + required: true + workspace: + description: + - The repository owner. + - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user). + type: str + required: true + public_key: + description: + - The public key. + type: str + private_key: + description: + - The private key. + type: str + state: + description: + - Indicates desired state of the key pair. + type: str + required: true + choices: [ absent, present ] +notes: + - Check mode is supported. 
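+  - I(public_key) and I(private_key) are both required when I(state=present).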
+'''
+
+EXAMPLES = r'''
+- name: Create or update SSH key pair
+  community.general.bitbucket_pipeline_key_pair:
+    repository: 'bitbucket-repo'
+    workspace: bitbucket_workspace
+    public_key: '{{ lookup("file", "bitbucket.pub") }}'
+    private_key: '{{ lookup("file", "bitbucket") }}'
+    state: present
+
+- name: Remove SSH key pair
+  community.general.bitbucket_pipeline_key_pair:
+    repository: bitbucket-repo
+    workspace: bitbucket_workspace
+    state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+    'invalid_params': 'Account, repository or SSH key pair was not found',
+    'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+    'ssh-key-pair': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_ssh_key_pair(module, bitbucket):
+    """
+    Retrieves an existing SSH key pair from the repository
+    specified in the module param `repository`
+
+    :param module: instance of the :class:`AnsibleModule`
+    :param bitbucket: instance of the :class:`BitbucketHelper`
+    :return: existing key pair or None if not found
+    :rtype: dict or None
+
+    Return example::
+
+        {
+            "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
+            "type": "pipeline_ssh_key_pair"
+        }
+    """
+    api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+        workspace=module.params['workspace'],
+        repo_slug=module.params['repository'],
+    )
+
+    info, content = bitbucket.request(
+        api_url=api_url,
+        method='GET',
+    )
+
+    if info['status'] == 404:
+        # Account, repository or SSH key pair was not found.
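+        # Returning None here (rather than failing) keeps state=absent
+        # idempotent when no key pair is configured yet.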
+ return None + + return content + + +def update_ssh_key_pair(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ), + method='PUT', + data={ + 'private_key': module.params['private_key'], + 'public_key': module.params['public_key'], + }, + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_params']) + + if info['status'] != 200: + module.fail_json(msg='Failed to create or update pipeline ssh key pair : {0}'.format(info)) + + +def delete_ssh_key_pair(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ), + method='DELETE', + ) + + if info['status'] == 404: + module.fail_json(msg=error_messages['invalid_params']) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info)) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + workspace=dict(type='str', required=True), + public_key=dict(type='str'), + private_key=dict(type='str', no_log=True), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=BitbucketHelper.bitbucket_required_one_of(), + required_together=BitbucketHelper.bitbucket_required_together(), + ) + + bitbucket = BitbucketHelper(module) + + state = module.params['state'] + public_key = module.params['public_key'] + private_key = module.params['private_key'] + + # Check parameters + if ((public_key is None) or (private_key is None)) and (state == 'present'): + module.fail_json(msg=error_messages['required_keys']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing ssh key + key_pair = get_existing_ssh_key_pair(module, bitbucket) + changed = False + + # Create or update key pair + if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'): + if not module.check_mode: + update_ssh_key_pair(module, bitbucket) + changed = True + + # Delete key pair + elif key_pair and (state == 'absent'): + if not module.check_mode: + delete_ssh_key_pair(module, bitbucket) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py new file mode 100644 index 000000000..28ff48739 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py @@ -0,0 +1,304 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: bitbucket_pipeline_known_host +short_description: Manages Bitbucket pipeline known hosts +description: + - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu. 
+ - The host fingerprint will be retrieved automatically, but in case of an error, one can use I(key) field to specify it manually. +author: + - Evgeniy Krysanov (@catcombo) +extends_documentation_fragment: + - community.general.bitbucket + - community.general.attributes +requirements: + - paramiko +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repository: + description: + - The repository name. + type: str + required: true + workspace: + description: + - The repository owner. + - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user). + type: str + required: true + name: + description: + - The FQDN of the known host. + type: str + required: true + key: + description: + - The public key. + type: str + state: + description: + - Indicates desired state of the record. + type: str + required: true + choices: [ absent, present ] +notes: + - Check mode is supported. +''' + +EXAMPLES = r''' +- name: Create known hosts from the list + community.general.bitbucket_pipeline_known_host: + repository: 'bitbucket-repo' + workspace: bitbucket_workspace + name: '{{ item }}' + state: present + with_items: + - bitbucket.org + - example.com + +- name: Remove known host + community.general.bitbucket_pipeline_known_host: + repository: bitbucket-repo + workspace: bitbucket_workspace + name: bitbucket.org + state: absent + +- name: Specify public key file + community.general.bitbucket_pipeline_known_host: + repository: bitbucket-repo + workspace: bitbucket_workspace + name: bitbucket.org + key: '{{lookup("file", "bitbucket.pub") }}' + state: absent +''' + +RETURN = r''' # ''' + +import socket + +try: + import paramiko + HAS_PARAMIKO = True +except ImportError: + HAS_PARAMIKO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'invalid_params': 'Account or repository was not found', + 'unknown_key_type': 'Public key type is unknown', +} + +BITBUCKET_API_ENDPOINTS = { + 'known-host-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL, + 'known-host-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_known_host(module, bitbucket): + """ + Search for a host in Bitbucket pipelines known hosts + with the name specified in module param `name` + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing host or None if not found + :rtype: dict or None + + Return example:: + + { + 'type': 'pipeline_known_host', + 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}' + 'hostname': 'bitbucket.org', + 'public_key': { + 'type': 'pipeline_ssh_public_key', + 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40', + 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A', + 'key_type': 'ssh-rsa', + 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw==' + }, + } + """ + content = { + 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ) + } + + # Look through all response pages in search of hostname we need + while 'next' in content: + info, content = bitbucket.request( + 
api_url=content['next'],
+            method='GET',
+        )
+
+        if info['status'] == 404:
+            module.fail_json(msg='Invalid `repository` or `workspace`.')
+
+        if info['status'] != 200:
+            module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info))
+
+        host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None)
+
+        if host is not None:
+            return host
+
+    return None
+
+
+def get_host_key(module, hostname):
+    """
+    Fetches public key for specified host
+
+    :param module: instance of the :class:`AnsibleModule`
+    :param hostname: host name
+    :return: key type and key content
+    :rtype: tuple
+
+    Return example::
+
+        (
+            'ssh-rsa',
+            'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
+        )
+    """
+    try:
+        sock = socket.socket()
+        sock.connect((hostname, 22))
+    except socket.error:
+        module.fail_json(msg='Error opening socket to {0}'.format(hostname))
+
+    try:
+        trans = paramiko.transport.Transport(sock)
+        trans.start_client()
+        host_key = trans.get_remote_server_key()
+    except paramiko.SSHException:
+        module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))
+
+    trans.close()
+    sock.close()
+
+    key_type = host_key.get_name()
+    key = host_key.get_base64()
+
+    return key_type, key
+
+
+def create_known_host(module, bitbucket):
+    hostname = module.params['name']
+    key_param = module.params['key']
+
+    if key_param is None:
+        key_type, key = get_host_key(module, hostname)
+    elif ' ' in key_param:
+        key_type, key = key_param.split(' ', 1)
+    else:
+        module.fail_json(msg=error_messages['unknown_key_type'])
+
+    info, content = bitbucket.request(
+        api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+            workspace=module.params['workspace'],
+            repo_slug=module.params['repository'],
+        ),
+        method='POST',
+        data={
+            'hostname': hostname,
+            'public_key': {
+                'key_type': key_type,
+                'key': key,
+            }
+        },
+    )
+
+    if info['status'] == 404:
+        module.fail_json(msg=error_messages['invalid_params'])
+
+    if info['status'] != 201:
+        module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
+            hostname=module.params['name'],
+            info=info,
+        ))
+
+
+def delete_known_host(module, bitbucket, known_host_uuid):
+    info, content = bitbucket.request(
+        api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
+            workspace=module.params['workspace'],
+            repo_slug=module.params['repository'],
+            known_host_uuid=known_host_uuid,
+        ),
+        method='DELETE',
+    )
+
+    if info['status'] == 404:
+        module.fail_json(msg=error_messages['invalid_params'])
+
+    if info['status'] != 204:
+        module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
+            hostname=module.params['name'],
+            info=info,
+        ))
+
+
+def main():
+    argument_spec = BitbucketHelper.bitbucket_argument_spec()
+    argument_spec.update(
+        repository=dict(type='str', required=True),
+        workspace=dict(type='str', required=True),
+        name=dict(type='str', required=True),
+        key=dict(type='str', no_log=False),
+        state=dict(type='str', choices=['present', 'absent'], required=True),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=BitbucketHelper.bitbucket_required_one_of(),
+        required_together=BitbucketHelper.bitbucket_required_together(),
+    )
+
+    if (module.params['key'] is None) and (not HAS_PARAMIKO):
+        module.fail_json(msg='`paramiko` package not found, please install it.')
+
+    bitbucket = BitbucketHelper(module)
+
+    # Retrieve access token for authorized API requests
+    bitbucket.fetch_access_token()
+
+    # Retrieve existing 
known host + existing_host = get_existing_known_host(module, bitbucket) + state = module.params['state'] + changed = False + + # Create new host in case it doesn't exists + if not existing_host and (state == 'present'): + if not module.check_mode: + create_known_host(module, bitbucket) + changed = True + + # Delete host + elif existing_host and (state == 'absent'): + if not module.check_mode: + delete_known_host(module, bitbucket, existing_host['uuid']) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py new file mode 100644 index 000000000..eac0d18dd --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: bitbucket_pipeline_variable +short_description: Manages Bitbucket pipeline variables +description: + - Manages Bitbucket pipeline variables. +author: + - Evgeniy Krysanov (@catcombo) +extends_documentation_fragment: + - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repository: + description: + - The repository name. + type: str + required: true + workspace: + description: + - The repository owner. + - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user). + type: str + required: true + name: + description: + - The pipeline variable name. + type: str + required: true + value: + description: + - The pipeline variable value. + type: str + secured: + description: + - Whether to encrypt the variable value. + type: bool + default: false + state: + description: + - Indicates desired state of the variable. + type: str + required: true + choices: [ absent, present ] +notes: + - Check mode is supported. + - For secured values return parameter C(changed) is always C(True). 
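+  - I(value) is required when I(state=present).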
+''' + +EXAMPLES = r''' +- name: Create or update pipeline variables from the list + community.general.bitbucket_pipeline_variable: + repository: 'bitbucket-repo' + workspace: bitbucket_workspace + name: '{{ item.name }}' + value: '{{ item.value }}' + secured: '{{ item.secured }}' + state: present + with_items: + - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: false } + - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: true } + +- name: Remove pipeline variable + community.general.bitbucket_pipeline_variable: + repository: bitbucket-repo + workspace: bitbucket_workspace + name: AWS_ACCESS_KEY + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule, _load_params +from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper + +error_messages = { + 'required_value': '`value` is required when the `state` is `present`', +} + +BITBUCKET_API_ENDPOINTS = { + 'pipeline-variable-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL, + 'pipeline-variable-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL, +} + + +def get_existing_pipeline_variable(module, bitbucket): + """ + Search for a pipeline variable + + :param module: instance of the :class:`AnsibleModule` + :param bitbucket: instance of the :class:`BitbucketHelper` + :return: existing variable or None if not found + :rtype: dict or None + + Return example:: + + { + 'name': 'AWS_ACCESS_OBKEY_ID', + 'value': 'x7HU80-a2', + 'type': 'pipeline_variable', + 'secured': False, + 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}' + } + + The `value` key in dict is absent in case of secured variable. 
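+
+    Note that the Bitbucket API exposes the variable name under a `key` field;
+    it is remapped to `name` in the returned dict.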
+ """ + variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ) + # Look through the all response pages in search of variable we need + page = 1 + while True: + next_url = "%s?page=%s" % (variables_base_url, page) + info, content = bitbucket.request( + api_url=next_url, + method='GET', + ) + + if info['status'] == 404: + module.fail_json(msg='Invalid `repository` or `workspace`.') + + if info['status'] != 200: + module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info)) + + # We are at the end of list + if 'pagelen' in content and content['pagelen'] == 0: + return None + + page += 1 + var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None) + + if var is not None: + var['name'] = var.pop('key') + return var + + +def create_pipeline_variable(module, bitbucket): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + ), + method='POST', + data={ + 'key': module.params['name'], + 'value': module.params['value'], + 'secured': module.params['secured'], + }, + ) + + if info['status'] != 201: + module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +def update_pipeline_variable(module, bitbucket, variable_uuid): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + variable_uuid=variable_uuid, + ), + method='PUT', + data={ + 'value': module.params['value'], + 'secured': module.params['secured'], + }, + ) + + if info['status'] != 200: + module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +def delete_pipeline_variable(module, bitbucket, variable_uuid): + info, content = bitbucket.request( + api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format( + workspace=module.params['workspace'], + repo_slug=module.params['repository'], + variable_uuid=variable_uuid, + ), + method='DELETE', + ) + + if info['status'] != 204: + module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format( + name=module.params['name'], + info=info, + )) + + +class BitBucketPipelineVariable(AnsibleModule): + def __init__(self, *args, **kwargs): + params = _load_params() or {} + if params.get('secured'): + kwargs['argument_spec']['value'].update({'no_log': True}) + super(BitBucketPipelineVariable, self).__init__(*args, **kwargs) + + +def main(): + argument_spec = BitbucketHelper.bitbucket_argument_spec() + argument_spec.update( + repository=dict(type='str', required=True), + workspace=dict(type='str', required=True), + name=dict(type='str', required=True), + value=dict(type='str'), + secured=dict(type='bool', default=False), + state=dict(type='str', choices=['present', 'absent'], required=True), + ) + module = BitBucketPipelineVariable( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=BitbucketHelper.bitbucket_required_one_of(), + required_together=BitbucketHelper.bitbucket_required_together(), + ) + + bitbucket = BitbucketHelper(module) + + value = module.params['value'] + state = module.params['state'] + secured = module.params['secured'] + + # Check parameters + if (value is 
None) and (state == 'present'): + module.fail_json(msg=error_messages['required_value']) + + # Retrieve access token for authorized API requests + bitbucket.fetch_access_token() + + # Retrieve existing pipeline variable (if any) + existing_variable = get_existing_pipeline_variable(module, bitbucket) + changed = False + + # Create new variable in case it doesn't exists + if not existing_variable and (state == 'present'): + if not module.check_mode: + create_pipeline_variable(module, bitbucket) + changed = True + + # Update variable if it is secured or the old value does not match the new one + elif existing_variable and (state == 'present'): + if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value): + if not module.check_mode: + update_pipeline_variable(module, bitbucket, existing_variable['uuid']) + changed = True + + # Delete variable + elif existing_variable and (state == 'absent'): + if not module.check_mode: + delete_pipeline_variable(module, bitbucket, existing_variable['uuid']) + changed = True + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bower.py b/ansible_collections/community/general/plugins/modules/bower.py new file mode 100644 index 000000000..1824e68bb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bower.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Michael Warkentin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: bower +short_description: Manage bower packages with bower +description: + - Manage bower packages with bower +author: "Michael Warkentin (@mwarkentin)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + description: + - The name of a bower package to install + offline: + description: + - Install packages from local cache, if the packages were installed before + type: bool + default: false + production: + description: + - Install with --production flag + type: bool + default: false + path: + type: path + description: + - The base path where to install the bower packages + required: true + relative_execpath: + type: path + description: + - Relative path to bower executable from install path + state: + type: str + description: + - The state of the bower package + default: present + choices: [ "present", "absent", "latest" ] + version: + type: str + description: + - The version to be installed +''' + +EXAMPLES = ''' +- name: Install "bootstrap" bower package. + community.general.bower: + name: bootstrap + +- name: Install "bootstrap" bower package on version 3.1.1. + community.general.bower: + name: bootstrap + version: '3.1.1' + +- name: Remove the "bootstrap" bower package. + community.general.bower: + name: bootstrap + state: absent + +- name: Install packages based on bower.json. + community.general.bower: + path: /app/location + +- name: Update packages based on bower.json to their latest version. 
+ community.general.bower: + path: /app/location + state: latest + +# install bower locally and run from there +- npm: + path: /app/location + name: bower + global: false +- community.general.bower: + path: /app/location + relative_execpath: node_modules/.bin +''' +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +class Bower(object): + def __init__(self, module, **kwargs): + self.module = module + self.name = kwargs['name'] + self.offline = kwargs['offline'] + self.production = kwargs['production'] + self.path = kwargs['path'] + self.relative_execpath = kwargs['relative_execpath'] + self.version = kwargs['version'] + + if kwargs['version']: + self.name_version = self.name + '#' + self.version + else: + self.name_version = self.name + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [] + + if self.relative_execpath: + cmd.append(os.path.join(self.path, self.relative_execpath, "bower")) + if not os.path.isfile(cmd[-1]): + self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath) + else: + cmd.append("bower") + + cmd.extend(args) + cmd.extend(['--config.interactive=false', '--allow-root']) + + if self.name: + cmd.append(self.name_version) + + if self.offline: + cmd.append('--offline') + + if self.production: + cmd.append('--production') + + # If path is specified, cd into that path and run the command. + cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="path %s is not a directory" % self.path) + cwd = self.path + + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out + return '' + + def list(self): + cmd = ['list', '--json'] + + installed = list() + missing = list() + outdated = list() + data = json.loads(self._exec(cmd, True, False)) + if 'dependencies' in data: + for dep in data['dependencies']: + dep_data = data['dependencies'][dep] + if dep_data.get('missing', False): + missing.append(dep) + elif ('version' in dep_data['pkgMeta'] and + 'update' in dep_data and + dep_data['pkgMeta']['version'] != dep_data['update']['latest']): + outdated.append(dep) + elif dep_data.get('incompatible', False): + outdated.append(dep) + else: + installed.append(dep) + # Named dependency not installed + else: + missing.append(self.name) + + return installed, missing, outdated + + def install(self): + return self._exec(['install']) + + def update(self): + return self._exec(['update']) + + def uninstall(self): + return self._exec(['uninstall']) + + +def main(): + arg_spec = dict( + name=dict(default=None), + offline=dict(default=False, type='bool'), + production=dict(default=False, type='bool'), + path=dict(required=True, type='path'), + relative_execpath=dict(default=None, required=False, type='path'), + state=dict(default='present', choices=['present', 'absent', 'latest', ]), + version=dict(default=None), + ) + module = AnsibleModule( + argument_spec=arg_spec + ) + + name = module.params['name'] + offline = module.params['offline'] + production = module.params['production'] + path = module.params['path'] + relative_execpath = module.params['relative_execpath'] + state = module.params['state'] + version = module.params['version'] + + if state == 'absent' and not name: + module.fail_json(msg='uninstalling a package is only available for named packages') + + bower = Bower(module, name=name, offline=offline, 
production=production, path=path, relative_execpath=relative_execpath, version=version) + + changed = False + if state == 'present': + installed, missing, outdated = bower.list() + if missing: + changed = True + bower.install() + elif state == 'latest': + installed, missing, outdated = bower.list() + if missing or outdated: + changed = True + bower.update() + else: # Absent + installed, missing, outdated = bower.list() + if name in installed: + changed = True + bower.uninstall() + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/btrfs_info.py b/ansible_collections/community/general/plugins/modules/btrfs_info.py new file mode 100644 index 000000000..c367b9ed1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/btrfs_info.py @@ -0,0 +1,109 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: btrfs_info +short_description: Query btrfs filesystem info +version_added: "6.6.0" +description: Query status of available btrfs filesystems, including uuid, label, subvolumes and mountpoints. + +author: + - Gregory Furlong (@gnfzdz) + +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +''' + +EXAMPLES = r''' + +- name: Query information about mounted btrfs filesystems + community.general.btrfs_info: + register: my_btrfs_info + +''' + +RETURN = r''' + +filesystems: + description: Summaries of the current state for all btrfs filesystems found on the target host. + type: list + elements: dict + returned: success + contains: + uuid: + description: A unique identifier assigned to the filesystem. + type: str + sample: 96c9c605-1454-49b8-a63a-15e2584c208e + label: + description: An optional label assigned to the filesystem. + type: str + sample: Tank + devices: + description: A list of devices assigned to the filesystem. + type: list + sample: + - /dev/sda1 + - /dev/sdb1 + default_subvolume: + description: The id of the filesystem's default subvolume. + type: int + sample: 5 + subvolumes: + description: A list of dicts containing metadata for all of the filesystem's subvolumes. + type: list + elements: dict + contains: + id: + description: An identifier assigned to the subvolume, unique within the containing filesystem. + type: int + sample: 256 + mountpoints: + description: Paths where the subvolume is mounted on the targeted host. + type: list + sample: ['/home'] + parent: + description: The identifier of this subvolume's parent. + type: int + sample: 5 + path: + description: The full path of the subvolume relative to the btrfs fileystem's root. 
+ type: str + sample: /@home + +''' + + +from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider +from ansible.module_utils.basic import AnsibleModule + + +def run_module(): + module_args = dict() + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + provider = BtrfsFilesystemsProvider(module) + filesystems = [x.get_summary() for x in provider.get_filesystems()] + result = { + "filesystems": filesystems, + } + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py b/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py new file mode 100644 index 000000000..cd2ac6f97 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py @@ -0,0 +1,682 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: btrfs_subvolume +short_description: Manage btrfs subvolumes +version_added: "6.6.0" + +description: Creates, updates and deletes btrfs subvolumes and snapshots. + +options: + automount: + description: + - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make any required changes. + type: bool + default: false + default: + description: + - Make the subvolume specified by I(name) the filesystem's default subvolume. + type: bool + default: false + filesystem_device: + description: + - A block device contained within the btrfs filesystem to be targeted. + - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted. + type: path + filesystem_label: + description: + - A descriptive label assigned to the btrfs filesystem to be targeted. + - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted. + type: str + filesystem_uuid: + description: + - A unique identifier assigned to the btrfs filesystem to be targeted. + - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted. + type: str + name: + description: + - Name of the subvolume/snapshot to be targeted. + required: true + type: str + recursive: + description: + - When true, indicates that parent/child subvolumes should be created/removedas necessary + to complete the operation (for I(state=present) and I(state=absent) respectively). + type: bool + default: false + snapshot_source: + description: + - Identifies the source subvolume for the created snapshot. + - Infers that the created subvolume is a snapshot. + type: str + snapshot_conflict: + description: + - Policy defining behavior when a subvolume already exists at the path of the requested snapshot. + - C(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required. + Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source. + - C(clobber) - If a subvolume already exists at the requested location, delete it first. + This option is not idempotent and will result in a new snapshot being generated on every execution. 
+      - C(error) - If a subvolume already exists at the requested location, return an error.
+        This option is not idempotent and will result in an error on replay of the module.
+    type: str
+    choices: [ skip, clobber, error ]
+    default: skip
+  state:
+    description:
+      - Indicates the current state of the targeted subvolume.
+    type: str
+    choices: [ absent, present ]
+    default: present
+
+notes:
+  - If any of the options I(filesystem_device), I(filesystem_label) or I(filesystem_uuid) are provided, they must match an existing
+    btrfs filesystem. If none are provided and only a single btrfs filesystem exists or only a single
+    btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.
+
+extends_documentation_fragment:
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: partial
+    details:
+      - In some scenarios it may erroneously report intermediate subvolumes being created.
+        After mounting, if a directory-like file is found where the subvolume would have been created, the operation is skipped.
+  diff_mode:
+    support: none
+
+author:
+  - Gregory Furlong (@gnfzdz)
+'''
+
+EXAMPLES = r'''
+
+- name: Create a @home subvolume under the root subvolume
+  community.general.btrfs_subvolume:
+    name: /@home
+    filesystem_device: /dev/vda2
+
+- name: Remove the @home subvolume if it exists
+  community.general.btrfs_subvolume:
+    name: /@home
+    state: absent
+    filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the root subvolume named @
+  community.general.btrfs_subvolume:
+    name: /@
+    snapshot_source: /
+    filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the root subvolume and make it the new default subvolume
+  community.general.btrfs_subvolume:
+    name: /@
+    snapshot_source: /
+    default: true
+    filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the /@ subvolume, recursively creating intermediate subvolumes as required
+  community.general.btrfs_subvolume:
+    name: /@snapshots/@2022_06_09
+    snapshot_source: /@
+    recursive: true
+    filesystem_device: /dev/vda2
+
+- name: Remove the /@snapshots/@2022_06_09 subvolume and recursively delete child subvolumes as required
+  community.general.btrfs_subvolume:
+    name: /@snapshots/@2022_06_09
+    recursive: true
+    state: absent
+    filesystem_device: /dev/vda2
+
+'''
+
+RETURN = r'''
+
+filesystem:
+  description:
+    - A summary of the final state of the targeted btrfs filesystem.
+  type: dict
+  returned: success
+  contains:
+    uuid:
+      description: A unique identifier assigned to the filesystem.
+      returned: success
+      type: str
+      sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+    label:
+      description: An optional label assigned to the filesystem.
+      returned: success
+      type: str
+      sample: Tank
+    devices:
+      description: A list of devices assigned to the filesystem.
+      returned: success
+      type: list
+      sample:
+        - /dev/sda1
+        - /dev/sdb1
+    default_subvolume:
+      description: The ID of the filesystem's default subvolume.
+      returned: success and if filesystem is mounted
+      type: int
+      sample: 5
+    subvolumes:
+      description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+      returned: success and if filesystem is mounted
+      type: list
+      elements: dict
+      contains:
+        id:
+          description: An identifier assigned to the subvolume, unique within the containing filesystem.
+          type: int
+          sample: 256
+        mountpoints:
+          description: Paths where the subvolume is mounted on the targeted host.
+          type: list
+          sample: ['/home']
+        parent:
+          description: The identifier of this subvolume's parent.
+ type: int + sample: 5 + path: + description: The full path of the subvolume relative to the btrfs fileystem's root. + type: str + sample: /@home + +modifications: + description: + - A list where each element describes a change made to the target btrfs filesystem. + type: list + returned: Success + elements: str + +target_subvolume_id: + description: + - The ID of the subvolume specified with the I(name) parameter, either pre-existing or created as part of module execution. + type: int + sample: 257 + returned: Success and subvolume exists after module execution +''' + +from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException +from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path +from ansible.module_utils.basic import AnsibleModule +import os +import tempfile + + +class BtrfsSubvolumeModule(object): + + __BTRFS_ROOT_SUBVOLUME = '/' + __BTRFS_ROOT_SUBVOLUME_ID = 5 + __BTRFS_SUBVOLUME_INODE_NUMBER = 256 + + __CREATE_SUBVOLUME_OPERATION = 'create' + __CREATE_SNAPSHOT_OPERATION = 'snapshot' + __DELETE_SUBVOLUME_OPERATION = 'delete' + __SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default' + + __UNKNOWN_SUBVOLUME_ID = '?' + + def __init__(self, module): + self.module = module + self.__btrfs_api = BtrfsCommands(module) + self.__provider = BtrfsFilesystemsProvider(module) + + # module parameters + name = self.module.params['name'] + self.__name = normalize_subvolume_path(name) if name is not None else None + self.__state = self.module.params['state'] + + self.__automount = self.module.params['automount'] + self.__default = self.module.params['default'] + self.__filesystem_device = self.module.params['filesystem_device'] + self.__filesystem_label = self.module.params['filesystem_label'] + self.__filesystem_uuid = self.module.params['filesystem_uuid'] + self.__recursive = self.module.params['recursive'] + self.__snapshot_conflict = self.module.params['snapshot_conflict'] + snapshot_source = self.module.params['snapshot_source'] + self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None + + # execution state + self.__filesystem = None + self.__required_mounts = [] + self.__unit_of_work = [] + self.__completed_work = [] + self.__temporary_mounts = dict() + + def run(self): + error = None + try: + self.__load_filesystem() + self.__prepare_unit_of_work() + + if not self.module.check_mode: + # check required mounts & mount + if len(self.__unit_of_work) > 0: + self.__execute_unit_of_work() + self.__filesystem.refresh() + else: + # check required mounts + self.__completed_work.extend(self.__unit_of_work) + except Exception as e: + error = e + finally: + self.__cleanup_mounts() + if self.__filesystem is not None: + self.__filesystem.refresh_mountpoints() + + return (error, self.get_results()) + + # Identify the targeted filesystem and obtain the current state + def __load_filesystem(self): + if self.__has_filesystem_criteria(): + filesystem = self.__find_matching_filesytem() + else: + filesystem = self.__find_default_filesystem() + + # The filesystem must be mounted to obtain the current state (subvolumes, default, etc) + if not filesystem.is_mounted(): + if not self.__automount: + raise BtrfsModuleException( + "Target filesystem uuid=%s is not currently mounted and automount=False." 
+ "Mount explicitly before module execution or pass automount=True" % filesystem.uuid) + elif self.module.check_mode: + # TODO is failing the module an appropriate outcome in this scenario? + raise BtrfsModuleException( + "Target filesystem uuid=%s is not currently mounted. Unable to validate the current" + "state while running with check_mode=True" % filesystem.uuid) + else: + self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID) + filesystem.refresh() + self.__filesystem = filesystem + + def __has_filesystem_criteria(self): + return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None + + def __find_matching_filesytem(self): + criteria = { + 'uuid': self.__filesystem_uuid, + 'label': self.__filesystem_label, + 'device': self.__filesystem_device, + } + return self.__provider.get_matching_filesystem(criteria) + + def __find_default_filesystem(self): + filesystems = self.__provider.get_filesystems() + filesystem = None + + if len(filesystems) == 1: + filesystem = filesystems[0] + else: + mounted_filesystems = [x for x in filesystems if x.is_mounted()] + if len(mounted_filesystems) == 1: + filesystem = mounted_filesystems[0] + + if filesystem is not None: + return filesystem + else: + raise BtrfsModuleException( + "Failed to automatically identify targeted filesystem. " + "No explicit device indicated and found %d available filesystems." % len(filesystems) + ) + + # Prepare unit of work + def __prepare_unit_of_work(self): + if self.__state == "present": + if self.__snapshot_source is None: + self.__prepare_subvolume_present() + else: + self.__prepare_snapshot_present() + + if self.__default: + self.__prepare_set_default() + elif self.__state == "absent": + self.__prepare_subvolume_absent() + + def __prepare_subvolume_present(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + if subvolume is None: + self.__prepare_before_create_subvolume(self.__name) + self.__stage_create_subvolume(self.__name) + + def __prepare_before_create_subvolume(self, subvolume_name): + closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name) + self.__stage_required_mount(closest_parent) + if self.__recursive: + self.__prepare_create_intermediates(closest_parent, subvolume_name) + + def __prepare_create_intermediates(self, closest_subvolume, subvolume_name): + relative_path = closest_subvolume.get_child_relative_path(self.__name) + missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0] + if len(missing_subvolumes) > 1: + current = closest_subvolume.path + for s in missing_subvolumes[:-1]: + separator = os.path.sep if current[-1] != os.path.sep else "" + current = current + separator + s + self.__stage_create_subvolume(current, True) + + def __prepare_snapshot_present(self): + source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source) + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + subvolume_exists = subvolume is not None + + if subvolume_exists: + if self.__snapshot_conflict == "skip": + # No change required + return + elif self.__snapshot_conflict == "error": + raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name) + + if source_subvolume is None: + raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source) + elif subvolume is not None and source_subvolume.id == subvolume.id: + raise BtrfsModuleException("Snapshot source and target are 
the same.") + else: + self.__stage_required_mount(source_subvolume) + + if subvolume_exists and self.__snapshot_conflict == "clobber": + self.__prepare_delete_subvolume_tree(subvolume) + elif not subvolume_exists: + self.__prepare_before_create_subvolume(self.__name) + + self.__stage_create_snapshot(source_subvolume, self.__name) + + def __prepare_subvolume_absent(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + if subvolume is not None: + self.__prepare_delete_subvolume_tree(subvolume) + + def __prepare_delete_subvolume_tree(self, subvolume): + if subvolume.is_filesystem_root(): + raise BtrfsModuleException("Can not delete the filesystem's root subvolume") + if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0: + raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False." + "Either explicitly delete the child subvolumes first or pass " + "parameter recursive=True." % subvolume.path) + + self.__stage_required_mount(subvolume.get_parent_subvolume()) + queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume] + # prepare unit of work + for s in queue: + if s.is_mounted(): + # TODO potentially unmount the subvolume if automount=True ? + raise BtrfsModuleException("Can not delete mounted subvolume=%s" % s.path) + if s.is_filesystem_default(): + self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID) + self.__stage_delete_subvolume(s) + + def __prepare_recursive_delete_order(self, subvolume): + """Return the subvolume and all descendents as a list, ordered so that descendents always occur before their ancestors""" + pending = [subvolume] + ordered = [] + while len(pending) > 0: + next = pending.pop() + ordered.append(next) + pending.extend(next.get_child_subvolumes()) + ordered.reverse() # reverse to ensure children are deleted before their parent + return ordered + + def __prepare_set_default(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + subvolume_id = subvolume.id if subvolume is not None else None + + if self.__filesystem.default_subvolid != subvolume_id: + self.__stage_set_default_subvolume(self.__name, subvolume_id) + + # Stage operations to the unit of work + def __stage_required_mount(self, subvolume): + if subvolume.get_mounted_path() is None: + if self.__automount: + self.__required_mounts.append(subvolume) + else: + raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path) + + def __stage_create_subvolume(self, subvolume_path, intermediate=False): + """ + Add required creation of an intermediate subvolume to the unit of work + If intermediate is true, the action will be skipped if a directory like file is found at target + after mounting a parent subvolume + """ + self.__unit_of_work.append({ + 'action': self.__CREATE_SUBVOLUME_OPERATION, + 'target': subvolume_path, + 'intermediate': intermediate, + }) + + def __stage_create_snapshot(self, source_subvolume, target_subvolume_path): + """Add creation of a snapshot from source to target to the unit of work""" + self.__unit_of_work.append({ + 'action': self.__CREATE_SNAPSHOT_OPERATION, + 'source': source_subvolume.path, + 'source_id': source_subvolume.id, + 'target': target_subvolume_path, + }) + + def __stage_delete_subvolume(self, subvolume): + """Add deletion of the target subvolume to the unit of work""" + self.__unit_of_work.append({ + 'action': 
self.__DELETE_SUBVOLUME_OPERATION, + 'target': subvolume.path, + 'target_id': subvolume.id, + }) + + def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None): + """Add update of the filesystem's default subvolume to the unit of work""" + self.__unit_of_work.append({ + 'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION, + 'target': subvolume_path, + 'target_id': subvolume_id, + }) + + # Execute the unit of work + def __execute_unit_of_work(self): + self.__check_required_mounts() + for op in self.__unit_of_work: + if op['action'] == self.__CREATE_SUBVOLUME_OPERATION: + self.__execute_create_subvolume(op) + elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION: + self.__execute_create_snapshot(op) + elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION: + self.__execute_delete_subvolume(op) + elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION: + self.__execute_set_default_subvolume(op) + else: + raise ValueError("Unknown operation type '%s'" % op['action']) + + def __execute_create_subvolume(self, operation): + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + if not self.__is_existing_directory_like(target_mounted_path): + self.__btrfs_api.subvolume_create(target_mounted_path) + self.__completed_work.append(operation) + + def __execute_create_snapshot(self, operation): + source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source']) + source_mounted_path = source_subvolume.get_mounted_path() + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + + self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path) + self.__completed_work.append(operation) + + def __execute_delete_subvolume(self, operation): + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + self.__btrfs_api.subvolume_delete(target_mounted_path) + self.__completed_work.append(operation) + + def __execute_set_default_subvolume(self, operation): + target = operation['target'] + target_id = operation['target_id'] + + if target_id is None: + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + + if target_subvolume is None: + self.__filesystem.refresh() # the target may have been created earlier in module execution + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + + if target_subvolume is None: + raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target) + else: + target_id = target_subvolume.id + + self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id) + self.__completed_work.append(operation) + + def __is_existing_directory_like(self, path): + return os.path.exists(path) and ( + os.path.isdir(path) or + os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER + ) + + def __check_required_mounts(self): + filtered = self.__filter_child_subvolumes(self.__required_mounts) + if len(filtered) > 0: + for subvolume in filtered: + self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id) + self.__filesystem.refresh_mountpoints() + + def __filter_child_subvolumes(self, subvolumes): + """Filter the provided list of subvolumes to remove any that are a child of another item in the list""" + filtered = [] + last = None + ordered = sorted(subvolumes, key=lambda x: x.path) + for next in ordered: + if last is None or not next.path[0:len(last)] == last: + filtered.append(next) + last = next.path + return filtered + + # Create/cleanup temporary mountpoints + def 
__mount_subvolume_id_to_tempdir(self, filesystem, subvolid): + # this check should be redundant + if self.module.check_mode or not self.__automount: + raise BtrfsModuleException("Unable to temporarily mount required subvolumes" + "with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode)) + + cache_key = "%s:%d" % (filesystem.uuid, subvolid) + # The subvolume was already mounted, so return the current path + if cache_key in self.__temporary_mounts: + return self.__temporary_mounts[cache_key] + + device = filesystem.devices[0] + mountpoint = tempfile.mkdtemp(dir="/tmp") + self.__temporary_mounts[cache_key] = mountpoint + + mount = self.module.get_bin_path("mount", required=True) + command = "%s -o noatime,subvolid=%d %s %s " % (mount, + subvolid, + device, + mountpoint) + result = self.module.run_command(command, check_rc=True) + + return mountpoint + + def __cleanup_mounts(self): + for key in self.__temporary_mounts.keys(): + self.__cleanup_mount(self.__temporary_mounts[key]) + + def __cleanup_mount(self, mountpoint): + umount = self.module.get_bin_path("umount", required=True) + result = self.module.run_command("%s %s" % (umount, mountpoint)) + if result[0] == 0: + rmdir = self.module.get_bin_path("rmdir", required=True) + self.module.run_command("%s %s" % (rmdir, mountpoint)) + + # Format and return results + def get_results(self): + target = self.__filesystem.get_subvolume_by_name(self.__name) + return dict( + changed=len(self.__completed_work) > 0, + filesystem=self.__filesystem.get_summary(), + modifications=self.__get_formatted_modifications(), + target_subvolume_id=(target.id if target is not None else None) + ) + + def __get_formatted_modifications(self): + return [self.__format_operation_result(op) for op in self.__completed_work] + + def __format_operation_result(self, operation): + action_type = operation['action'] + if action_type == self.__CREATE_SUBVOLUME_OPERATION: + return self.__format_create_subvolume_result(operation) + elif action_type == self.__CREATE_SNAPSHOT_OPERATION: + return self.__format_create_snapshot_result(operation) + elif action_type == self.__DELETE_SUBVOLUME_OPERATION: + return self.__format_delete_subvolume_result(operation) + elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION: + return self.__format_set_default_subvolume_result(operation) + else: + raise ValueError("Unknown operation type '%s'" % operation['action']) + + def __format_create_subvolume_result(self, operation): + target = operation['target'] + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Created subvolume '%s' (%s)" % (target, target_id) + + def __format_create_snapshot_result(self, operation): + source = operation['source'] + source_id = operation['source_id'] + + target = operation['target'] + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id) + + def __format_delete_subvolume_result(self, operation): + target = operation['target'] + target_id = operation['target_id'] + return "Deleted subvolume '%s' (%s)" % (target, target_id) + + def __format_set_default_subvolume_result(self, operation): + target = operation['target'] + if 'target_id' in operation: + target_id = operation['target_id'] + else: + target_subvolume = 
self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Updated default subvolume to '%s' (%s)" % (target, target_id) + + +def run_module(): + module_args = dict( + automount=dict(type='bool', required=False, default=False), + default=dict(type='bool', required=False, default=False), + filesystem_device=dict(type='path', required=False), + filesystem_label=dict(type='str', required=False), + filesystem_uuid=dict(type='str', required=False), + name=dict(type='str', required=True), + recursive=dict(type='bool', default=False), + state=dict(type='str', required=False, default='present', choices=['present', 'absent']), + snapshot_source=dict(type='str', required=False), + snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error']) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + subvolume = BtrfsSubvolumeModule(module) + error, result = subvolume.run() + if error is not None: + module.fail_json(str(error), **result) + else: + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bundler.py b/ansible_collections/community/general/plugins/modules/bundler.py new file mode 100644 index 000000000..682dd334a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bundler.py @@ -0,0 +1,211 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Tim Hoiberg +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: bundler +short_description: Manage Ruby Gem dependencies with Bundler +description: + - Manage installation and Gem version dependencies for Ruby using the Bundler gem +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + executable: + type: str + description: + - The path to the bundler executable + state: + type: str + description: + - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version + choices: [present, latest] + default: present + chdir: + type: path + description: + - The directory to execute the bundler commands from. This directory + needs to contain a valid Gemfile or .bundle/ directory + - If not specified, it will default to the temporary working directory + exclude_groups: + type: list + elements: str + description: + - A list of Gemfile groups to exclude during operations. This only + applies when state is C(present). Bundler considers this + a 'remembered' property for the Gemfile and will automatically exclude + groups in future operations even if C(exclude_groups) is not set + clean: + description: + - Only applies if state is C(present). If set removes any gems on the + target host that are not in the gemfile + type: bool + default: false + gemfile: + type: path + description: + - Only applies if state is C(present). The path to the gemfile to use to install gems. 
+ - If not specified it will default to the Gemfile in current directory + local: + description: + - If set only installs gems from the cache on the target host + type: bool + default: false + deployment_mode: + description: + - Only applies if state is C(present). If set it will install gems in + ./vendor/bundle instead of the default location. Requires a Gemfile.lock + file to have been created prior + type: bool + default: false + user_install: + description: + - Only applies if state is C(present). Installs gems in the local user's cache or for all users + type: bool + default: true + gem_path: + type: path + description: + - Only applies if state is C(present). Specifies the directory to + install the gems into. If C(chdir) is set then this path is relative to + C(chdir) + - If not specified the default RubyGems gem paths will be used. + binstub_directory: + type: path + description: + - Only applies if state is C(present). Specifies the directory to + install any gem bins files to. When executed the bin files will run + within the context of the Gemfile and fail if any required gem + dependencies are not installed. If C(chdir) is set then this path is + relative to C(chdir) + extra_args: + type: str + description: + - A space separated string of additional commands that can be applied to + the Bundler command. Refer to the Bundler documentation for more + information +author: "Tim Hoiberg (@thoiberg)" +''' + +EXAMPLES = ''' +- name: Install gems from a Gemfile in the current directory + community.general.bundler: + state: present + executable: ~/.rvm/gems/2.1.5/bin/bundle + +- name: Exclude the production group from installing + community.general.bundler: + state: present + exclude_groups: production + +- name: Install gems into ./vendor/bundle + community.general.bundler: + state: present + deployment_mode: true + +- name: Install gems using a Gemfile in another directory + community.general.bundler: + state: present + gemfile: ../rails_project/Gemfile + +- name: Update Gemfile in another directory + community.general.bundler: + state: latest + chdir: ~/rails_project +''' + +from ansible.module_utils.basic import AnsibleModule + + +def get_bundler_executable(module): + if module.params.get('executable'): + result = module.params.get('executable').split(' ') + else: + result = [module.get_bin_path('bundle', True)] + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + executable=dict(default=None, required=False), + state=dict(default='present', required=False, choices=['present', 'latest']), + chdir=dict(default=None, required=False, type='path'), + exclude_groups=dict(default=None, required=False, type='list', elements='str'), + clean=dict(default=False, required=False, type='bool'), + gemfile=dict(default=None, required=False, type='path'), + local=dict(default=False, required=False, type='bool'), + deployment_mode=dict(default=False, required=False, type='bool'), + user_install=dict(default=True, required=False, type='bool'), + gem_path=dict(default=None, required=False, type='path'), + binstub_directory=dict(default=None, required=False, type='path'), + extra_args=dict(default=None, required=False), + ), + supports_check_mode=True + ) + + state = module.params.get('state') + chdir = module.params.get('chdir') + exclude_groups = module.params.get('exclude_groups') + clean = module.params.get('clean') + gemfile = module.params.get('gemfile') + local = module.params.get('local') + deployment_mode = module.params.get('deployment_mode') + user_install = 
module.params.get('user_install') + gem_path = module.params.get('gem_path') + binstub_directory = module.params.get('binstub_directory') + extra_args = module.params.get('extra_args') + + cmd = get_bundler_executable(module) + + if module.check_mode: + cmd.append('check') + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False) + + module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err) + + if state == 'present': + cmd.append('install') + if exclude_groups: + cmd.extend(['--without', ':'.join(exclude_groups)]) + if clean: + cmd.append('--clean') + if gemfile: + cmd.extend(['--gemfile', gemfile]) + if local: + cmd.append('--local') + if deployment_mode: + cmd.append('--deployment') + if not user_install: + cmd.append('--system') + if gem_path: + cmd.extend(['--path', gem_path]) + if binstub_directory: + cmd.extend(['--binstubs', binstub_directory]) + else: + cmd.append('update') + if local: + cmd.append('--local') + + if extra_args: + cmd.extend(extra_args.split(' ')) + + rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True) + + module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/bzr.py b/ansible_collections/community/general/plugins/modules/bzr.py new file mode 100644 index 000000000..e7aca7c6b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/bzr.py @@ -0,0 +1,202 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, André Paramés +# Based on the Git module by Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: bzr +author: +- André Paramés (@andreparames) +short_description: Deploy software (or files) from bzr branches +description: + - Manage I(bzr) branches to deploy files or software. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - SSH or HTTP protocol address of the parent branch. + aliases: [ parent ] + required: true + type: str + dest: + description: + - Absolute path of where the branch should be cloned to. + required: true + type: path + version: + description: + - What version of the branch to clone. This can be the + bzr revno or revid. + default: head + type: str + force: + description: + - If C(true), any modified files in the working + tree will be discarded. Before 1.9 the default + value was C(true). + type: bool + default: false + executable: + description: + - Path to bzr executable to use. If not supplied, + the normal mechanism for resolving binary paths will be used. 
+ type: str +''' + +EXAMPLES = ''' +- name: Checkout + community.general.bzr: + name: bzr+ssh://foosball.example.org/path/to/branch + dest: /srv/checkout + version: 22 +''' + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + + +class Bzr(object): + def __init__(self, module, parent, dest, version, bzr_path): + self.module = module + self.parent = parent + self.dest = dest + self.version = version + self.bzr_path = bzr_path + + def _command(self, args_list, cwd=None, **kwargs): + (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) + return (rc, out, err) + + def get_version(self): + '''samples the version of the bzr branch''' + + cmd = "%s revno" % self.bzr_path + rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) + revno = stdout.strip() + return revno + + def clone(self): + '''makes a new bzr branch if it does not already exist''' + dest_dirname = os.path.dirname(self.dest) + try: + os.makedirs(dest_dirname) + except Exception: + pass + if self.version.lower() != 'head': + args_list = ["branch", "-r", self.version, self.parent, self.dest] + else: + args_list = ["branch", self.parent, self.dest] + return self._command(args_list, check_rc=True, cwd=dest_dirname) + + def has_local_mods(self): + + cmd = "%s status -S" % self.bzr_path + rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) + lines = stdout.splitlines() + + # wrap the filter in list() so len() also works on Python 3, where filter() returns an iterator + lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines)) + return len(lines) > 0 + + def reset(self, force): + ''' + Resets the index and working tree to head. + Discards any changes to tracked files in the working + tree since that commit. + ''' + if not force and self.has_local_mods(): + self.module.fail_json(msg="Local modifications exist in branch (force=false).") + return self._command(["revert"], check_rc=True, cwd=self.dest) + + def fetch(self): + '''updates branch from remote sources''' + if self.version.lower() != 'head': + (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) + else: + (rc, out, err) = self._command(["pull"], cwd=self.dest) + if rc != 0: + self.module.fail_json(msg="Failed to pull") + return (rc, out, err) + + def switch_version(self): + '''once pulled, switch to a particular revno or revid''' + if self.version.lower() != 'head': + args_list = ["revert", "-r", self.version] + else: + args_list = ["revert"] + return self._command(args_list, check_rc=True, cwd=self.dest) + + +# =========================================== + +def main(): + module = AnsibleModule( + argument_spec=dict( + dest=dict(type='path', required=True), + name=dict(type='str', required=True, aliases=['parent']), + version=dict(type='str', default='head'), + force=dict(type='bool', default=False), + executable=dict(type='str'), + ) + ) + + dest = module.params['dest'] + parent = module.params['name'] + version = module.params['version'] + force = module.params['force'] + bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) + + bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') + + rc, out, err = (0, None, None) + + bzr = Bzr(module, parent, dest, version, bzr_path) + + # if there is no bzr configuration, do a branch operation + # else pull and switch the version + before = None + local_mods = False + if not os.path.exists(bzrconfig): + (rc, out, err) = bzr.clone() + + else: + # else do a pull + local_mods = bzr.has_local_mods() + before = bzr.get_version() + (rc, out, err) = bzr.reset(force) + if rc != 0: + 
module.fail_json(msg=err) + (rc, out, err) = bzr.fetch() + if rc != 0: + module.fail_json(msg=err) + + # switch to version specified regardless of whether + # we cloned or pulled + (rc, out, err) = bzr.switch_version() + + # determine if we changed anything + after = bzr.get_version() + changed = False + + if before != after or local_mods: + changed = True + + module.exit_json(changed=changed, before=before, after=after) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/campfire.py b/ansible_collections/community/general/plugins/modules/campfire.py new file mode 100644 index 000000000..1e0f1ecea --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/campfire.py @@ -0,0 +1,162 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: campfire +short_description: Send a message to Campfire +description: + - Send a message to Campfire. + - Messages with newlines will result in a "Paste" message being sent. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + subscription: + type: str + description: + - The subscription name to use. + required: true + token: + type: str + description: + - API token. + required: true + room: + type: str + description: + - Room number to which the message should be sent. + required: true + msg: + type: str + description: + - The message body. + required: true + notify: + type: str + description: + - Send a notification sound before the message. + required: false + choices: ["56k", "bell", "bezos", "bueller", "clowntown", + "cottoneyejoe", "crickets", "dadgummit", "dangerzone", + "danielsan", "deeper", "drama", "greatjob", "greyjoy", + "guarantee", "heygirl", "horn", "horror", + "inconceivable", "live", "loggins", "makeitso", "noooo", + "nyan", "ohmy", "ohyeah", "pushit", "rimshot", + "rollout", "rumble", "sax", "secret", "sexyback", + "story", "tada", "tmyk", "trololo", "trombone", "unix", + "vuvuzela", "what", "whoomp", "yeah", "yodel"] + +# informational: requirements for nodes +requirements: [ ] +author: "Adam Garside (@fabulops)" +''' + +EXAMPLES = ''' +- name: Send a message to Campfire + community.general.campfire: + subscription: foo + token: 12345 + room: 123 + msg: Task completed. + +- name: Send a message to Campfire + community.general.campfire: + subscription: foo + token: 12345 + room: 123 + notify: loggins + msg: Task completed ... with feeling. 
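+ +# Illustrative addition (not part of the upstream examples): the same documented +# parameters supplied from variables, plus a notification sound; the variable names are placeholders. +- name: Send a message to Campfire with a bell sound + community.general.campfire: + subscription: "{{ campfire_subscription }}" + token: "{{ campfire_token }}" + room: "{{ campfire_room }}" + notify: bell + msg: Task completed.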
+''' + +try: + from html import escape as html_escape +except ImportError: + # Python 2 fallback: html.escape is only available since Python 3.2 + import cgi + + def html_escape(text, quote=True): + return cgi.escape(text, quote) + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + subscription=dict(required=True), + token=dict(required=True, no_log=True), + room=dict(required=True), + msg=dict(required=True), + notify=dict(required=False, + choices=["56k", "bell", "bezos", "bueller", + "clowntown", "cottoneyejoe", + "crickets", "dadgummit", "dangerzone", + "danielsan", "deeper", "drama", + "greatjob", "greyjoy", "guarantee", + "heygirl", "horn", "horror", + "inconceivable", "live", "loggins", + "makeitso", "noooo", "nyan", "ohmy", + "ohyeah", "pushit", "rimshot", + "rollout", "rumble", "sax", "secret", + "sexyback", "story", "tada", "tmyk", + "trololo", "trombone", "unix", + "vuvuzela", "what", "whoomp", "yeah", + "yodel"]), + ), + supports_check_mode=False + ) + + subscription = module.params["subscription"] + token = module.params["token"] + room = module.params["room"] + msg = module.params["msg"] + notify = module.params["notify"] + + URI = "https://%s.campfirenow.com" % subscription + # XML payloads for the Campfire speak API: a notification sound and a plain message + NSTR = "<message><type>SoundMessage</type><body>%s</body></message>" + MSTR = "<message><body>%s</body></message>" + AGENT = "Ansible/1.2" + + # Hack to add basic auth username and password the way fetch_url expects + module.params['url_username'] = token + module.params['url_password'] = 'X' + + target_url = '%s/room/%s/speak.xml' % (URI, room) + headers = {'Content-Type': 'application/xml', + 'User-agent': AGENT} + + # Send some audible notification if requested + if notify: + response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers) + if info['status'] not in [200, 201]: + module.fail_json(msg="unable to send msg: '%s', campfire api" + " returned error code: '%s'" % + (notify, info['status'])) + + # Send the message + response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers) + if info['status'] not in [200, 201]: + module.fail_json(msg="unable to send msg: '%s', campfire api" + " returned error code: '%s'" % + (msg, info['status'])) + + module.exit_json(changed=True, room=room, msg=msg, notify=notify) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/capabilities.py b/ansible_collections/community/general/plugins/modules/capabilities.py new file mode 100644 index 000000000..9b72ac6ea --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/capabilities.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Nate Coraor +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: capabilities +short_description: Manage Linux capabilities +description: + - This module manipulates file privileges using the Linux capabilities(7) system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - Specifies the path to the file to be managed. 
+ type: str + required: true + aliases: [ key ] + capability: + description: + - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) + type: str + required: true + aliases: [ cap ] + state: + description: + - Whether the entry should be present or absent in the file's capabilities. + type: str + choices: [ absent, present ] + default: present +notes: + - The capabilities system will automatically transform operators and flags into the effective set, + so for example, C(cap_foo=ep) will probably become C(cap_foo+ep). + - This module does not attempt to determine the final operator and flags to compare, + so you will want to ensure that your capabilities argument matches the final capabilities. +author: +- Nate Coraor (@natefoo) +''' + +EXAMPLES = r''' +- name: Set cap_sys_chroot+ep on /foo + community.general.capabilities: + path: /foo + capability: cap_sys_chroot+ep + state: present + +- name: Remove cap_net_bind_service from /bar + community.general.capabilities: + path: /bar + capability: cap_net_bind_service + state: absent +''' + +from ansible.module_utils.basic import AnsibleModule + +OPS = ('=', '-', '+') + + +class CapabilitiesModule(object): + platform = 'Linux' + distribution = None + + def __init__(self, module): + self.module = module + self.path = module.params['path'].strip() + self.capability = module.params['capability'].strip().lower() + self.state = module.params['state'] + self.getcap_cmd = module.get_bin_path('getcap', required=True) + self.setcap_cmd = module.get_bin_path('setcap', required=True) + self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present') + + self.run() + + def run(self): + + current = self.getcap(self.path) + caps = [cap[0] for cap in current] + + if self.state == 'present' and self.capability_tup not in current: + # need to add capability + if self.module.check_mode: + self.module.exit_json(changed=True, msg='capabilities changed') + else: + # remove from current cap list if it's already set (but op/flags differ) + current = list(filter(lambda x: x[0] != self.capability_tup[0], current)) + # add new cap with correct op/flags + current.append(self.capability_tup) + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + elif self.state == 'absent' and self.capability_tup[0] in caps: + # need to remove capability + if self.module.check_mode: + self.module.exit_json(changed=True, msg='capabilities changed') + else: + # remove from current cap list and then set current list + current = filter(lambda x: x[0] != self.capability_tup[0], current) + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + self.module.exit_json(changed=False, state=self.state) + + def getcap(self, path): + rval = [] + cmd = "%s -v %s" % (self.getcap_cmd, path) + rc, stdout, stderr = self.module.run_command(cmd) + # If file xattrs are set but no caps are set the output will be: + # '/foo =' + # If file xattrs are unset the output will be: + # '/foo' + # If the file does not exist, the stderr will be (with rc == 0...): + # '/foo (No such file or directory)' + if rc != 0 or stderr != "": + self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) + if stdout.strip() != path: + if ' =' in stdout: + # process output of an older version of libcap + caps = stdout.split(' =')[1].strip().split() + else: + # 
otherwise, we have a newer version here + # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git + caps = stdout.split()[1].strip().split() + for cap in caps: + cap = cap.lower() + # getcap condenses capabilities with the same op/flags into a + # comma-separated list, so we have to parse that + if ',' in cap: + cap_group = cap.split(',') + cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) + for subcap in cap_group: + rval.append((subcap, op, flags)) + else: + rval.append(self._parse_cap(cap)) + return rval + + def setcap(self, path, caps): + caps = ' '.join([''.join(cap) for cap in caps]) + cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) + rc, stdout, stderr = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr) + else: + return stdout + + def _parse_cap(self, cap, op_required=True): + opind = -1 + try: + i = 0 + while opind == -1: + opind = cap.find(OPS[i]) + i += 1 + except Exception: + if op_required: + self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS)) + else: + return (cap, None, None) + op = cap[opind] + cap, flags = cap.split(op) + return (cap, op, flags) + + +# ============================================================== +# main + +def main(): + # defining module + module = AnsibleModule( + argument_spec=dict( + path=dict(type='str', required=True, aliases=['key']), + capability=dict(type='str', required=True, aliases=['cap']), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + CapabilitiesModule(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/cargo.py b/ansible_collections/community/general/plugins/modules/cargo.py new file mode 100644 index 000000000..24be43741 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/cargo.py @@ -0,0 +1,213 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021 Radek Sprta +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = r""" +--- +module: cargo +short_description: Manage Rust packages with cargo +version_added: 4.3.0 +description: + - Manage Rust packages with cargo. +author: "Radek Sprta (@radek-sprta)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of a Rust package to install. + type: list + elements: str + required: true + path: + description: + - The base path where to install the Rust packages. Cargo automatically appends + C(/bin). In other words, C(/usr/local) will become C(/usr/local/bin). + type: path + version: + description: + - The version to install. If I(name) contains multiple values, the module will + try to install all of them in this version. + type: str + required: false + state: + description: + - The state of the Rust package. 
+ required: false + type: str + default: present + choices: [ "present", "absent", "latest" ] +requirements: + - cargo installed in bin path (recommended /usr/local/bin) +""" + +EXAMPLES = r""" +- name: Install "ludusavi" Rust package + community.general.cargo: + name: ludusavi + +- name: Install "ludusavi" Rust package in version 0.10.0 + community.general.cargo: + name: ludusavi + version: '0.10.0' + +- name: Install "ludusavi" Rust package to global location + community.general.cargo: + name: ludusavi + path: /usr/local + +- name: Remove "ludusavi" Rust package + community.general.cargo: + name: ludusavi + state: absent + +- name: Update "ludusavi" Rust package to its latest version + community.general.cargo: + name: ludusavi + state: latest +""" + +import os +import re + +from ansible.module_utils.basic import AnsibleModule + + +class Cargo(object): + def __init__(self, module, **kwargs): + self.module = module + self.name = kwargs["name"] + self.path = kwargs["path"] + self.state = kwargs["state"] + self.version = kwargs["version"] + + self.executable = [module.get_bin_path("cargo", True)] + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if path is not None and not os.path.isdir(path): + self.module.fail_json(msg="Path %s is not a directory" % path) + self._path = path + + def _exec( + self, args, run_in_check_mode=False, check_rc=True, add_package_name=True + ): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = self.executable + args + rc, out, err = self.module.run_command(cmd, check_rc=check_rc) + return out, err + return "", "" + + def get_installed(self): + cmd = ["install", "--list"] + data, dummy = self._exec(cmd, True, False, False) + + package_regex = re.compile(r"^([\w\-]+) v(.+):$") + installed = {} + for line in data.splitlines(): + package_info = package_regex.match(line) + if package_info: + installed[package_info.group(1)] = package_info.group(2) + + return installed + + def install(self, packages=None): + cmd = ["install"] + cmd.extend(packages or self.name) + if self.path: + cmd.append("--root") + cmd.append(self.path) + if self.version: + cmd.append("--version") + cmd.append(self.version) + return self._exec(cmd) + + def is_outdated(self, name): + installed_version = self.get_installed().get(name) + # default to None so the comparison below still works when the search returns no match + latest_version = None + + cmd = ["search", name, "--limit", "1"] + data, dummy = self._exec(cmd, True, False, False) + + match = re.search(r'"(.+)"', data) + if match: + latest_version = match.group(1) + + return installed_version != latest_version + + def uninstall(self, packages=None): + cmd = ["uninstall"] + cmd.extend(packages or self.name) + return self._exec(cmd) + + +def main(): + arg_spec = dict( + name=dict(required=True, type="list", elements="str"), + path=dict(default=None, type="path"), + state=dict(default="present", choices=["present", "absent", "latest"]), + version=dict(default=None, type="str"), + ) + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params["name"] + path = module.params["path"] + state = module.params["state"] + version = module.params["version"] + + if not name: + module.fail_json(msg="Package name must be specified") + + # Set LANG env since we parse stdout + module.run_command_environ_update = dict( + LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" + ) + + cargo = Cargo(module, name=name, path=path, state=state, version=version) + changed, out, err = False, None, None + installed_packages = cargo.get_installed() + if state == 
"present": + to_install = [ + n + for n in name + if (n not in installed_packages) + or (version and version != installed_packages[n]) + ] + if to_install: + changed = True + out, err = cargo.install(to_install) + elif state == "latest": + to_update = [ + n for n in name if n not in installed_packages or cargo.is_outdated(n) + ] + if to_update: + changed = True + out, err = cargo.install(to_update) + else: # absent + to_uninstall = [n for n in name if n in installed_packages] + if to_uninstall: + changed = True + out, err = cargo.uninstall(to_uninstall) + + module.exit_json(changed=changed, stdout=out, stderr=err) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/catapult.py b/ansible_collections/community/general/plugins/modules/catapult.py new file mode 100644 index 000000000..a3bbef6c4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/catapult.py @@ -0,0 +1,162 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Jonathan Mainguy +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +# basis of code taken from the ansible twillio and nexmo modules + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: catapult +short_description: Send a sms / mms using the catapult bandwidth api +description: + - Allows notifications to be sent using sms / mms via the catapult bandwidth api. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + src: + type: str + description: + - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)). + required: true + dest: + type: list + elements: str + description: + - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)). + required: true + msg: + type: str + description: + - The contents of the text message (must be 2048 characters or less). + required: true + media: + type: str + description: + - For MMS messages, a media url to the location of the media to be sent with the message. + user_id: + type: str + description: + - User Id from Api account page. + required: true + api_token: + type: str + description: + - Api Token from Api account page. + required: true + api_secret: + type: str + description: + - Api Secret from Api account page. + required: true + +author: "Jonathan Mainguy (@Jmainguy)" +notes: + - Will return changed even if the media url is wrong. + - Will return changed if the destination number is invalid. + +''' + +EXAMPLES = ''' +- name: Send a mms to multiple users + community.general.catapult: + src: "+15035555555" + dest: + - "+12525089000" + - "+12018994225" + media: "http://example.com/foobar.jpg" + msg: "Task is complete" + user_id: "{{ user_id }}" + api_token: "{{ api_token }}" + api_secret: "{{ api_secret }}" + +- name: Send a sms to a single user + community.general.catapult: + src: "+15035555555" + dest: "+12018994225" + msg: "Consider yourself notified" + user_id: "{{ user_id }}" + api_token: "{{ api_token }}" + api_secret: "{{ api_secret }}" + +''' + +RETURN = ''' +changed: + description: Whether the api accepted the message. 
+ returned: always + type: bool + sample: true +''' + + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def send(module, src, dest, msg, media, user_id, api_token, api_secret): + """ + Send the message + """ + AGENT = "Ansible" + URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id + data = {'from': src, 'to': dest, 'text': msg} + if media: + data['media'] = media + + headers = {'User-Agent': AGENT, 'Content-type': 'application/json'} + + # Hack module params to have the Basic auth params that fetch_url expects + module.params['url_username'] = api_token.replace('\n', '') + module.params['url_password'] = api_secret.replace('\n', '') + + return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(required=True), + dest=dict(required=True, type='list', elements='str'), + msg=dict(required=True), + user_id=dict(required=True), + api_token=dict(required=True, no_log=True), + api_secret=dict(required=True, no_log=True), + media=dict(default=None, required=False), + ), + ) + + src = module.params['src'] + dest = module.params['dest'] + msg = module.params['msg'] + media = module.params['media'] + user_id = module.params['user_id'] + api_token = module.params['api_token'] + api_secret = module.params['api_secret'] + + for number in dest: + rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret) + if info["status"] != 201: + body = json.loads(info["body"]) + fail_msg = body["message"] + module.fail_json(msg=fail_msg) + + changed = True + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/circonus_annotation.py b/ansible_collections/community/general/plugins/modules/circonus_annotation.py new file mode 100644 index 000000000..937610776 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/circonus_annotation.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2014-2015, Epic Games, Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: circonus_annotation +short_description: Create an annotation in circonus +description: + - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided +author: "Nick Harring (@NickatEpic)" +requirements: + - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2) +notes: + - Check mode isn't supported. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + api_key: + type: str + description: + - Circonus API key + required: true + category: + type: str + description: + - Annotation Category + required: true + description: + type: str + description: + - Description of annotation + required: true + title: + type: str + description: + - Title of annotation + required: true + start: + type: int + description: + - Unix timestamp of event start + - If not specified, it defaults to I(now). + stop: + type: int + description: + - Unix timestamp of event end + - If not specified, it defaults to I(now) + I(duration). 
+ duration: + type: int + description: + - Duration in seconds of annotation + default: 0 +''' +EXAMPLES = ''' +- name: Create a simple annotation event; start and stop default to now + community.general.circonus_annotation: + api_key: XXXXXXXXXXXXXXXXX + title: App Config Change + description: This is a detailed description of the config change + category: This category groups like annotations + +- name: Create an annotation with a duration of 5 minutes and a default start time of now + community.general.circonus_annotation: + api_key: XXXXXXXXXXXXXXXXX + title: App Config Change + description: This is a detailed description of the config change + category: This category groups like annotations + duration: 300 + +- name: Create an annotation with an explicit start and stop time + community.general.circonus_annotation: + api_key: XXXXXXXXXXXXXXXXX + title: App Config Change + description: This is a detailed description of the config change + category: This category groups like annotations + start: 1395940006 + stop: 1395954407 +''' + +RETURN = ''' +annotation: + description: details about the created annotation + returned: success + type: complex + contains: + _cid: + description: annotation identifier + returned: success + type: str + sample: /annotation/100000 + _created: + description: creation timestamp + returned: success + type: int + sample: 1502236928 + _last_modified: + description: last modification timestamp + returned: success + type: int + sample: 1502236928 + _last_modified_by: + description: last modified by + returned: success + type: str + sample: /user/1000 + category: + description: category of the created annotation + returned: success + type: str + sample: alerts + title: + description: title of the created annotation + returned: success + type: str + sample: WARNING + description: + description: description of the created annotation + returned: success + type: str + sample: Host is down. + start: + description: Unix timestamp at which the annotation starts + returned: success + type: int + sample: 1395940006 + stop: + description: Unix timestamp at which the annotation ends + returned: success + type: int + sample: 1395954407 + rel_metrics: + description: Array of metrics related to this annotation, each metric is a string. + returned: success + type: list + sample: + - 54321_kbps +''' +import json +import time +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests + HAS_REQUESTS = True +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + HAS_REQUESTS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six import PY3 +from ansible.module_utils.common.text.converters import to_native + + +def check_requests_dep(module): + """Check if an adequate requests version is available""" + if not HAS_REQUESTS: + module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + else: + required_version = '2.0.0' if PY3 else '1.0.0' + if LooseVersion(requests.__version__) < LooseVersion(required_version): + module.fail_json(msg="'requests' library version should be >= %s, found: %s." + % (required_version, requests.__version__)) + + +def post_annotation(annotation, api_key): + ''' Takes annotation dict and api_key string''' + base_url = 'https://api.circonus.com/v2' + annotate_post_endpoint = '/annotation' + resp = requests.post(base_url + annotate_post_endpoint, + headers=build_headers(api_key), data=json.dumps(annotation)) + resp.raise_for_status() + return resp + + +def create_annotation(module): + ''' Takes ansible module object ''' + annotation = {} + duration = module.params['duration'] + if module.params['start'] is not None: + start = module.params['start'] + else: + start = int(time.time()) + if module.params['stop'] is not None: + stop = module.params['stop'] + else: + stop = int(time.time()) + duration + annotation['start'] = start + annotation['stop'] = stop + annotation['category'] = module.params['category'] + annotation['description'] = module.params['description'] + annotation['title'] = module.params['title'] + return annotation + + +def build_headers(api_token): + '''Takes api token, returns headers with it included.''' + headers = {'X-Circonus-App-Name': 'ansible', + 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token, + 'Accept': 'application/json'} + return headers + + +def main(): + '''Main function, dispatches logic''' + module = AnsibleModule( + argument_spec=dict( + start=dict(type='int'), + stop=dict(type='int'), + category=dict(required=True), + title=dict(required=True), + description=dict(required=True), + duration=dict(default=0, type='int'), + api_key=dict(required=True, no_log=True) + ) + ) + + check_requests_dep(module) + + annotation = create_annotation(module) + try: + resp = post_annotation(annotation, module.params['api_key']) + except requests.exceptions.RequestException as e: + module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc()) + module.exit_json(changed=True, annotation=resp.json()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/cisco_webex.py b/ansible_collections/community/general/plugins/modules/cisco_webex.py new file mode 100644 index 000000000..2e5cb50ea --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/cisco_webex.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: cisco_webex +short_description: Send a message to a Cisco Webex Teams Room or Individual +description: + - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting. +author: Drew Rusell (@drew-russell) +notes: + - The C(recipient_id) must be valid for the supplied C(recipient_type). + - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics). + +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + + recipient_type: + description: + - The request parameter you would like to send the message to. + - Messages can be sent to either a room or individual (by ID or E-Mail). 
+ required: true + choices: ['roomId', 'toPersonEmail', 'toPersonId'] + type: str + + recipient_id: + description: + - The unique identifier associated with the supplied C(recipient_type). + required: true + type: str + + msg_type: + description: + - Specifies how you would like the message formatted. + default: text + choices: ['text', 'markdown'] + type: str + aliases: ['message_type'] + + personal_token: + description: + - Your personal access token required to validate the Webex Teams API. + required: true + aliases: ['token'] + type: str + + msg: + description: + - The message you would like to send. + required: true + type: str +''' + +EXAMPLES = """ +# Note: The following examples assume a variable file has been imported +# that contains the appropriate information. + +- name: Cisco Webex Teams - Markdown Message to a Room + community.general.cisco_webex: + recipient_type: roomId + recipient_id: "{{ room_id }}" + msg_type: markdown + personal_token: "{{ token }}" + msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**" + +- name: Cisco Webex Teams - Text Message to a Room + community.general.cisco_webex: + recipient_type: roomId + recipient_id: "{{ room_id }}" + msg_type: text + personal_token: "{{ token }}" + msg: "Cisco Webex Teams Ansible Module - Room Message in Text" + +- name: Cisco Webex Teams - Text Message by an Individuals ID + community.general.cisco_webex: + recipient_type: toPersonId + recipient_id: "{{ person_id}}" + msg_type: text + personal_token: "{{ token }}" + msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID" + +- name: Cisco Webex Teams - Text Message by an Individuals E-Mail Address + community.general.cisco_webex: + recipient_type: toPersonEmail + recipient_id: "{{ person_email }}" + msg_type: text + personal_token: "{{ token }}" + msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail" + +""" + +RETURN = """ +status_code: + description: + - The Response Code returned by the Webex Teams API. + - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). + returned: always + type: int + sample: 200 + +message: + description: + - The Response Message returned by the Webex Teams API. + - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). + returned: always + type: str + sample: OK (585 bytes) +""" +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def webex_msg(module): + """When check mode is specified, establish a read only connection, that does not return any user specific + data, to validate connectivity. 
In regular mode, send a message to a Cisco Webex Teams Room or Individual""" + + # Ansible Specific Variables + results = {} + ansible = module.params + + headers = { + 'Authorization': 'Bearer {0}'.format(ansible['personal_token']), + 'content-type': 'application/json' + } + + if module.check_mode: + url = "https://webexapis.com/v1/people/me" + payload = None + + else: + url = "https://webexapis.com/v1/messages" + + payload = { + ansible['recipient_type']: ansible['recipient_id'], + ansible['msg_type']: ansible['msg'] + } + + payload = module.jsonify(payload) + + response, info = fetch_url(module, url, data=payload, headers=headers) + + status_code = info['status'] + msg = info['msg'] + + # Module will fail if the response is not 200 + if status_code != 200: + results['failed'] = True + results['status_code'] = status_code + results['message'] = msg + else: + results['failed'] = False + results['status_code'] = status_code + + if module.check_mode: + results['message'] = 'Authentication Successful.' + else: + results['message'] = msg + + return results + + +def main(): + '''Ansible main. ''' + module = AnsibleModule( + argument_spec=dict( + recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']), + recipient_id=dict(required=True, no_log=True), + msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']), + personal_token=dict(required=True, no_log=True, aliases=['token']), + msg=dict(required=True), + ), + + supports_check_mode=True + ) + + results = webex_msg(module) + + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_aa_policy.py b/ansible_collections/community/general/plugins/modules/clc_aa_policy.py new file mode 100644 index 000000000..05135bd95 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_aa_policy.py @@ -0,0 +1,353 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_aa_policy +short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud +description: + - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of the Anti Affinity Policy. + type: str + required: true + location: + description: + - Datacenter in which the policy lives/should live. + type: str + required: true + state: + description: + - Whether to create or delete the policy. + type: str + required: false + default: present + choices: ['present','absent'] +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. 
The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +--- +- name: Create AA Policy + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Create an Anti Affinity Policy + community.general.clc_aa_policy: + name: Hammer Time + location: UK3 + state: present + register: policy + + - name: Debug + ansible.builtin.debug: + var: policy + +- name: Delete AA Policy + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Delete an Anti Affinity Policy + community.general.clc_aa_policy: + name: Hammer Time + location: UK3 + state: absent + register: policy + + - name: Debug + ansible.builtin.debug: + var: policy +''' + +RETURN = ''' +policy: + description: The anti affinity policy information + returned: success + type: dict + sample: + { + "id":"1a28dd0988984d87b9cd61fa8da15424", + "name":"test_aa_policy", + "location":"UC1", + "links":[ + { + "rel":"self", + "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424", + "verbs":[ + "GET", + "DELETE", + "PUT" + ] + }, + { + "rel":"location", + "href":"/v2/datacenters/wfad/UC1", + "id":"uc1", + "name":"UC1 - US West (Santa Clara)" + } + ] + } +''' + +__version__ = '${version}' + +import os +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk: +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcAntiAffinityPolicy: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + self.policy_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), + exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), + exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(required=True), + location=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + ) + return argument_spec + + # Module Behavior Goodness + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + + self._set_clc_credentials_from_env() + self.policy_dict = 
self._get_policies_for_datacenter(p) + + if p['state'] == "absent": + changed, policy = self._ensure_policy_is_absent(p) + else: + changed, policy = self._ensure_policy_is_present(p) + + if hasattr(policy, 'data'): + policy = policy.data + elif hasattr(policy, '__dict__'): + policy = policy.__dict__ + + self.module.exit_json(changed=changed, policy=policy) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _get_policies_for_datacenter(self, p): + """ + Get the Policies for a datacenter by calling the CLC API. + :param p: datacenter to get policies from + :return: policies in the datacenter + """ + response = {} + + policies = self.clc.v2.AntiAffinity.GetAll(location=p['location']) + + for policy in policies: + response[policy.name] = policy + return response + + def _create_policy(self, p): + """ + Create an Anti Affinity Policy using the CLC API. + :param p: datacenter to create policy in + :return: response dictionary from the CLC API. + """ + try: + return self.clc.v2.AntiAffinity.Create( + name=p['name'], + location=p['location']) + except CLCException as ex: + self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format( + p['name'], ex.response_text + )) + + def _delete_policy(self, p): + """ + Delete an Anti Affinity Policy using the CLC API. + :param p: datacenter to delete a policy from + :return: none + """ + try: + policy = self.policy_dict[p['name']] + policy.Delete() + except CLCException as ex: + self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. 
{1}'.format( + p['name'], ex.response_text + )) + + def _policy_exists(self, policy_name): + """ + Check to see if an Anti Affinity Policy exists + :param policy_name: name of the policy + :return: boolean of if the policy exists + """ + if policy_name in self.policy_dict: + return self.policy_dict.get(policy_name) + + return False + + def _ensure_policy_is_absent(self, p): + """ + Makes sure that a policy is absent + :param p: dictionary of policy name + :return: tuple of if a deletion occurred and the name of the policy that was deleted + """ + changed = False + if self._policy_exists(policy_name=p['name']): + changed = True + if not self.module.check_mode: + self._delete_policy(p) + return changed, None + + def _ensure_policy_is_present(self, p): + """ + Ensures that a policy is present + :param p: dictionary of a policy name + :return: tuple of if an addition occurred and the name of the policy that was added + """ + changed = False + policy = self._policy_exists(policy_name=p['name']) + if not policy: + changed = True + policy = None + if not self.module.check_mode: + policy = self._create_policy(p) + return changed, policy + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + module = AnsibleModule( + argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(), + supports_check_mode=True) + clc_aa_policy = ClcAntiAffinityPolicy(module) + clc_aa_policy.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_alert_policy.py b/ansible_collections/community/general/plugins/modules/clc_alert_policy.py new file mode 100644 index 000000000..b77c83e3b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_alert_policy.py @@ -0,0 +1,536 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_alert_policy +short_description: Create or Delete Alert Policies at CenturyLink Cloud +description: + - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + alias: + description: + - The alias of your CLC Account + type: str + required: true + name: + description: + - The name of the alert policy. This is mutually exclusive with id + type: str + id: + description: + - The alert policy id. This is mutually exclusive with name + type: str + alert_recipients: + description: + - A list of recipient email ids to notify the alert. + This is required for state 'present' + type: list + elements: str + metric: + description: + - The metric on which to measure the condition that will trigger the alert. 
+ This is required for state 'present' + type: str + choices: ['cpu','memory','disk'] + duration: + description: + - The length of time in minutes that the condition must exceed the threshold. + This is required for state 'present' + type: str + threshold: + description: + - The threshold that will trigger the alert when the metric equals or exceeds it. + This is required for state 'present' + This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0 + type: int + state: + description: + - Whether to create or delete the policy. + type: str + default: present + choices: ['present','absent'] +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +--- +- name: Create Alert Policy Example + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Create an Alert Policy for disk above 80% for 5 minutes + community.general.clc_alert_policy: + alias: wfad + name: 'alert for disk > 80%' + alert_recipients: + - test1@centurylink.com + - test2@centurylink.com + metric: 'disk' + duration: '00:05:00' + threshold: 80 + state: present + register: policy + + - name: Debug + ansible.builtin.debug: var=policy + +- name: Delete Alert Policy Example + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Delete an Alert Policy + community.general.clc_alert_policy: + alias: wfad + name: 'alert for disk > 80%' + state: absent + register: policy + + - name: Debug + ansible.builtin.debug: var=policy +''' + +RETURN = ''' +policy: + description: The alert policy information + returned: success + type: dict + sample: + { + "actions": [ + { + "action": "email", + "settings": { + "recipients": [ + "user1@domain.com", + "user1@domain.com" + ] + } + } + ], + "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7", + "links": [ + { + "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7", + "rel": "self", + "verbs": [ + "GET", + "DELETE", + "PUT" + ] + } + ], + "name": "test_alert", + "triggers": [ + { + "duration": "00:05:00", + "metric": "disk", + "threshold": 80.0 + } + ] + } +''' + +__version__ = '${version}' + +import json +import os +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. 
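+# (imported under a guard below: if it is missing, __init__ reports it via
+# module.fail_json(missing_required_lib('clc-sdk')) instead of an unhandled ImportError)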
+# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import APIFailedResponse +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcAlertPolicy: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + self.policy_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(), + id=dict(), + alias=dict(required=True), + alert_recipients=dict(type='list', elements='str'), + metric=dict( + choices=[ + 'cpu', + 'memory', + 'disk']), + duration=dict(type='str'), + threshold=dict(type='int'), + state=dict(default='present', choices=['present', 'absent']) + ) + mutually_exclusive = [ + ['name', 'id'] + ] + return {'argument_spec': argument_spec, + 'mutually_exclusive': mutually_exclusive} + + # Module Behavior Goodness + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + + self._set_clc_credentials_from_env() + self.policy_dict = self._get_alert_policies(p['alias']) + + if p['state'] == 'present': + changed, policy = self._ensure_alert_policy_is_present() + else: + changed, policy = self._ensure_alert_policy_is_absent() + + self.module.exit_json(changed=changed, policy=policy) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _ensure_alert_policy_is_present(self): + """ + Ensures that the alert policy is present + :return: (changed, policy) + changed: A flag representing if anything is modified + policy: the created/updated alert policy + """ + changed = False + p = self.module.params + policy_name = p.get('name') + + if not policy_name: + self.module.fail_json(msg='Policy name is a required') + policy = self._alert_policy_exists(policy_name) + if not policy: + changed = True + policy = None + if not self.module.check_mode: + policy = self._create_alert_policy() + else: + changed_u, policy = self._ensure_alert_policy_is_updated(policy) + if 
changed_u: + changed = True + return changed, policy + + def _ensure_alert_policy_is_absent(self): + """ + Ensures that the alert policy is absent + :return: (changed, None) + changed: A flag representing if anything is modified + """ + changed = False + p = self.module.params + alert_policy_id = p.get('id') + alert_policy_name = p.get('name') + alias = p.get('alias') + if not alert_policy_id and not alert_policy_name: + self.module.fail_json( + msg='Either alert policy id or policy name is required') + if not alert_policy_id and alert_policy_name: + alert_policy_id = self._get_alert_policy_id( + self.module, + alert_policy_name) + if alert_policy_id and alert_policy_id in self.policy_dict: + changed = True + if not self.module.check_mode: + self._delete_alert_policy(alias, alert_policy_id) + return changed, None + + def _ensure_alert_policy_is_updated(self, alert_policy): + """ + Ensures the alert policy is updated if anything is changed in the alert policy configuration + :param alert_policy: the target alert policy + :return: (changed, policy) + changed: A flag representing if anything is modified + policy: the updated the alert policy + """ + changed = False + p = self.module.params + alert_policy_id = alert_policy.get('id') + email_list = p.get('alert_recipients') + metric = p.get('metric') + duration = p.get('duration') + threshold = p.get('threshold') + policy = alert_policy + if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \ + (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \ + (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))): + changed = True + elif email_list: + t_email_list = list( + alert_policy.get('actions')[0].get('settings').get('recipients')) + if set(email_list) != set(t_email_list): + changed = True + if changed and not self.module.check_mode: + policy = self._update_alert_policy(alert_policy_id) + return changed, policy + + def _get_alert_policies(self, alias): + """ + Get the alert policies for account alias by calling the CLC API. + :param alias: the account alias + :return: the alert policies for the account alias + """ + response = {} + + policies = self.clc.v2.API.Call('GET', + '/v2/alertPolicies/%s' + % alias) + + for policy in policies.get('items'): + response[policy.get('id')] = policy + return response + + def _create_alert_policy(self): + """ + Create an alert Policy using the CLC API. + :return: response dictionary from the CLC API. + """ + p = self.module.params + alias = p['alias'] + email_list = p['alert_recipients'] + metric = p['metric'] + duration = p['duration'] + threshold = p['threshold'] + policy_name = p['name'] + arguments = json.dumps( + { + 'name': policy_name, + 'actions': [{ + 'action': 'email', + 'settings': { + 'recipients': email_list + } + }], + 'triggers': [{ + 'metric': metric, + 'duration': duration, + 'threshold': threshold + }] + } + ) + try: + result = self.clc.v2.API.Call( + 'POST', + '/v2/alertPolicies/%s' % alias, + arguments) + except APIFailedResponse as e: + return self.module.fail_json( + msg='Unable to create alert policy "{0}". {1}'.format( + policy_name, str(e.response_text))) + return result + + def _update_alert_policy(self, alert_policy_id): + """ + Update alert policy using the CLC API. + :param alert_policy_id: The clc alert policy id + :return: response dictionary from the CLC API. 
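+ Note: the update is a full PUT of the policy body rebuilt from the
+ module parameters (name, recipients and trigger settings are resent
+ together), not a partial patch of only the changed fields.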
+ """ + p = self.module.params + alias = p['alias'] + email_list = p['alert_recipients'] + metric = p['metric'] + duration = p['duration'] + threshold = p['threshold'] + policy_name = p['name'] + arguments = json.dumps( + { + 'name': policy_name, + 'actions': [{ + 'action': 'email', + 'settings': { + 'recipients': email_list + } + }], + 'triggers': [{ + 'metric': metric, + 'duration': duration, + 'threshold': threshold + }] + } + ) + try: + result = self.clc.v2.API.Call( + 'PUT', '/v2/alertPolicies/%s/%s' % + (alias, alert_policy_id), arguments) + except APIFailedResponse as e: + return self.module.fail_json( + msg='Unable to update alert policy "{0}". {1}'.format( + policy_name, str(e.response_text))) + return result + + def _delete_alert_policy(self, alias, policy_id): + """ + Delete an alert policy using the CLC API. + :param alias : the account alias + :param policy_id: the alert policy id + :return: response dictionary from the CLC API. + """ + try: + result = self.clc.v2.API.Call( + 'DELETE', '/v2/alertPolicies/%s/%s' % + (alias, policy_id), None) + except APIFailedResponse as e: + return self.module.fail_json( + msg='Unable to delete alert policy id "{0}". {1}'.format( + policy_id, str(e.response_text))) + return result + + def _alert_policy_exists(self, policy_name): + """ + Check to see if an alert policy exists + :param policy_name: name of the alert policy + :return: boolean of if the policy exists + """ + result = False + for policy_id in self.policy_dict: + if self.policy_dict.get(policy_id).get('name') == policy_name: + result = self.policy_dict.get(policy_id) + return result + + def _get_alert_policy_id(self, module, alert_policy_name): + """ + retrieves the alert policy id of the account based on the name of the policy + :param module: the AnsibleModule object + :param alert_policy_name: the alert policy name + :return: alert_policy_id: The alert policy id + """ + alert_policy_id = None + for policy_id in self.policy_dict: + if self.policy_dict.get(policy_id).get('name') == alert_policy_name: + if not alert_policy_id: + alert_policy_id = policy_id + else: + return module.fail_json( + msg='multiple alert policies were found with policy name : %s' % alert_policy_name) + return alert_policy_id + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. 
+ :return: none + """ + argument_dict = ClcAlertPolicy._define_module_argument_spec() + module = AnsibleModule(supports_check_mode=True, **argument_dict) + clc_alert_policy = ClcAlertPolicy(module) + clc_alert_policy.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py b/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py new file mode 100644 index 000000000..672e06780 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_blueprint_package +short_description: Deploys a blue print package on a set of servers in CenturyLink Cloud +description: + - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + server_ids: + description: + - A list of server Ids to deploy the blue print package. + type: list + required: true + elements: str + package_id: + description: + - The package id of the blue print. + type: str + required: true + package_params: + description: + - The dictionary of arguments required to deploy the blue print. + type: dict + default: {} + required: false + state: + description: + - Whether to install or uninstall the package. Currently it supports only "present" for install action. + type: str + required: false + default: present + choices: ['present'] + wait: + description: + - Whether to wait for the tasks to finish before returning. + type: str + default: 'True' + required: false +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +- name: Deploy package + community.general.clc_blueprint_package: + server_ids: + - UC1TEST-SERVER1 + - UC1TEST-SERVER2 + package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a + package_params: {} +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SERVER1", + "UC1TEST-SERVER2" + ] +''' + +__version__ = '${version}' + +import os +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcBlueprintPackage: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + changed = False + changed_server_ids = [] + self._set_clc_credentials_from_env() + server_ids = p['server_ids'] + package_id = p['package_id'] + package_params = p['package_params'] + state = p['state'] + if state == 'present': + changed, changed_server_ids, request_list = self.ensure_package_installed( + server_ids, package_id, package_params) + self._wait_for_requests_to_complete(request_list) + self.module.exit_json(changed=changed, server_ids=changed_server_ids) + + @staticmethod + def define_argument_spec(): + """ + This function defines the dictionary object required for + package module + :return: the package dictionary object + """ + argument_spec = dict( + server_ids=dict(type='list', elements='str', required=True), + package_id=dict(required=True), + package_params=dict(type='dict', default={}), + wait=dict(default=True), # @FIXME should be bool? 
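+ # As the @FIXME above notes, 'wait' is declared without type='bool', so
+ # string values such as "False" are truthy; _wait_for_requests_to_complete()
+ # only skips waiting when the parameter is an actual falsy value.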
+ state=dict(default='present', choices=['present']) + ) + return argument_spec + + def ensure_package_installed(self, server_ids, package_id, package_params): + """ + Ensure the package is installed in the given list of servers + :param server_ids: the server list where the package needs to be installed + :param package_id: the blueprint package id + :param package_params: the package arguments + :return: (changed, server_ids, request_list) + changed: A flag indicating if a change was made + server_ids: The list of servers modified + request_list: The list of request objects from clc-sdk + """ + changed = False + request_list = [] + servers = self._get_servers_from_clc( + server_ids, + 'Failed to get servers from CLC') + for server in servers: + if not self.module.check_mode: + request = self.clc_install_package( + server, + package_id, + package_params) + request_list.append(request) + changed = True + return changed, server_ids, request_list + + def clc_install_package(self, server, package_id, package_params): + """ + Install the package to a given clc server + :param server: The server object where the package needs to be installed + :param package_id: The blue print package id + :param package_params: the required argument dict for the package installation + :return: The result object from the CLC API call + """ + result = None + try: + result = server.ExecutePackage( + package_id=package_id, + parameters=package_params) + except CLCException as ex: + self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format( + package_id, server.id, ex.message + )) + return result + + def _wait_for_requests_to_complete(self, request_lst): + """ + Waits until the CLC requests are complete if the wait argument is True + :param request_lst: The list of CLC request objects + :return: none + """ + if not self.module.params['wait']: + return + for request in request_lst: + request.WaitUntilComplete() + for request_details in request.requests: + if request_details.Status() != 'succeeded': + self.module.fail_json( + msg='Unable to process package install request') + + def _get_servers_from_clc(self, server_list, message): + """ + Internal function to fetch list of CLC server objects from a list of server ids + :param server_list: the list of server ids + :param message: the error message to raise if there is any error + :return the list of CLC server objects + """ + try: + return self.clc.v2.Servers(server_list).servers + except CLCException as ex: + self.module.fail_json(msg=message + ': %s' % ex) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = 
"ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + Main function + :return: None + """ + module = AnsibleModule( + argument_spec=ClcBlueprintPackage.define_argument_spec(), + supports_check_mode=True + ) + clc_blueprint_package = ClcBlueprintPackage(module) + clc_blueprint_package.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py b/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py new file mode 100644 index 000000000..c832571d3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py @@ -0,0 +1,596 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_firewall_policy +short_description: Create/delete/update firewall policies +description: + - Create or delete or update firewall policies on Centurylink Cloud +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + location: + description: + - Target datacenter for the firewall policy + type: str + required: true + state: + description: + - Whether to create or delete the firewall policy + type: str + default: present + choices: ['present', 'absent'] + source: + description: + - The list of source addresses for traffic on the originating firewall. + This is required when state is 'present' + type: list + elements: str + destination: + description: + - The list of destination addresses for traffic on the terminating firewall. + This is required when state is 'present' + type: list + elements: str + ports: + description: + - The list of ports associated with the policy. + TCP and UDP can take in single ports or port ranges. + - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])." + type: list + elements: str + firewall_policy_id: + description: + - Id of the firewall policy. This is required to update or delete an existing firewall policy + type: str + source_account_alias: + description: + - CLC alias for the source account + type: str + required: true + destination_account_alias: + description: + - CLC alias for the destination account + type: str + wait: + description: + - Whether to wait for the provisioning tasks to finish before returning. + type: str + default: 'True' + enabled: + description: + - Whether the firewall policy is enabled or disabled + type: str + choices: ['True', 'False'] + default: 'True' +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. 
The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +--- +- name: Create Firewall Policy + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Create / Verify a Firewall Policy at CenturyLink Cloud + community.general.clc_firewall_policy: + source_account_alias: WFAD + location: VA1 + state: present + source: 10.128.216.0/24 + destination: 10.128.216.0/24 + ports: Any + destination_account_alias: WFAD + +- name: Delete Firewall Policy + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Delete a Firewall Policy at CenturyLink Cloud + community.general.clc_firewall_policy: + source_account_alias: WFAD + location: VA1 + state: absent + firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1 +''' + +RETURN = ''' +firewall_policy_id: + description: The firewall policy id + returned: success + type: str + sample: fc36f1bfd47242e488a9c44346438c05 +firewall_policy: + description: The firewall policy information + returned: success + type: dict + sample: + { + "destination":[ + "10.1.1.0/24", + "10.2.2.0/24" + ], + "destinationAccount":"wfad", + "enabled":true, + "id":"fc36f1bfd47242e488a9c44346438c05", + "links":[ + { + "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", + "rel":"self", + "verbs":[ + "GET", + "PUT", + "DELETE" + ] + } + ], + "ports":[ + "any" + ], + "source":[ + "10.1.1.0/24", + "10.2.2.0/24" + ], + "status":"active" + } +''' + +__version__ = '${version}' + +import os +import traceback +from ansible.module_utils.six.moves.urllib.parse import urlparse +from time import sleep + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import APIFailedResponse +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcFirewallPolicy: + + clc = None + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + self.firewall_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + location=dict(required=True), + source_account_alias=dict(required=True), + destination_account_alias=dict(), + firewall_policy_id=dict(), + ports=dict(type='list', elements='str'), + source=dict(type='list', elements='str'), + destination=dict(type='list', 
elements='str'), + wait=dict(default=True), # @FIXME type=bool + state=dict(default='present', choices=['present', 'absent']), + enabled=dict(default=True, choices=[True, False]) + ) + return argument_spec + + def process_request(self): + """ + Execute the main code path, and handle the request + :return: none + """ + changed = False + firewall_policy = None + location = self.module.params.get('location') + source_account_alias = self.module.params.get('source_account_alias') + destination_account_alias = self.module.params.get( + 'destination_account_alias') + firewall_policy_id = self.module.params.get('firewall_policy_id') + ports = self.module.params.get('ports') + source = self.module.params.get('source') + destination = self.module.params.get('destination') + wait = self.module.params.get('wait') + state = self.module.params.get('state') + enabled = self.module.params.get('enabled') + + self.firewall_dict = { + 'location': location, + 'source_account_alias': source_account_alias, + 'destination_account_alias': destination_account_alias, + 'firewall_policy_id': firewall_policy_id, + 'ports': ports, + 'source': source, + 'destination': destination, + 'wait': wait, + 'state': state, + 'enabled': enabled} + + self._set_clc_credentials_from_env() + + if state == 'absent': + changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent( + source_account_alias, location, self.firewall_dict) + + elif state == 'present': + changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present( + source_account_alias, location, self.firewall_dict) + + return self.module.exit_json( + changed=changed, + firewall_policy_id=firewall_policy_id, + firewall_policy=firewall_policy) + + @staticmethod + def _get_policy_id_from_response(response): + """ + Method to parse out the policy id from creation response + :param response: response from firewall creation API call + :return: policy_id: firewall policy id from creation call + """ + url = response.get('links')[0]['href'] + path = urlparse(url).path + path_list = os.path.split(path) + policy_id = path_list[-1] + return policy_id + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _ensure_firewall_policy_is_present( + self, + source_account_alias, + location, + firewall_dict): + """ + Ensures that a given firewall policy is present + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_dict: dictionary of request parameters for firewall policy + :return: (changed, firewall_policy_id, firewall_policy) + changed: flag for if a change occurred + firewall_policy_id: the firewall policy id that was 
created/updated + firewall_policy: The firewall_policy object + """ + firewall_policy = None + firewall_policy_id = firewall_dict.get('firewall_policy_id') + + if firewall_policy_id is None: + if not self.module.check_mode: + response = self._create_firewall_policy( + source_account_alias, + location, + firewall_dict) + firewall_policy_id = self._get_policy_id_from_response( + response) + changed = True + else: + firewall_policy = self._get_firewall_policy( + source_account_alias, location, firewall_policy_id) + if not firewall_policy: + return self.module.fail_json( + msg='Unable to find the firewall policy id : {0}'.format( + firewall_policy_id)) + changed = self._compare_get_request_with_dict( + firewall_policy, + firewall_dict) + if not self.module.check_mode and changed: + self._update_firewall_policy( + source_account_alias, + location, + firewall_policy_id, + firewall_dict) + if changed and firewall_policy_id: + firewall_policy = self._wait_for_requests_to_complete( + source_account_alias, + location, + firewall_policy_id) + return changed, firewall_policy_id, firewall_policy + + def _ensure_firewall_policy_is_absent( + self, + source_account_alias, + location, + firewall_dict): + """ + Ensures that a given firewall policy is removed if present + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_dict: firewall policy to delete + :return: (changed, firewall_policy_id, response) + changed: flag for if a change occurred + firewall_policy_id: the firewall policy id that was deleted + response: response from CLC API call + """ + changed = False + response = [] + firewall_policy_id = firewall_dict.get('firewall_policy_id') + result = self._get_firewall_policy( + source_account_alias, location, firewall_policy_id) + if result: + if not self.module.check_mode: + response = self._delete_firewall_policy( + source_account_alias, + location, + firewall_policy_id) + changed = True + return changed, firewall_policy_id, response + + def _create_firewall_policy( + self, + source_account_alias, + location, + firewall_dict): + """ + Creates the firewall policy for the given account alias + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_dict: dictionary of request parameters for firewall policy + :return: response from CLC API call + """ + payload = { + 'destinationAccount': firewall_dict.get('destination_account_alias'), + 'source': firewall_dict.get('source'), + 'destination': firewall_dict.get('destination'), + 'ports': firewall_dict.get('ports')} + try: + response = self.clc.v2.API.Call( + 'POST', '/v2-experimental/firewallPolicies/%s/%s' % + (source_account_alias, location), payload) + except APIFailedResponse as e: + return self.module.fail_json( + msg="Unable to create firewall policy. 
%s" % + str(e.response_text)) + return response + + def _delete_firewall_policy( + self, + source_account_alias, + location, + firewall_policy_id): + """ + Deletes a given firewall policy for an account alias in a datacenter + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_policy_id: firewall policy id to delete + :return: response: response from CLC API call + """ + try: + response = self.clc.v2.API.Call( + 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % + (source_account_alias, location, firewall_policy_id)) + except APIFailedResponse as e: + return self.module.fail_json( + msg="Unable to delete the firewall policy id : {0}. {1}".format( + firewall_policy_id, str(e.response_text))) + return response + + def _update_firewall_policy( + self, + source_account_alias, + location, + firewall_policy_id, + firewall_dict): + """ + Updates a firewall policy for a given datacenter and account alias + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_policy_id: firewall policy id to update + :param firewall_dict: dictionary of request parameters for firewall policy + :return: response: response from CLC API call + """ + try: + response = self.clc.v2.API.Call( + 'PUT', + '/v2-experimental/firewallPolicies/%s/%s/%s' % + (source_account_alias, + location, + firewall_policy_id), + firewall_dict) + except APIFailedResponse as e: + return self.module.fail_json( + msg="Unable to update the firewall policy id : {0}. {1}".format( + firewall_policy_id, str(e.response_text))) + return response + + @staticmethod + def _compare_get_request_with_dict(response, firewall_dict): + """ + Helper method to compare the json response for getting the firewall policy with the request parameters + :param response: response from the get method + :param firewall_dict: dictionary of request parameters for firewall policy + :return: changed: Boolean that returns true if there are differences between + the response parameters and the playbook parameters + """ + + changed = False + + response_dest_account_alias = response.get('destinationAccount') + response_enabled = response.get('enabled') + response_source = response.get('source') + response_dest = response.get('destination') + response_ports = response.get('ports') + request_dest_account_alias = firewall_dict.get( + 'destination_account_alias') + request_enabled = firewall_dict.get('enabled') + if request_enabled is None: + request_enabled = True + request_source = firewall_dict.get('source') + request_dest = firewall_dict.get('destination') + request_ports = firewall_dict.get('ports') + + if ( + response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( + response_enabled != request_enabled) or ( + response_source and response_source != request_source) or ( + response_dest and response_dest != request_dest) or ( + response_ports and response_ports != request_ports): + changed = True + return changed + + def _get_firewall_policy( + self, + source_account_alias, + location, + firewall_policy_id): + """ + Get back details for a particular firewall policy + :param source_account_alias: the source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_policy_id: id of the firewall policy to get + :return: response - The response from CLC API call + """ + response = None + 
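+ # A 404 from this GET is deliberately swallowed in the except clause
+ # below, so a missing policy yields None and callers can treat
+ # "not found" as absent rather than failing the module.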
try: + response = self.clc.v2.API.Call( + 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' % + (source_account_alias, location, firewall_policy_id)) + except APIFailedResponse as e: + if e.response_status_code != 404: + self.module.fail_json( + msg="Unable to fetch the firewall policy with id : {0}. {1}".format( + firewall_policy_id, str(e.response_text))) + return response + + def _wait_for_requests_to_complete( + self, + source_account_alias, + location, + firewall_policy_id, + wait_limit=50): + """ + Waits until the CLC requests are complete if the wait argument is True + :param source_account_alias: The source account alias for the firewall policy + :param location: datacenter of the firewall policy + :param firewall_policy_id: The firewall policy id + :param wait_limit: The number of times to check the status for completion + :return: the firewall_policy object + """ + wait = self.module.params.get('wait') + count = 0 + firewall_policy = None + while wait: + count += 1 + firewall_policy = self._get_firewall_policy( + source_account_alias, location, firewall_policy_id) + status = firewall_policy.get('status') + if status == 'active' or count > wait_limit: + wait = False + else: + # wait for 2 seconds + sleep(2) + return firewall_policy + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + module = AnsibleModule( + argument_spec=ClcFirewallPolicy._define_module_argument_spec(), + supports_check_mode=True) + + clc_firewall = ClcFirewallPolicy(module) + clc_firewall.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_group.py b/ansible_collections/community/general/plugins/modules/clc_group.py new file mode 100644 index 000000000..88aef2d63 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_group.py @@ -0,0 +1,522 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_group +short_description: Create/delete Server Groups at Centurylink Cloud +description: + - Create or delete Server Groups at Centurylink Centurylink Cloud +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of the Server Group + type: str + required: true + description: + description: + - A description of the Server Group + type: str + required: false + parent: + description: + - The parent group of the server group. If parent is not provided, it creates the group at top level. + type: str + required: false + location: + description: + - Datacenter to create the group in. 
If location is not provided, the group gets created in the default datacenter + associated with the account + type: str + required: false + state: + description: + - Whether to create or delete the group + type: str + default: present + choices: ['present', 'absent'] + wait: + description: + - Whether to wait for the tasks to finish before returning. + type: bool + default: true + required: false +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' + +# Create a Server Group + +--- +- name: Create Server Group + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Create / Verify a Server Group at CenturyLink Cloud + community.general.clc_group: + name: My Cool Server Group + parent: Default Group + state: present + register: clc + + - name: Debug + ansible.builtin.debug: + var: clc + +# Delete a Server Group +- name: Delete Server Group + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Delete / Verify Absent a Server Group at CenturyLink Cloud + community.general.clc_group: + name: My Cool Server Group + parent: Default Group + state: absent + register: clc + + - name: Debug + ansible.builtin.debug: + var: clc +''' + +RETURN = ''' +group: + description: The group information + returned: success + type: dict + sample: + { + "changeInfo":{ + "createdBy":"service.wfad", + "createdDate":"2015-07-29T18:52:47Z", + "modifiedBy":"service.wfad", + "modifiedDate":"2015-07-29T18:52:47Z" + }, + "customFields":[ + + ], + "description":"test group", + "groups":[ + + ], + "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1", + "links":[ + { + "href":"/v2/groups/wfad", + "rel":"createGroup", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad", + "rel":"createServer", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1", + "rel":"self", + "verbs":[ + "GET", + "PATCH", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", + "id":"086ac1dfe0b6411989e8d1b77c4065f0", + "rel":"parentGroup" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults", + "rel":"defaults", + "verbs":[ + "GET", + "POST" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing", + "rel":"billing" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive", + "rel":"archiveGroupAction" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics", + "rel":"statistics" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities", + "rel":"upcomingScheduledActivities" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy", + 
"rel":"horizontalAutoscalePolicyMapping", + "verbs":[ + "GET", + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities", + "rel":"scheduledActivities", + "verbs":[ + "GET", + "POST" + ] + } + ], + "locationId":"UC1", + "name":"test group", + "status":"active", + "type":"default" + } +''' + +__version__ = '${version}' + +import os +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcGroup(object): + + clc = None + root_group = None + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + self.group_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Execute the main code path, and handle the request + :return: none + """ + location = self.module.params.get('location') + group_name = self.module.params.get('name') + parent_name = self.module.params.get('parent') + group_description = self.module.params.get('description') + state = self.module.params.get('state') + + self._set_clc_credentials_from_env() + self.group_dict = self._get_group_tree_for_datacenter( + datacenter=location) + + if state == "absent": + changed, group, requests = self._ensure_group_is_absent( + group_name=group_name, parent_name=parent_name) + if requests: + self._wait_for_requests_to_complete(requests) + else: + changed, group = self._ensure_group_is_present( + group_name=group_name, parent_name=parent_name, group_description=group_description) + try: + group = group.data + except AttributeError: + group = group_name + self.module.exit_json(changed=changed, group=group) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(required=True), + description=dict(), + parent=dict(), + location=dict(), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=True)) + + return argument_spec + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + 
self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _ensure_group_is_absent(self, group_name, parent_name): + """ + Ensure that group_name is absent by deleting it if necessary + :param group_name: string - the name of the clc server group to delete + :param parent_name: string - the name of the parent group for group_name + :return: changed, group + """ + changed = False + group = [] + results = [] + + if self._group_exists(group_name=group_name, parent_name=parent_name): + if not self.module.check_mode: + group.append(group_name) + result = self._delete_group(group_name) + results.append(result) + changed = True + return changed, group, results + + def _delete_group(self, group_name): + """ + Delete the provided server group + :param group_name: string - the server group to delete + :return: none + """ + response = None + group, parent = self.group_dict.get(group_name) + try: + response = group.Delete() + except CLCException as ex: + self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format( + group_name, ex.response_text + )) + return response + + def _ensure_group_is_present( + self, + group_name, + parent_name, + group_description): + """ + Checks to see if a server group exists, creates it if it doesn't. + :param group_name: the name of the group to validate/create + :param parent_name: the name of the parent group for group_name + :param group_description: a short description of the server group (used when creating) + :return: (changed, group) - + changed: Boolean- whether a change was made, + group: A clc group object for the group + """ + if not self.root_group: + raise AssertionError("Implementation Error: Root Group not set") + parent = parent_name if parent_name is not None else self.root_group.name + description = group_description + changed = False + group = group_name + + parent_exists = self._group_exists(group_name=parent, parent_name=None) + child_exists = self._group_exists( + group_name=group_name, + parent_name=parent) + + if parent_exists and child_exists: + group, parent = self.group_dict[group_name] + changed = False + elif parent_exists and not child_exists: + if not self.module.check_mode: + group = self._create_group( + group=group, + parent=parent, + description=description) + changed = True + else: + self.module.fail_json( + msg="parent group: " + + parent + + " does not exist") + + return changed, group + + def _create_group(self, group, parent, description): + """ + Create the provided server group + :param group: clc_sdk.Group - the group to create + :param parent: clc_sdk.Parent - the parent group for {group} + :param description: string - a text description of the group + :return: clc_sdk.Group - the created group + """ + response = None + (parent, grandparent) = self.group_dict[parent] + try: + response = parent.Create(name=group, description=description) + except CLCException as ex: + self.module.fail_json(msg='Failed to create group :{0}. 
{1}'.format( + group, ex.response_text)) + return response + + def _group_exists(self, group_name, parent_name): + """ + Check to see if a group exists + :param group_name: string - the group to check + :param parent_name: string - the parent of group_name + :return: boolean - whether the group exists + """ + result = False + if group_name in self.group_dict: + (group, parent) = self.group_dict[group_name] + if parent_name is None or parent_name == parent.name: + result = True + return result + + def _get_group_tree_for_datacenter(self, datacenter=None): + """ + Walk the tree of groups for a datacenter + :param datacenter: string - the datacenter to walk (ex: 'UC1') + :return: a dictionary of groups and parents + """ + self.root_group = self.clc.v2.Datacenter( + location=datacenter).RootGroup() + return self._walk_groups_recursive( + parent_group=None, + child_group=self.root_group) + + def _walk_groups_recursive(self, parent_group, child_group): + """ + Walk a parent-child tree of groups, starting with the provided child group + :param parent_group: clc_sdk.Group - the parent group to start the walk + :param child_group: clc_sdk.Group - the child group to start the walk + :return: a dictionary of groups and parents + """ + result = {str(child_group): (child_group, parent_group)} + groups = child_group.Subgroups().groups + if len(groups) > 0: + for group in groups: + if group.type != 'default': + continue + + result.update(self._walk_groups_recursive(child_group, group)) + return result + + def _wait_for_requests_to_complete(self, requests_lst): + """ + Waits until the CLC requests are complete if the wait argument is True + :param requests_lst: The list of CLC request objects + :return: none + """ + if not self.module.params['wait']: + return + for request in requests_lst: + request.WaitUntilComplete() + for request_details in request.requests: + if request_details.Status() != 'succeeded': + self.module.fail_json( + msg='Unable to process group request') + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + module = AnsibleModule( + argument_spec=ClcGroup._define_module_argument_spec(), + supports_check_mode=True) + + clc_group = ClcGroup(module) + clc_group.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py b/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py new file mode 100644 index 000000000..675cc1100 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py @@ -0,0 +1,945 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015 CenturyLink +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_loadbalancer +short_description: Create, Delete shared loadbalancers in CenturyLink Cloud +description: + - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of the loadbalancer + type: str + required: true + description: + description: + - A description for the loadbalancer + type: str + alias: + description: + - The alias of your CLC Account + type: str + required: true + location: + description: + - The location of the datacenter where the load balancer resides in + type: str + required: true + method: + description: + -The balancing method for the load balancer pool + type: str + choices: ['leastConnection', 'roundRobin'] + persistence: + description: + - The persistence method for the load balancer + type: str + choices: ['standard', 'sticky'] + port: + description: + - Port to configure on the public-facing side of the load balancer pool + type: str + choices: ['80', '443'] + nodes: + description: + - A list of nodes that needs to be added to the load balancer pool + type: list + default: [] + elements: dict + status: + description: + - The status of the loadbalancer + type: str + default: enabled + choices: ['enabled', 'disabled'] + state: + description: + - Whether to create or delete the load balancer pool + type: str + default: present + choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent'] +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples +- name: Create Loadbalancer + hosts: localhost + connection: local + tasks: + - name: Actually Create things + community.general.clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.123 + privatePort: 80 + state: present + +- name: Add node to an existing loadbalancer pool + hosts: localhost + connection: local + tasks: + - name: Actually Create things + community.general.clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.234 + privatePort: 80 + state: nodes_present + +- name: Remove node from an existing loadbalancer pool + hosts: localhost + connection: local + tasks: + - name: Actually Create things + community.general.clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.234 + privatePort: 80 + state: nodes_absent + +- name: Delete LoadbalancerPool + hosts: localhost + connection: local + tasks: + - name: Actually Delete things + community.general.clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.123 + privatePort: 80 + state: port_absent + +- name: Delete Loadbalancer + hosts: localhost + connection: local + tasks: + - name: Actually Delete things + community.general.clc_loadbalancer: + name: test + description: test + alias: TEST + location: WA1 + port: 443 + nodes: + - ipAddress: 10.11.22.123 + privatePort: 80 + state: absent +''' + +RETURN = ''' +loadbalancer: + description: The load balancer result object from CLC + returned: success + type: dict + sample: + { + "description":"test-lb", + "id":"ab5b18cb81e94ab9925b61d1ca043fb5", + "ipAddress":"66.150.174.197", + "links":[ + { + "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5", + "rel":"self", + "verbs":[ + "GET", + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools", + "rel":"pools", + "verbs":[ + "GET", + "POST" + ] + } + ], + "name":"test-lb", + "pools":[ + + ], + "status":"enabled" + } +''' + +__version__ = '${version}' + +import json +import os +import traceback +from time import sleep + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. 
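+# The guarded import that follows reports a missing SDK through
+# missing_required_lib() rather than a raw ImportError. Upstream suggests
+# installing it with: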
+# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import APIFailedResponse +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcLoadBalancer: + + clc = None + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + self.lb_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Execute the main code path, and handle the request + :return: none + """ + changed = False + result_lb = None + loadbalancer_name = self.module.params.get('name') + loadbalancer_alias = self.module.params.get('alias') + loadbalancer_location = self.module.params.get('location') + loadbalancer_description = self.module.params.get('description') + loadbalancer_port = self.module.params.get('port') + loadbalancer_method = self.module.params.get('method') + loadbalancer_persistence = self.module.params.get('persistence') + loadbalancer_nodes = self.module.params.get('nodes') + loadbalancer_status = self.module.params.get('status') + state = self.module.params.get('state') + + if loadbalancer_description is None: + loadbalancer_description = loadbalancer_name + + self._set_clc_credentials_from_env() + + self.lb_dict = self._get_loadbalancer_list( + alias=loadbalancer_alias, + location=loadbalancer_location) + + if state == 'present': + changed, result_lb, lb_id = self.ensure_loadbalancer_present( + name=loadbalancer_name, + alias=loadbalancer_alias, + location=loadbalancer_location, + description=loadbalancer_description, + status=loadbalancer_status) + if loadbalancer_port: + changed, result_pool, pool_id = self.ensure_loadbalancerpool_present( + lb_id=lb_id, + alias=loadbalancer_alias, + location=loadbalancer_location, + method=loadbalancer_method, + persistence=loadbalancer_persistence, + port=loadbalancer_port) + + if loadbalancer_nodes: + changed, result_nodes = self.ensure_lbpool_nodes_set( + alias=loadbalancer_alias, + location=loadbalancer_location, + name=loadbalancer_name, + port=loadbalancer_port, + nodes=loadbalancer_nodes) + elif state == 'absent': + changed, result_lb = self.ensure_loadbalancer_absent( + name=loadbalancer_name, + alias=loadbalancer_alias, + location=loadbalancer_location) + + elif state == 'port_absent': + changed, result_lb = self.ensure_loadbalancerpool_absent( + alias=loadbalancer_alias, + location=loadbalancer_location, + name=loadbalancer_name, + port=loadbalancer_port) + + elif state == 'nodes_present': + changed, result_lb = self.ensure_lbpool_nodes_present( + alias=loadbalancer_alias, + location=loadbalancer_location, + name=loadbalancer_name, + port=loadbalancer_port, + nodes=loadbalancer_nodes) + + elif state == 'nodes_absent': + changed, result_lb = self.ensure_lbpool_nodes_absent( + alias=loadbalancer_alias, + location=loadbalancer_location, + name=loadbalancer_name, + port=loadbalancer_port, + nodes=loadbalancer_nodes) + + self.module.exit_json(changed=changed, loadbalancer=result_lb) + + def 
ensure_loadbalancer_present( + self, name, alias, location, description, status): + """ + Checks to see if a load balancer exists and creates one if it does not. + :param name: Name of loadbalancer + :param alias: Alias of account + :param location: Datacenter + :param description: Description of loadbalancer + :param status: Enabled / Disabled + :return: (changed, result, lb_id) + changed: Boolean whether a change was made + result: The result object from the CLC load balancer request + lb_id: The load balancer id + """ + changed = False + result = name + lb_id = self._loadbalancer_exists(name=name) + if not lb_id: + if not self.module.check_mode: + result = self.create_loadbalancer(name=name, + alias=alias, + location=location, + description=description, + status=status) + lb_id = result.get('id') + changed = True + + return changed, result, lb_id + + def ensure_loadbalancerpool_present( + self, lb_id, alias, location, method, persistence, port): + """ + Checks to see if a load balancer pool exists and creates one if it does not. + :param lb_id: The loadbalancer id + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param method: the load balancing method + :param persistence: the load balancing persistence type + :param port: the port that the load balancer will listen on + :return: (changed, group, pool_id) - + changed: Boolean whether a change was made + result: The result from the CLC API call + pool_id: The string id of the load balancer pool + """ + changed = False + result = port + if not lb_id: + return changed, None, None + pool_id = self._loadbalancerpool_exists( + alias=alias, + location=location, + port=port, + lb_id=lb_id) + if not pool_id: + if not self.module.check_mode: + result = self.create_loadbalancerpool( + alias=alias, + location=location, + lb_id=lb_id, + method=method, + persistence=persistence, + port=port) + pool_id = result.get('id') + changed = True + + return changed, result, pool_id + + def ensure_loadbalancer_absent(self, name, alias, location): + """ + Checks to see if a load balancer exists and deletes it if it does + :param name: Name of the load balancer + :param alias: Alias of account + :param location: Datacenter + :return: (changed, result) + changed: Boolean whether a change was made + result: The result from the CLC API Call + """ + changed = False + result = name + lb_exists = self._loadbalancer_exists(name=name) + if lb_exists: + if not self.module.check_mode: + result = self.delete_loadbalancer(alias=alias, + location=location, + name=name) + changed = True + return changed, result + + def ensure_loadbalancerpool_absent(self, alias, location, name, port): + """ + Checks to see if a load balancer pool exists and deletes it if it does + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param name: the name of the load balancer + :param port: the port that the load balancer listens on + :return: (changed, result) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + result = None + lb_exists = self._loadbalancer_exists(name=name) + if lb_exists: + lb_id = self._get_loadbalancer_id(name=name) + pool_id = self._loadbalancerpool_exists( + alias=alias, + location=location, + port=port, + lb_id=lb_id) + if pool_id: + changed = True + if not self.module.check_mode: + result = self.delete_loadbalancerpool( + alias=alias, + location=location, + lb_id=lb_id, + pool_id=pool_id) + else: + result = "Pool 
doesn't exist"
+        else:
+            result = "Load balancer doesn't exist"
+        return changed, result
+
+    def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
+        """
+        Checks whether the provided list of nodes matches the pool,
+        and sets the pool to that list if any of the nodes are missing
+        :param alias: The account alias
+        :param location: the datacenter the load balancer resides in
+        :param name: the name of the load balancer
+        :param port: the port that the load balancer will listen on
+        :param nodes: The list of nodes to be set on the pool
+        :return: (changed, result) -
+            changed: Boolean whether a change was made
+            result: The result from the CLC API call
+        """
+        result = {}
+        changed = False
+        lb_exists = self._loadbalancer_exists(name=name)
+        if lb_exists:
+            lb_id = self._get_loadbalancer_id(name=name)
+            pool_id = self._loadbalancerpool_exists(
+                alias=alias,
+                location=location,
+                port=port,
+                lb_id=lb_id)
+            if pool_id:
+                nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
+                                                                  location=location,
+                                                                  lb_id=lb_id,
+                                                                  pool_id=pool_id,
+                                                                  nodes_to_check=nodes)
+                if not nodes_exist:
+                    changed = True
+                    result = self.set_loadbalancernodes(alias=alias,
+                                                        location=location,
+                                                        lb_id=lb_id,
+                                                        pool_id=pool_id,
+                                                        nodes=nodes)
+            else:
+                result = "Pool doesn't exist"
+        else:
+            result = "Load balancer doesn't exist"
+        return changed, result
+
+    def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
+        """
+        Checks whether the provided list of nodes exists for the pool and adds the missing nodes to the pool
+        :param alias: The account alias
+        :param location: the datacenter the load balancer resides in
+        :param name: the name of the load balancer
+        :param port: the port that the load balancer will listen on
+        :param nodes: the list of nodes to be added
+        :return: (changed, result) -
+            changed: Boolean whether a change was made
+            result: The result from the CLC API call
+        """
+        changed = False
+        lb_exists = self._loadbalancer_exists(name=name)
+        if lb_exists:
+            lb_id = self._get_loadbalancer_id(name=name)
+            pool_id = self._loadbalancerpool_exists(
+                alias=alias,
+                location=location,
+                port=port,
+                lb_id=lb_id)
+            if pool_id:
+                changed, result = self.add_lbpool_nodes(alias=alias,
+                                                        location=location,
+                                                        lb_id=lb_id,
+                                                        pool_id=pool_id,
+                                                        nodes_to_add=nodes)
+            else:
+                result = "Pool doesn't exist"
+        else:
+            result = "Load balancer doesn't exist"
+        return changed, result
+
+    def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
+        """
+        Checks whether the provided list of nodes exists for the pool and removes any that are found
+        :param alias: The account alias
+        :param location: the datacenter the load balancer resides in
+        :param name: the name of the load balancer
+        :param port: the port that the load balancer will listen on
+        :param nodes: the list of nodes to be removed
+        :return: (changed, result) -
+            changed: Boolean whether a change was made
+            result: The result from the CLC API call
+        """
+        changed = False
+        lb_exists = self._loadbalancer_exists(name=name)
+        if lb_exists:
+            lb_id = self._get_loadbalancer_id(name=name)
+            pool_id = self._loadbalancerpool_exists(
+                alias=alias,
+                location=location,
+                port=port,
+                lb_id=lb_id)
+            if pool_id:
+                changed, result = self.remove_lbpool_nodes(alias=alias,
+                                                           location=location,
+                                                           lb_id=lb_id,
+                                                           pool_id=pool_id,
+                                                           nodes_to_remove=nodes)
+            else:
+                result = "Pool doesn't exist"
+        else:
+            result = "Load balancer doesn't exist"
+        return changed, result
+
+    def create_loadbalancer(self, name, alias, location, description,
status): + """ + Create a loadbalancer w/ params + :param name: Name of loadbalancer + :param alias: Alias of account + :param location: Datacenter + :param description: Description for loadbalancer to be created + :param status: Enabled / Disabled + :return: result: The result from the CLC API call + """ + result = None + try: + result = self.clc.v2.API.Call('POST', + '/v2/sharedLoadBalancers/%s/%s' % (alias, + location), + json.dumps({"name": name, + "description": description, + "status": status})) + sleep(1) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to create load balancer "{0}". {1}'.format( + name, str(e.response_text))) + return result + + def create_loadbalancerpool( + self, alias, location, lb_id, method, persistence, port): + """ + Creates a pool on the provided load balancer + :param alias: the account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param method: the load balancing method + :param persistence: the load balancing persistence type + :param port: the port that the load balancer will listen on + :return: result: The result from the create API call + """ + result = None + try: + result = self.clc.v2.API.Call( + 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % + (alias, location, lb_id), json.dumps( + { + "port": port, "method": method, "persistence": persistence + })) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to create pool for load balancer id "{0}". {1}'.format( + lb_id, str(e.response_text))) + return result + + def delete_loadbalancer(self, alias, location, name): + """ + Delete CLC loadbalancer + :param alias: Alias for account + :param location: Datacenter + :param name: Name of the loadbalancer to delete + :return: result: The result from the CLC API call + """ + result = None + lb_id = self._get_loadbalancer_id(name=name) + try: + result = self.clc.v2.API.Call( + 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' % + (alias, location, lb_id)) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to delete load balancer "{0}". {1}'.format( + name, str(e.response_text))) + return result + + def delete_loadbalancerpool(self, alias, location, lb_id, pool_id): + """ + Delete the pool on the provided load balancer + :param alias: The account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param pool_id: the id string of the load balancer pool + :return: result: The result from the delete API call + """ + result = None + try: + result = self.clc.v2.API.Call( + 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' % + (alias, location, lb_id, pool_id)) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to delete pool for load balancer id "{0}". 
{1}'.format(
+                    lb_id, str(e.response_text)))
+        return result
+
+    def _get_loadbalancer_id(self, name):
+        """
+        Retrieves the unique ID of a loadbalancer
+        :param name: Name of loadbalancer
+        :return: Unique ID of the loadbalancer
+        """
+        # Named lb_id to avoid shadowing the builtin id()
+        lb_id = None
+        for lb in self.lb_dict:
+            if lb.get('name') == name:
+                lb_id = lb.get('id')
+        return lb_id
+
+    def _get_loadbalancer_list(self, alias, location):
+        """
+        Retrieve a list of loadbalancers
+        :param alias: Alias for account
+        :param location: Datacenter
+        :return: JSON data for all loadbalancers at datacenter
+        """
+        result = None
+        try:
+            result = self.clc.v2.API.Call(
+                'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+        except APIFailedResponse as e:
+            self.module.fail_json(
+                msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+                    alias, str(e.response_text)))
+        return result
+
+    def _loadbalancer_exists(self, name):
+        """
+        Verify a loadbalancer exists
+        :param name: Name of loadbalancer
+        :return: False or the ID of the existing loadbalancer
+        """
+        result = False
+
+        for lb in self.lb_dict:
+            if lb.get('name') == name:
+                result = lb.get('id')
+        return result
+
+    def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+        """
+        Checks to see if a pool exists on the specified port on the provided load balancer
+        :param alias: the account alias
+        :param location: the datacenter the load balancer resides in
+        :param port: the port to check and see if it exists
+        :param lb_id: the id string of the provided load balancer
+        :return: result: The id string of the pool or False
+        """
+        result = False
+        try:
+            pool_list = self.clc.v2.API.Call(
+                'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+                (alias, location, lb_id))
+        except APIFailedResponse as e:
+            return self.module.fail_json(
+                msg='Unable to fetch the load balancer pools for load balancer id: {0}.
{1}'.format(
+                    lb_id, str(e.response_text)))
+        for pool in pool_list:
+            if int(pool.get('port')) == int(port):
+                result = pool.get('id')
+        return result
+
+    def _loadbalancerpool_nodes_exists(
+            self, alias, location, lb_id, pool_id, nodes_to_check):
+        """
+        Checks to see if all of the given nodes exist in the specified pool on the provided load balancer
+        :param alias: the account alias
+        :param location: the datacenter the load balancer resides in
+        :param lb_id: the id string of the provided load balancer
+        :param pool_id: the id string of the load balancer pool
+        :param nodes_to_check: the list of nodes to check for
+        :return: result: True / False indicating if all of the given nodes exist
+        """
+        result = True
+        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+        for node in nodes_to_check:
+            if not node.get('status'):
+                node['status'] = 'enabled'
+            if node not in nodes:
+                # Any missing node means the pool does not match the request.
+                result = False
+        return result
+
+    def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
+        """
+        Updates the nodes on the provided pool
+        :param alias: the account alias
+        :param location: the datacenter the load balancer resides in
+        :param lb_id: the id string of the load balancer
+        :param pool_id: the id string of the pool
+        :param nodes: a list of dictionaries containing the nodes to set
+        :return: result: The result from the CLC API call
+        """
+        result = None
+        if not lb_id:
+            return result
+        if not self.module.check_mode:
+            try:
+                result = self.clc.v2.API.Call('PUT',
+                                              '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+                                              % (alias, location, lb_id, pool_id), json.dumps(nodes))
+            except APIFailedResponse as e:
+                self.module.fail_json(
+                    msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
+                        pool_id, str(e.response_text)))
+        return result
+
+    def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
+        """
+        Add nodes to the provided pool
+        :param alias: the account alias
+        :param location: the datacenter the load balancer resides in
+        :param lb_id: the id string of the load balancer
+        :param pool_id: the id string of the pool
+        :param nodes_to_add: a list of dictionaries containing the nodes to add
+        :return: (changed, result) -
+            changed: Boolean whether a change was made
+            result: The result from the CLC API call
+        """
+        changed = False
+        result = {}
+        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+        for node in nodes_to_add:
+            if not node.get('status'):
+                node['status'] = 'enabled'
+            if node not in nodes:
+                changed = True
+                nodes.append(node)
+        if changed is True and not self.module.check_mode:
+            result = self.set_loadbalancernodes(
+                alias,
+                location,
+                lb_id,
+                pool_id,
+                nodes)
+        return changed, result
+
+    def remove_lbpool_nodes(
+            self, alias, location, lb_id, pool_id, nodes_to_remove):
+        """
+        Removes nodes from the provided pool
+        :param alias: the account alias
+        :param location: the datacenter the load balancer resides in
+        :param lb_id: the id string of the load balancer
+        :param pool_id: the id string of the pool
+        :param nodes_to_remove: a list of dictionaries containing the nodes to remove
+        :return: (changed, result) -
+            changed: Boolean whether a change was made
+            result: The result from the CLC API call
+        """
+        changed = False
+        result = {}
+        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+        for node in nodes_to_remove:
+            if not node.get('status'):
+                node['status'] = 'enabled'
+            if node in nodes:
+                changed = True
+                nodes.remove(node)
+        if changed is True and not self.module.check_mode:
+
result = self.set_loadbalancernodes( + alias, + location, + lb_id, + pool_id, + nodes) + return changed, result + + def _get_lbpool_nodes(self, alias, location, lb_id, pool_id): + """ + Return the list of nodes available to the provided load balancer pool + :param alias: the account alias + :param location: the datacenter the load balancer resides in + :param lb_id: the id string of the load balancer + :param pool_id: the id string of the pool + :return: result: The list of nodes + """ + result = None + try: + result = self.clc.v2.API.Call('GET', + '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' + % (alias, location, lb_id, pool_id)) + except APIFailedResponse as e: + self.module.fail_json( + msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format( + pool_id, str(e.response_text))) + return result + + @staticmethod + def define_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(required=True), + description=dict(), + location=dict(required=True), + alias=dict(required=True), + port=dict(choices=[80, 443]), + method=dict(choices=['leastConnection', 'roundRobin']), + persistence=dict(choices=['standard', 'sticky']), + nodes=dict(type='list', default=[], elements='dict'), + status=dict(default='enabled', choices=['enabled', 'disabled']), + state=dict( + default='present', + choices=[ + 'present', + 'absent', + 'port_absent', + 'nodes_present', + 'nodes_absent']) + ) + return argument_spec + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. 
+    :return: none
+    """
+    module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
+                           supports_check_mode=True)
+    clc_loadbalancer = ClcLoadBalancer(module)
+    clc_loadbalancer.process_request()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/clc_modify_server.py b/ansible_collections/community/general/plugins/modules/clc_modify_server.py
new file mode 100644
index 000000000..b375d9d47
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_modify_server.py
@@ -0,0 +1,975 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_modify_server
+short_description: Modify servers in CenturyLink Cloud
+description:
+  - An Ansible module to modify servers in CenturyLink Cloud.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  server_ids:
+    description:
+      - A list of server IDs to modify.
+    type: list
+    required: true
+    elements: str
+  cpu:
+    description:
+      - How many CPUs to update on the server.
+    type: str
+  memory:
+    description:
+      - Memory (in GB) to set on the server.
+    type: str
+  anti_affinity_policy_id:
+    description:
+      - The anti affinity policy id to be set for a hyper scale server.
+        This is mutually exclusive with 'anti_affinity_policy_name'.
+    type: str
+  anti_affinity_policy_name:
+    description:
+      - The anti affinity policy name to be set for a hyper scale server.
+        This is mutually exclusive with 'anti_affinity_policy_id'.
+    type: str
+  alert_policy_id:
+    description:
+      - The alert policy id to be associated to the server.
+        This is mutually exclusive with 'alert_policy_name'.
+    type: str
+  alert_policy_name:
+    description:
+      - The alert policy name to be associated to the server.
+        This is mutually exclusive with 'alert_policy_id'.
+    type: str
+  state:
+    description:
+      - The state to ensure that the provided resources are in.
+    type: str
+    default: 'present'
+    choices: ['present', 'absent']
+  wait:
+    description:
+      - Whether to wait for the provisioning tasks to finish before returning.
+    type: bool
+    default: true
+requirements:
+  - python = 2.7
+  - requests >= 2.5.0
+  - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+  - To use this module, it is required to set the below environment variables, which enable access to the
+    CenturyLink Cloud.
+  - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud.
+  - CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud.
+  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+    CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+  - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+  - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud.
+  - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
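+  - For example (illustrative only), token-based access can be prepared with 'export CLC_V2_API_TOKEN=<token>'
+    and 'export CLC_ACCT_ALIAS=<alias>' before invoking a playbook.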
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Set the cpu count to 4 on a server
+  community.general.clc_modify_server:
+    server_ids:
+      - UC1TESTSVR01
+      - UC1TESTSVR02
+    cpu: 4
+    state: present
+
+- name: Set the memory to 8GB on a server
+  community.general.clc_modify_server:
+    server_ids:
+      - UC1TESTSVR01
+      - UC1TESTSVR02
+    memory: 8
+    state: present
+
+- name: Set the anti affinity policy on a server
+  community.general.clc_modify_server:
+    server_ids:
+      - UC1TESTSVR01
+      - UC1TESTSVR02
+    anti_affinity_policy_name: 'aa_policy'
+    state: present
+
+- name: Remove the anti affinity policy on a server
+  community.general.clc_modify_server:
+    server_ids:
+      - UC1TESTSVR01
+      - UC1TESTSVR02
+    anti_affinity_policy_name: 'aa_policy'
+    state: absent
+
+- name: Add the alert policy on a server
+  community.general.clc_modify_server:
+    server_ids:
+      - UC1TESTSVR01
+      - UC1TESTSVR02
+    alert_policy_name: 'alert_policy'
+    state: present
+
+- name: Remove the alert policy on a server
+  community.general.clc_modify_server:
+    server_ids:
+      - UC1TESTSVR01
+      - UC1TESTSVR02
+    alert_policy_name: 'alert_policy'
+    state: absent
+
+- name: Set the memory to 16GB and cpu to 8 cores on a list of servers
+  community.general.clc_modify_server:
+    server_ids:
+      - UC1TESTSVR01
+      - UC1TESTSVR02
+    cpu: 8
+    memory: 16
+    state: present
+'''
+
+RETURN = '''
+server_ids:
+    description: The list of server ids that are changed
+    returned: success
+    type: list
+    sample:
+        [
+            "UC1TEST-SVR01",
+            "UC1TEST-SVR02"
+        ]
+servers:
+    description: The list of server objects that are changed
+    returned: success
+    type: list
+    sample:
+        [
+           {
+              "changeInfo":{
+                 "createdBy":"service.wfad",
+                 "createdDate":1438196820,
+                 "modifiedBy":"service.wfad",
+                 "modifiedDate":1438196820
+              },
+              "description":"test-server",
+              "details":{
+                 "alertPolicies":[
+
+                 ],
+                 "cpu":1,
+                 "customFields":[
+
+                 ],
+                 "diskCount":3,
+                 "disks":[
+                    {
+                       "id":"0:0",
+                       "partitionPaths":[
+
+                       ],
+                       "sizeGB":1
+                    },
+                    {
+                       "id":"0:1",
+                       "partitionPaths":[
+
+                       ],
+                       "sizeGB":2
+                    },
+                    {
+                       "id":"0:2",
+                       "partitionPaths":[
+
+                       ],
+                       "sizeGB":14
+                    }
+                 ],
+                 "hostName":"",
+                 "inMaintenanceMode":false,
+                 "ipAddresses":[
+                    {
+                       "internal":"10.1.1.1"
+                    }
+                 ],
+                 "memoryGB":1,
+                 "memoryMB":1024,
+                 "partitions":[
+
+                 ],
+                 "powerState":"started",
+                 "snapshots":[
+
+                 ],
+                 "storageGB":17
+              },
+              "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+              "id":"test-server",
+              "ipaddress":"10.120.45.23",
+              "isTemplate":false,
+              "links":[
+                 {
+                    "href":"/v2/servers/wfad/test-server",
+                    "id":"test-server",
+                    "rel":"self",
+                    "verbs":[
+                       "GET",
+                       "PATCH",
+                       "DELETE"
+                    ]
+                 },
+                 {
+                    "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+                    "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+                    "rel":"group"
+                 },
+                 {
+                    "href":"/v2/accounts/wfad",
+                    "id":"wfad",
+                    "rel":"account"
+                 },
+                 {
+                    "href":"/v2/billing/wfad/serverPricing/test-server",
+                    "rel":"billing"
+                 },
+                 {
+                    "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+                    "rel":"publicIPAddresses",
+                    "verbs":[
+                       "POST"
+                    ]
+                 },
+                 {
+                    "href":"/v2/servers/wfad/test-server/credentials",
+                    "rel":"credentials"
+                 },
+                 {
+                    "href":"/v2/servers/wfad/test-server/statistics",
+                    "rel":"statistics"
+                 },
+                 {
+                    "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+                    "rel":"upcomingScheduledActivities"
+                 },
+                 {
+                    "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+                    "rel":"scheduledActivities",
+                    "verbs":[
+                       "GET",
+                       "POST"
+ ] + }, + { + "href":"/v2/servers/wfad/test-server/capabilities", + "rel":"capabilities" + }, + { + "href":"/v2/servers/wfad/test-server/alertPolicies", + "rel":"alertPolicyMappings", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", + "rel":"antiAffinityPolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", + "rel":"cpuAutoscalePolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + } + ], + "locationId":"UC1", + "name":"test-server", + "os":"ubuntu14_64Bit", + "osType":"Ubuntu 14 64-bit", + "status":"active", + "storageType":"standard", + "type":"standard" + } + ] +''' + +__version__ = '${version}' + +import json +import os +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException + from clc import APIFailedResponse +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcModifyServer: + clc = clc_sdk + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + self._set_clc_credentials_from_env() + + p = self.module.params + cpu = p.get('cpu') + memory = p.get('memory') + state = p.get('state') + if state == 'absent' and (cpu or memory): + return self.module.fail_json( + msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments') + + server_ids = p['server_ids'] + if not isinstance(server_ids, list): + return self.module.fail_json( + msg='server_ids needs to be a list of instances to modify: %s' % + server_ids) + + (changed, server_dict_array, changed_server_ids) = self._modify_servers( + server_ids=server_ids) + + self.module.exit_json( + changed=changed, + server_ids=changed_server_ids, + servers=server_dict_array) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + server_ids=dict(type='list', required=True, elements='str'), + state=dict(default='present', choices=['present', 'absent']), + cpu=dict(), + memory=dict(), + anti_affinity_policy_id=dict(), + anti_affinity_policy_name=dict(), + alert_policy_id=dict(), + alert_policy_name=dict(), + wait=dict(type='bool', default=True) + ) + mutually_exclusive = [ + ['anti_affinity_policy_id', 'anti_affinity_policy_name'], + ['alert_policy_id', 'alert_policy_name'] + ] + return {"argument_spec": argument_spec, + "mutually_exclusive": mutually_exclusive} + + def 
_set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _get_servers_from_clc(self, server_list, message): + """ + Internal function to fetch list of CLC server objects from a list of server ids + :param server_list: The list of server ids + :param message: the error message to throw in case of any error + :return the list of CLC server objects + """ + try: + return self.clc.v2.Servers(server_list).servers + except CLCException as ex: + return self.module.fail_json(msg=message + ': %s' % ex.message) + + def _modify_servers(self, server_ids): + """ + modify the servers configuration on the provided list + :param server_ids: list of servers to modify + :return: a list of dictionaries with server information about the servers that were modified + """ + p = self.module.params + state = p.get('state') + server_params = { + 'cpu': p.get('cpu'), + 'memory': p.get('memory'), + 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), + 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), + 'alert_policy_id': p.get('alert_policy_id'), + 'alert_policy_name': p.get('alert_policy_name'), + } + changed = False + server_changed = False + aa_changed = False + ap_changed = False + server_dict_array = [] + result_server_ids = [] + request_list = [] + changed_servers = [] + + if not isinstance(server_ids, list) or len(server_ids) < 1: + return self.module.fail_json( + msg='server_ids should be a list of servers, aborting') + + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + for server in servers: + if state == 'present': + server_changed, server_result = self._ensure_server_config( + server, server_params) + if server_result: + request_list.append(server_result) + aa_changed = self._ensure_aa_policy_present( + server, + server_params) + ap_changed = self._ensure_alert_policy_present( + server, + server_params) + elif state == 'absent': + aa_changed = self._ensure_aa_policy_absent( + server, + server_params) + ap_changed = self._ensure_alert_policy_absent( + server, + server_params) + if server_changed or aa_changed or ap_changed: + changed_servers.append(server) + changed = True + + self._wait_for_requests(self.module, request_list) + self._refresh_servers(self.module, changed_servers) + + for server in changed_servers: + server_dict_array.append(server.data) + result_server_ids.append(server.id) + + return changed, server_dict_array, result_server_ids + + def _ensure_server_config( + self, server, server_params): + """ + ensures the server is updated with the provided cpu and memory + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: 
Boolean whether a change was made + result: The result from the CLC API call + """ + cpu = server_params.get('cpu') + memory = server_params.get('memory') + changed = False + result = None + + if not cpu: + cpu = server.cpu + if not memory: + memory = server.memory + if memory != server.memory or cpu != server.cpu: + if not self.module.check_mode: + result = self._modify_clc_server( + self.clc, + self.module, + server.id, + cpu, + memory) + changed = True + return changed, result + + @staticmethod + def _modify_clc_server(clc, module, server_id, cpu, memory): + """ + Modify the memory or CPU of a clc server. + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param server_id: id of the server to modify + :param cpu: the new cpu value + :param memory: the new memory value + :return: the result of CLC API call + """ + result = None + acct_alias = clc.v2.Account.GetAlias() + try: + # Update the server configuration + job_obj = clc.v2.API.Call('PATCH', + 'servers/%s/%s' % (acct_alias, + server_id), + json.dumps([{"op": "set", + "member": "memory", + "value": memory}, + {"op": "set", + "member": "cpu", + "value": cpu}])) + result = clc.v2.Requests(job_obj) + except APIFailedResponse as ex: + module.fail_json( + msg='Unable to update the server configuration for server : "{0}". {1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _wait_for_requests(module, request_list): + """ + Block until server provisioning requests are completed. + :param module: the AnsibleModule object + :param request_list: a list of clc-sdk.Request instances + :return: none + """ + wait = module.params.get('wait') + if wait: + # Requests.WaitUntilComplete() returns the count of failed requests + failed_requests_count = sum( + [request.WaitUntilComplete() for request in request_list]) + + if failed_requests_count > 0: + module.fail_json( + msg='Unable to process modify server request') + + @staticmethod + def _refresh_servers(module, servers): + """ + Loop through a list of servers and refresh them. + :param module: the AnsibleModule object + :param servers: list of clc-sdk.Server instances to refresh + :return: none + """ + for server in servers: + try: + server.Refresh() + except CLCException as ex: + module.fail_json(msg='Unable to refresh the server {0}. 
{1}'.format( + server.id, ex.message + )) + + def _ensure_aa_policy_present( + self, server, server_params): + """ + ensures the server is updated with the provided anti affinity policy + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + acct_alias = self.clc.v2.Account.GetAlias() + + aa_policy_id = server_params.get('anti_affinity_policy_id') + aa_policy_name = server_params.get('anti_affinity_policy_name') + if not aa_policy_id and aa_policy_name: + aa_policy_id = self._get_aa_policy_id_by_name( + self.clc, + self.module, + acct_alias, + aa_policy_name) + current_aa_policy_id = self._get_aa_policy_id_of_server( + self.clc, + self.module, + acct_alias, + server.id) + + if aa_policy_id and aa_policy_id != current_aa_policy_id: + self._modify_aa_policy( + self.clc, + self.module, + acct_alias, + server.id, + aa_policy_id) + changed = True + return changed + + def _ensure_aa_policy_absent( + self, server, server_params): + """ + ensures the provided anti affinity policy is removed from the server + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + acct_alias = self.clc.v2.Account.GetAlias() + aa_policy_id = server_params.get('anti_affinity_policy_id') + aa_policy_name = server_params.get('anti_affinity_policy_name') + if not aa_policy_id and aa_policy_name: + aa_policy_id = self._get_aa_policy_id_by_name( + self.clc, + self.module, + acct_alias, + aa_policy_name) + current_aa_policy_id = self._get_aa_policy_id_of_server( + self.clc, + self.module, + acct_alias, + server.id) + + if aa_policy_id and aa_policy_id == current_aa_policy_id: + self._delete_aa_policy( + self.clc, + self.module, + acct_alias, + server.id) + changed = True + return changed + + @staticmethod + def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id): + """ + modifies the anti affinity policy of the CLC server + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param acct_alias: the CLC account alias + :param server_id: the CLC server id + :param aa_policy_id: the anti affinity policy id + :return: result: The result from the CLC API call + """ + result = None + if not module.check_mode: + try: + result = clc.v2.API.Call('PUT', + 'servers/%s/%s/antiAffinityPolicy' % ( + acct_alias, + server_id), + json.dumps({"id": aa_policy_id})) + except APIFailedResponse as ex: + module.fail_json( + msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _delete_aa_policy(clc, module, acct_alias, server_id): + """ + Delete the anti affinity policy of the CLC server + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param acct_alias: the CLC account alias + :param server_id: the CLC server id + :return: result: The result from the CLC API call + """ + result = None + if not module.check_mode: + try: + result = clc.v2.API.Call('DELETE', + 'servers/%s/%s/antiAffinityPolicy' % ( + acct_alias, + server_id), + json.dumps({})) + except APIFailedResponse as ex: + module.fail_json( + msg='Unable to delete anti affinity policy to server : "{0}". 
{1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name): + """ + retrieves the anti affinity policy id of the server based on the name of the policy + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param alias: the CLC account alias + :param aa_policy_name: the anti affinity policy name + :return: aa_policy_id: The anti affinity policy id + """ + aa_policy_id = None + try: + aa_policies = clc.v2.API.Call(method='GET', + url='antiAffinityPolicies/%s' % alias) + except APIFailedResponse as ex: + return module.fail_json( + msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format( + alias, str(ex.response_text))) + for aa_policy in aa_policies.get('items'): + if aa_policy.get('name') == aa_policy_name: + if not aa_policy_id: + aa_policy_id = aa_policy.get('id') + else: + return module.fail_json( + msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) + if not aa_policy_id: + module.fail_json( + msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) + return aa_policy_id + + @staticmethod + def _get_aa_policy_id_of_server(clc, module, alias, server_id): + """ + retrieves the anti affinity policy id of the server based on the CLC server id + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param alias: the CLC account alias + :param server_id: the CLC server id + :return: aa_policy_id: The anti affinity policy id + """ + aa_policy_id = None + try: + result = clc.v2.API.Call( + method='GET', url='servers/%s/%s/antiAffinityPolicy' % + (alias, server_id)) + aa_policy_id = result.get('id') + except APIFailedResponse as ex: + if ex.response_status_code != 404: + module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". 
{1}'.format( + server_id, str(ex.response_text))) + return aa_policy_id + + def _ensure_alert_policy_present( + self, server, server_params): + """ + ensures the server is updated with the provided alert policy + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + acct_alias = self.clc.v2.Account.GetAlias() + alert_policy_id = server_params.get('alert_policy_id') + alert_policy_name = server_params.get('alert_policy_name') + if not alert_policy_id and alert_policy_name: + alert_policy_id = self._get_alert_policy_id_by_name( + self.clc, + self.module, + acct_alias, + alert_policy_name) + if alert_policy_id and not self._alert_policy_exists( + server, alert_policy_id): + self._add_alert_policy_to_server( + self.clc, + self.module, + acct_alias, + server.id, + alert_policy_id) + changed = True + return changed + + def _ensure_alert_policy_absent( + self, server, server_params): + """ + ensures the alert policy is removed from the server + :param server: the CLC server object + :param server_params: the dictionary of server parameters + :return: (changed, group) - + changed: Boolean whether a change was made + result: The result from the CLC API call + """ + changed = False + + acct_alias = self.clc.v2.Account.GetAlias() + alert_policy_id = server_params.get('alert_policy_id') + alert_policy_name = server_params.get('alert_policy_name') + if not alert_policy_id and alert_policy_name: + alert_policy_id = self._get_alert_policy_id_by_name( + self.clc, + self.module, + acct_alias, + alert_policy_name) + + if alert_policy_id and self._alert_policy_exists( + server, alert_policy_id): + self._remove_alert_policy_to_server( + self.clc, + self.module, + acct_alias, + server.id, + alert_policy_id) + changed = True + return changed + + @staticmethod + def _add_alert_policy_to_server( + clc, module, acct_alias, server_id, alert_policy_id): + """ + add the alert policy to CLC server + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param acct_alias: the CLC account alias + :param server_id: the CLC server id + :param alert_policy_id: the alert policy id + :return: result: The result from the CLC API call + """ + result = None + if not module.check_mode: + try: + result = clc.v2.API.Call('POST', + 'servers/%s/%s/alertPolicies' % ( + acct_alias, + server_id), + json.dumps({"id": alert_policy_id})) + except APIFailedResponse as ex: + module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _remove_alert_policy_to_server( + clc, module, acct_alias, server_id, alert_policy_id): + """ + remove the alert policy to the CLC server + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param acct_alias: the CLC account alias + :param server_id: the CLC server id + :param alert_policy_id: the alert policy id + :return: result: The result from the CLC API call + """ + result = None + if not module.check_mode: + try: + result = clc.v2.API.Call('DELETE', + 'servers/%s/%s/alertPolicies/%s' + % (acct_alias, server_id, alert_policy_id)) + except APIFailedResponse as ex: + module.fail_json(msg='Unable to remove alert policy from the server : "{0}". 
{1}'.format( + server_id, str(ex.response_text))) + return result + + @staticmethod + def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): + """ + retrieves the alert policy id of the server based on the name of the policy + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param alias: the CLC account alias + :param alert_policy_name: the alert policy name + :return: alert_policy_id: The alert policy id + """ + alert_policy_id = None + try: + alert_policies = clc.v2.API.Call(method='GET', + url='alertPolicies/%s' % alias) + except APIFailedResponse as ex: + return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format( + alias, str(ex.response_text))) + for alert_policy in alert_policies.get('items'): + if alert_policy.get('name') == alert_policy_name: + if not alert_policy_id: + alert_policy_id = alert_policy.get('id') + else: + return module.fail_json( + msg='multiple alert policies were found with policy name : %s' % alert_policy_name) + return alert_policy_id + + @staticmethod + def _alert_policy_exists(server, alert_policy_id): + """ + Checks if the alert policy exists for the server + :param server: the clc server object + :param alert_policy_id: the alert policy + :return: True: if the given alert policy id associated to the server, False otherwise + """ + result = False + alert_policies = server.alertPolicies + if alert_policies: + for alert_policy in alert_policies: + if alert_policy.get('id') == alert_policy_id: + result = True + return result + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + + argument_dict = ClcModifyServer._define_module_argument_spec() + module = AnsibleModule(supports_check_mode=True, **argument_dict) + clc_modify_server = ClcModifyServer(module) + clc_modify_server.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_publicip.py b/ansible_collections/community/general/plugins/modules/clc_publicip.py new file mode 100644 index 000000000..c1bffcea0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_publicip.py @@ -0,0 +1,369 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_publicip +short_description: Add and Delete public ips on servers in CenturyLink Cloud +description: + - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + protocol: + description: + - The protocol that the public IP will listen for. + type: str + default: TCP + choices: ['TCP', 'UDP', 'ICMP'] + ports: + description: + - A list of ports to expose. 
This is required when state is 'present' + type: list + elements: int + server_ids: + description: + - A list of servers to create public ips on. + type: list + required: true + elements: str + state: + description: + - Determine whether to create or delete public IPs. If present module will not create a second public ip if one + already exists. + type: str + default: present + choices: ['present', 'absent'] + wait: + description: + - Whether to wait for the tasks to finish before returning. + type: bool + default: true +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +- name: Add Public IP to Server + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Create Public IP For Servers + community.general.clc_publicip: + protocol: TCP + ports: + - 80 + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + state: present + register: clc + + - name: Debug + ansible.builtin.debug: + var: clc + +- name: Delete Public IP from Server + hosts: localhost + gather_facts: false + connection: local + tasks: + - name: Create Public IP For Servers + community.general.clc_publicip: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + state: absent + register: clc + + - name: Debug + ansible.builtin.debug: + var: clc +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +''' + +__version__ = '${version}' + +import os +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. 
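+# Same guarded-import pattern as the other CLC modules in this collection:
+# an ImportError is captured and surfaced later via missing_required_lib().
+# The suggested install is: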
+# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcPublicIp(object): + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + self._set_clc_credentials_from_env() + params = self.module.params + server_ids = params['server_ids'] + ports = params['ports'] + protocol = params['protocol'] + state = params['state'] + + if state == 'present': + changed, changed_server_ids, requests = self.ensure_public_ip_present( + server_ids=server_ids, protocol=protocol, ports=ports) + elif state == 'absent': + changed, changed_server_ids, requests = self.ensure_public_ip_absent( + server_ids=server_ids) + else: + return self.module.fail_json(msg="Unknown State: " + state) + self._wait_for_requests_to_complete(requests) + return self.module.exit_json(changed=changed, + server_ids=changed_server_ids) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + server_ids=dict(type='list', required=True, elements='str'), + protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), + ports=dict(type='list', elements='int'), + wait=dict(type='bool', default=True), + state=dict(default='present', choices=['present', 'absent']), + ) + return argument_spec + + def ensure_public_ip_present(self, server_ids, protocol, ports): + """ + Ensures the given server ids having the public ip available + :param server_ids: the list of server ids + :param protocol: the ip protocol + :param ports: the list of ports to expose + :return: (changed, changed_server_ids, results) + changed: A flag indicating if there is any change + changed_server_ids : the list of server ids that are changed + results: The result list from clc public ip call + """ + changed = False + results = [] + changed_server_ids = [] + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.PublicIPs().public_ips) == 0] + ports_to_expose = [{'protocol': protocol, 'port': port} + for port in ports] + for server in servers_to_change: + if not self.module.check_mode: + result = self._add_publicip_to_server(server, ports_to_expose) + results.append(result) + changed_server_ids.append(server.id) + changed = True + return changed, changed_server_ids, results + + def _add_publicip_to_server(self, server, ports_to_expose): + result = None + try: + result = server.PublicIPs().Add(ports_to_expose) + except CLCException as ex: + self.module.fail_json(msg='Failed to add public ip to the server : {0}. 
{1}'.format( + server.id, ex.response_text + )) + return result + + def ensure_public_ip_absent(self, server_ids): + """ + Ensure the given server ids have any existing public ips removed + :param server_ids: the list of server ids + :return: (changed, changed_server_ids, results) + changed: A flag indicating if there is any change + changed_server_ids: the list of server ids that are changed + results: The result list from the clc public ip call + """ + changed = False + results = [] + changed_server_ids = [] + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.PublicIPs().public_ips) > 0] + for server in servers_to_change: + if not self.module.check_mode: + result = self._remove_publicip_from_server(server) + results.append(result) + changed_server_ids.append(server.id) + changed = True + return changed, changed_server_ids, results + + def _remove_publicip_from_server(self, server): + result = None + try: + for ip_address in server.PublicIPs().public_ips: + result = ip_address.Delete() + except CLCException as ex: + self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format( + server.id, ex.response_text + )) + return result + + def _wait_for_requests_to_complete(self, requests_lst): + """ + Waits until the CLC requests are complete if the wait argument is True + :param requests_lst: The list of CLC request objects + :return: none + """ + if not self.module.params['wait']: + return + for request in requests_lst: + request.WaitUntilComplete() + for request_details in request.requests: + if request_details.Status() != 'succeeded': + self.module.fail_json( + msg='Unable to process public ip request') + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _get_servers_from_clc(self, server_ids, message): + """ + Get the list of servers from the CLC API + """ + try: + return self.clc.v2.Servers(server_ids).servers + except CLCException as exception: + self.module.fail_json(msg=message + ': %s' % exception) + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request.
+ :return: none + """ + module = AnsibleModule( + argument_spec=ClcPublicIp._define_module_argument_spec(), + supports_check_mode=True + ) + clc_public_ip = ClcPublicIp(module) + clc_public_ip.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_server.py b/ansible_collections/community/general/plugins/modules/clc_server.py new file mode 100644 index 000000000..d2d019ff0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_server.py @@ -0,0 +1,1570 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_server +short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud +description: + - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + additional_disks: + description: + - The list of additional disks for the server. + type: list + elements: dict + default: [] + add_public_ip: + description: + - Whether to add a public ip to the server. + type: bool + default: false + alias: + description: + - The account alias to provision the servers under. + type: str + anti_affinity_policy_id: + description: + - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'. + type: str + anti_affinity_policy_name: + description: + - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'. + type: str + alert_policy_id: + description: + - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'. + type: str + alert_policy_name: + description: + - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'. + type: str + count: + description: + - The number of servers to build (mutually exclusive with exact_count). + default: 1 + type: int + count_group: + description: + - Required when exact_count is specified. The Server Group used to determine how many servers to deploy. + type: str + cpu: + description: + - How many CPUs to provision on the server. + default: 1 + type: int + cpu_autoscale_policy_id: + description: + - The autoscale policy to assign to the server. + type: str + custom_fields: + description: + - The list of custom fields to set on the server. + type: list + default: [] + elements: dict + description: + description: + - The description to set for the server. + type: str + exact_count: + description: + - Run in idempotent mode. Will ensure that this exact number of servers is running in the provided group, + creating and deleting them to reach that count. Requires count_group to be set. + type: int + group: + description: + - The Server Group to create servers under. + type: str + default: 'Default Group' + ip_address: + description: + - The IP Address for the server. One is assigned if not provided. + type: str + location: + description: + - The Datacenter to create servers in. + type: str + managed_os: + description: + - Whether to create the server as 'Managed' or not.
+ type: bool + default: false + required: false + memory: + description: + - Memory in GB. + type: int + default: 1 + name: + description: + - A 1 to 6 character identifier to use for the server. This is required when state is 'present'. + type: str + network_id: + description: + - The network UUID on which to create servers. + type: str + packages: + description: + - The list of blueprint packages to run on the server after it is created. + type: list + elements: dict + default: [] + password: + description: + - Password for the administrator / root user. + type: str + primary_dns: + description: + - Primary DNS used by the server. + type: str + public_ip_protocol: + description: + - The protocol to use for the public ip if add_public_ip is set to True. + type: str + default: 'TCP' + choices: ['TCP', 'UDP', 'ICMP'] + public_ip_ports: + description: + - A list of ports to allow on the firewall to the server's public ip, if add_public_ip is set to True. + type: list + elements: dict + default: [] + secondary_dns: + description: + - Secondary DNS used by the server. + type: str + server_ids: + description: + - Required for started, stopped, and absent states. + A list of server Ids to ensure are started, stopped, or absent. + type: list + default: [] + elements: str + source_server_password: + description: + - The password for the source server if a clone is specified. + type: str + state: + description: + - The state to ensure that the provided resources are in. + type: str + default: 'present' + choices: ['present', 'absent', 'started', 'stopped'] + storage_type: + description: + - The type of storage to attach to the server. + type: str + default: 'standard' + choices: ['standard', 'hyperscale'] + template: + description: + - The template to use for server creation. Will search for a template if a partial string is provided. + This is required when state is 'present'. + type: str + ttl: + description: + - The time to live for the server in seconds. The server will be deleted when this time expires. + type: str + type: + description: + - The type of server to create. + type: str + default: 'standard' + choices: ['standard', 'hyperscale', 'bareMetal'] + configuration_id: + description: + - Only required for bare metal servers. + Specifies the identifier for the specific configuration type of bare metal server to deploy. + type: str + os_type: + description: + - Only required for bare metal servers. + Specifies the OS to provision with the bare metal server. + type: str + choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] + wait: + description: + - Whether to wait for the provisioning tasks to finish before returning. + type: bool + default: true +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables, which enable access to the + CenturyLink Cloud + - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud + - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud + - Alternatively, the module accepts the API token and account alias.
The API token can be generated using the + CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples + +- name: Provision a single Ubuntu Server + community.general.clc_server: + name: test + template: ubuntu-14-64 + count: 1 + group: Default Group + state: present + +- name: Ensure 'Default Group' has exactly 5 servers + community.general.clc_server: + name: test + template: ubuntu-14-64 + exact_count: 5 + count_group: Default Group + group: Default Group + +- name: Stop a Server + community.general.clc_server: + server_ids: + - UC1ACCT-TEST01 + state: stopped + +- name: Start a Server + community.general.clc_server: + server_ids: + - UC1ACCT-TEST01 + state: started + +- name: Delete a Server + community.general.clc_server: + server_ids: + - UC1ACCT-TEST01 + state: absent +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are created + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +partially_created_server_ids: + description: The list of server ids that are partially created + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +servers: + description: The list of server objects returned from CLC + returned: success + type: list + sample: + [ + { + "changeInfo":{ + "createdBy":"service.wfad", + "createdDate":1438196820, + "modifiedBy":"service.wfad", + "modifiedDate":1438196820 + }, + "description":"test-server", + "details":{ + "alertPolicies":[ + + ], + "cpu":1, + "customFields":[ + + ], + "diskCount":3, + "disks":[ + { + "id":"0:0", + "partitionPaths":[ + + ], + "sizeGB":1 + }, + { + "id":"0:1", + "partitionPaths":[ + + ], + "sizeGB":2 + }, + { + "id":"0:2", + "partitionPaths":[ + + ], + "sizeGB":14 + } + ], + "hostName":"", + "inMaintenanceMode":false, + "ipAddresses":[ + { + "internal":"10.1.1.1" + } + ], + "memoryGB":1, + "memoryMB":1024, + "partitions":[ + + ], + "powerState":"started", + "snapshots":[ + + ], + "storageGB":17 + }, + "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", + "id":"test-server", + "ipaddress":"10.120.45.23", + "isTemplate":false, + "links":[ + { + "href":"/v2/servers/wfad/test-server", + "id":"test-server", + "rel":"self", + "verbs":[ + "GET", + "PATCH", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", + "id":"086ac1dfe0b6411989e8d1b77c4065f0", + "rel":"group" + }, + { + "href":"/v2/accounts/wfad", + "id":"wfad", + "rel":"account" + }, + { + "href":"/v2/billing/wfad/serverPricing/test-server", + "rel":"billing" + }, + { + "href":"/v2/servers/wfad/test-server/publicIPAddresses", + "rel":"publicIPAddresses", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/credentials", + "rel":"credentials" + }, + { + "href":"/v2/servers/wfad/test-server/statistics", + "rel":"statistics" + }, + { + "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", + "rel":"upcomingScheduledActivities" + }, + { + "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", + "rel":"scheduledActivities", + "verbs":[ + "GET", + "POST" + ]
+ }, + { + "href":"/v2/servers/wfad/test-server/capabilities", + "rel":"capabilities" + }, + { + "href":"/v2/servers/wfad/test-server/alertPolicies", + "rel":"alertPolicyMappings", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", + "rel":"antiAffinityPolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", + "rel":"cpuAutoscalePolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + } + ], + "locationId":"UC1", + "name":"test-server", + "os":"ubuntu14_64Bit", + "osType":"Ubuntu 14 64-bit", + "status":"active", + "storageType":"standard", + "type":"standard" + } + ] +''' + +__version__ = '${version}' + +import json +import os +import time +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException + from clc import APIFailedResponse +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcServer: + clc = clc_sdk + + def __init__(self, module): + """ + Construct module + """ + self.clc = clc_sdk + self.module = module + self.group_dict = {} + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + changed = False + new_server_ids = [] + server_dict_array = [] + + self._set_clc_credentials_from_env() + self.module.params = self._validate_module_params( + self.clc, + self.module) + p = self.module.params + state = p.get('state') + + # + # Handle each state + # + partial_servers_ids = [] + if state == 'absent': + server_ids = p['server_ids'] + if not isinstance(server_ids, list): + return self.module.fail_json( + msg='server_ids needs to be a list of instances to delete: %s' % + server_ids) + + (changed, + server_dict_array, + new_server_ids) = self._delete_servers(module=self.module, + clc=self.clc, + server_ids=server_ids) + + elif state in ('started', 'stopped'): + server_ids = p.get('server_ids') + if not isinstance(server_ids, list): + return self.module.fail_json( + msg='server_ids needs to be a list of servers to run: %s' % + server_ids) + + (changed, + server_dict_array, + new_server_ids) = self._start_stop_servers(self.module, + self.clc, + server_ids) + + elif state == 'present': + # Changed is always set to true when provisioning new instances + if not p.get('template') and p.get('type') != 'bareMetal': + return self.module.fail_json( + msg='template parameter is required for new instance') + + if p.get('exact_count') is None: + (server_dict_array, + new_server_ids, + partial_servers_ids, + changed) = self._create_servers(self.module, + self.clc) + else: + 
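+ # exact_count was supplied: converge count_group to exactly that many running servers, creating or deleting servers as needed (handled by _enforce_count below).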
(server_dict_array, + new_server_ids, + partial_servers_ids, + changed) = self._enforce_count(self.module, + self.clc) + + self.module.exit_json( + changed=changed, + server_ids=new_server_ids, + partially_created_server_ids=partial_servers_ids, + servers=server_dict_array) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(), + template=dict(), + group=dict(default='Default Group'), + network_id=dict(), + location=dict(), + cpu=dict(default=1, type='int'), + memory=dict(default=1, type='int'), + alias=dict(), + password=dict(no_log=True), + ip_address=dict(), + storage_type=dict( + default='standard', + choices=[ + 'standard', + 'hyperscale']), + type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), + primary_dns=dict(), + secondary_dns=dict(), + additional_disks=dict(type='list', default=[], elements='dict'), + custom_fields=dict(type='list', default=[], elements='dict'), + ttl=dict(), + managed_os=dict(type='bool', default=False), + description=dict(), + source_server_password=dict(no_log=True), + cpu_autoscale_policy_id=dict(), + anti_affinity_policy_id=dict(), + anti_affinity_policy_name=dict(), + alert_policy_id=dict(), + alert_policy_name=dict(), + packages=dict(type='list', default=[], elements='dict'), + state=dict( + default='present', + choices=[ + 'present', + 'absent', + 'started', + 'stopped']), + count=dict(type='int', default=1), + exact_count=dict(type='int', ), + count_group=dict(), + server_ids=dict(type='list', default=[], elements='str'), + add_public_ip=dict(type='bool', default=False), + public_ip_protocol=dict( + default='TCP', + choices=[ + 'TCP', + 'UDP', + 'ICMP']), + public_ip_ports=dict(type='list', default=[], elements='dict'), + configuration_id=dict(), + os_type=dict(choices=[ + 'redHat6_64Bit', + 'centOS6_64Bit', + 'windows2012R2Standard_64Bit', + 'ubuntu14_64Bit' + ]), + wait=dict(type='bool', default=True)) + + mutually_exclusive = [ + ['exact_count', 'count'], + ['exact_count', 'state'], + ['anti_affinity_policy_id', 'anti_affinity_policy_name'], + ['alert_policy_id', 'alert_policy_name'], + ] + return {"argument_spec": argument_spec, + "mutually_exclusive": mutually_exclusive} + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _validate_module_params(clc, module): + """ + Validate the module params, and lookup default values. 
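+ Resolves the account alias, cpu, memory, description, ttl, template, group, network and policy ids from the CLC API when they are not supplied, and fails the module when a required value cannot be determined.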
+ :param clc: clc-sdk instance to use + :param module: module to validate + :return: dictionary of validated params + """ + params = module.params + datacenter = ClcServer._find_datacenter(clc, module) + + ClcServer._validate_types(module) + ClcServer._validate_name(module) + + params['alias'] = ClcServer._find_alias(clc, module) + params['cpu'] = ClcServer._find_cpu(clc, module) + params['memory'] = ClcServer._find_memory(clc, module) + params['description'] = ClcServer._find_description(module) + params['ttl'] = ClcServer._find_ttl(clc, module) + params['template'] = ClcServer._find_template_id(module, datacenter) + params['group'] = ClcServer._find_group(module, datacenter).id + params['network_id'] = ClcServer._find_network_id(module, datacenter) + params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id( + clc, + module) + params['alert_policy_id'] = ClcServer._find_alert_policy_id( + clc, + module) + + return params + + @staticmethod + def _find_datacenter(clc, module): + """ + Find the datacenter by calling the CLC API. + :param clc: clc-sdk instance to use + :param module: module to validate + :return: clc-sdk.Datacenter instance + """ + location = module.params.get('location') + try: + if not location: + account = clc.v2.Account() + location = account.data.get('primaryDataCenter') + data_center = clc.v2.Datacenter(location) + return data_center + except CLCException: + module.fail_json(msg="Unable to find location: {0}".format(location)) + + @staticmethod + def _find_alias(clc, module): + """ + Find or validate the account alias by calling the CLC API + :param clc: clc-sdk instance to use + :param module: module to validate + :return: the validated account alias (str) + """ + alias = module.params.get('alias') + if not alias: + try: + alias = clc.v2.Account.GetAlias() + except CLCException as ex: + module.fail_json(msg='Unable to find account alias. {0}'.format( + ex.message + )) + return alias + + @staticmethod + def _find_cpu(clc, module): + """ + Find or validate the CPU value by calling the CLC API + :param clc: clc-sdk instance to use + :param module: module to validate + :return: Int value for CPU + """ + cpu = module.params.get('cpu') + group_id = module.params.get('group_id') + alias = module.params.get('alias') + state = module.params.get('state') + + if not cpu and state == 'present': + group = clc.v2.Group(id=group_id, + alias=alias) + if group.Defaults("cpu"): + cpu = group.Defaults("cpu") + else: + module.fail_json( + msg=str("Can\'t determine a default cpu value. Please provide a value for cpu.")) + return cpu + + @staticmethod + def _find_memory(clc, module): + """ + Find or validate the Memory value by calling the CLC API + :param clc: clc-sdk instance to use + :param module: module to validate + :return: Int value for Memory + """ + memory = module.params.get('memory') + group_id = module.params.get('group_id') + alias = module.params.get('alias') + state = module.params.get('state') + + if not memory and state == 'present': + group = clc.v2.Group(id=group_id, + alias=alias) + if group.Defaults("memory"): + memory = group.Defaults("memory") + else: + module.fail_json(msg=str( + "Can\'t determine a default memory value.
Please provide a value for memory.")) + return memory + + @staticmethod + def _find_description(module): + """ + Set the description module param to name if description is blank + :param module: the module to validate + :return: string description + """ + description = module.params.get('description') + if not description: + description = module.params.get('name') + return description + + @staticmethod + def _validate_types(module): + """ + Validate that type and storage_type are set appropriately, and fail if not + :param module: the module to validate + :return: none + """ + state = module.params.get('state') + server_type = module.params.get( + 'type').lower() if module.params.get('type') else None + storage_type = module.params.get( + 'storage_type').lower() if module.params.get('storage_type') else None + + if state == "present": + if server_type == "standard" and storage_type not in ( + "standard", "premium"): + module.fail_json( + msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) + + if server_type == "hyperscale" and storage_type != "hyperscale": + module.fail_json( + msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) + + @staticmethod + def _validate_name(module): + """ + Validate that name is the correct length if provided, fail if it's not + :param module: the module to validate + :return: none + """ + server_name = module.params.get('name') + state = module.params.get('state') + + if state == 'present' and ( + len(server_name) < 1 or len(server_name) > 6): + module.fail_json(msg=str( + "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) + + @staticmethod + def _find_ttl(clc, module): + """ + Validate that TTL is > 3600 if set, and fail if not + :param clc: clc-sdk instance to use + :param module: module to validate + :return: validated ttl + """ + ttl = module.params.get('ttl') + + if ttl: + if ttl <= 3600: + return module.fail_json(msg=str("Ttl cannot be <= 3600")) + else: + ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) + return ttl + + @staticmethod + def _find_template_id(module, datacenter): + """ + Find the template id by calling the CLC API. + :param module: the module to validate + :param datacenter: the datacenter to search for the template + :return: a valid clc template id + """ + lookup_template = module.params.get('template') + state = module.params.get('state') + type = module.params.get('type') + result = None + + if state == 'present' and type != 'bareMetal': + try: + result = datacenter.Templates().Search(lookup_template)[0].id + except CLCException: + module.fail_json( + msg=str( + "Unable to find a template: " + + lookup_template + + " in location: " + + datacenter.id)) + return result + + @staticmethod + def _find_network_id(module, datacenter): + """ + Validate the provided network id or return a default. 
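+ When no network_id is supplied, the id of the first network reported for the target datacenter is used.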
+ :param module: the module to validate + :param datacenter: the datacenter to search for a network id + :return: a valid network id + """ + network_id = module.params.get('network_id') + + if not network_id: + try: + network_id = datacenter.Networks().networks[0].id + # -- added for clc-sdk 2.23 compatibility + # datacenter_networks = clc_sdk.v2.Networks( + # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks']) + # network_id = datacenter_networks.networks[0].id + # -- end + except CLCException: + module.fail_json( + msg=str( + "Unable to find a network in location: " + + datacenter.id)) + + return network_id + + @staticmethod + def _find_aa_policy_id(clc, module): + """ + Validate that the anti affinity policy exists for the given name and fail if not + :param clc: the clc-sdk instance + :param module: the module to validate + :return: aa_policy_id: the anti affinity policy id of the given name. + """ + aa_policy_id = module.params.get('anti_affinity_policy_id') + aa_policy_name = module.params.get('anti_affinity_policy_name') + if not aa_policy_id and aa_policy_name: + alias = module.params.get('alias') + aa_policy_id = ClcServer._get_anti_affinity_policy_id( + clc, + module, + alias, + aa_policy_name) + if not aa_policy_id: + module.fail_json( + msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) + return aa_policy_id + + @staticmethod + def _find_alert_policy_id(clc, module): + """ + Validate that the alert policy exists for the given name and fail if not + :param clc: the clc-sdk instance + :param module: the module to validate + :return: alert_policy_id: the alert policy id of the given name. + """ + alert_policy_id = module.params.get('alert_policy_id') + alert_policy_name = module.params.get('alert_policy_name') + if not alert_policy_id and alert_policy_name: + alias = module.params.get('alias') + alert_policy_id = ClcServer._get_alert_policy_id_by_name( + clc=clc, + module=module, + alias=alias, + alert_policy_name=alert_policy_name + ) + if not alert_policy_id: + module.fail_json( + msg='No alert policy exists with name : %s' % alert_policy_name) + return alert_policy_id + + def _create_servers(self, module, clc, override_count=None): + """ + Create New Servers in CLC cloud + :param module: the AnsibleModule object + :param clc: the clc-sdk instance to use + :return: a list of dictionaries with server information about the servers that were created + """ + p = module.params + request_list = [] + servers = [] + server_dict_array = [] + created_server_ids = [] + partial_created_servers_ids = [] + + add_public_ip = p.get('add_public_ip') + public_ip_protocol = p.get('public_ip_protocol') + public_ip_ports = p.get('public_ip_ports') + + params = { + 'name': p.get('name'), + 'template': p.get('template'), + 'group_id': p.get('group'), + 'network_id': p.get('network_id'), + 'cpu': p.get('cpu'), + 'memory': p.get('memory'), + 'alias': p.get('alias'), + 'password': p.get('password'), + 'ip_address': p.get('ip_address'), + 'storage_type': p.get('storage_type'), + 'type': p.get('type'), + 'primary_dns': p.get('primary_dns'), + 'secondary_dns': p.get('secondary_dns'), + 'additional_disks': p.get('additional_disks'), + 'custom_fields': p.get('custom_fields'), + 'ttl': p.get('ttl'), + 'managed_os': p.get('managed_os'), + 'description': p.get('description'), + 'source_server_password': p.get('source_server_password'), + 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), + 'anti_affinity_policy_id':
p.get('anti_affinity_policy_id'), + 'packages': p.get('packages'), + 'configuration_id': p.get('configuration_id'), + 'os_type': p.get('os_type') + } + + count = override_count if override_count else p.get('count') + + changed = False if count == 0 else True + + if not changed: + return server_dict_array, created_server_ids, partial_created_servers_ids, changed + for i in range(0, count): + if not module.check_mode: + req = self._create_clc_server(clc=clc, + module=module, + server_params=params) + server = req.requests[0].Server() + request_list.append(req) + servers.append(server) + + self._wait_for_requests(module, request_list) + self._refresh_servers(module, servers) + + ip_failed_servers = self._add_public_ip_to_servers( + module=module, + should_add_public_ip=add_public_ip, + servers=servers, + public_ip_protocol=public_ip_protocol, + public_ip_ports=public_ip_ports) + ap_failed_servers = self._add_alert_policy_to_servers(clc=clc, + module=module, + servers=servers) + + for server in servers: + if server in ip_failed_servers or server in ap_failed_servers: + partial_created_servers_ids.append(server.id) + else: + # reload server details + server = clc.v2.Server(server.id) + server.data['ipaddress'] = server.details[ + 'ipAddresses'][0]['internal'] + + if add_public_ip and len(server.PublicIPs().public_ips) > 0: + server.data['publicip'] = str( + server.PublicIPs().public_ips[0]) + created_server_ids.append(server.id) + server_dict_array.append(server.data) + + return server_dict_array, created_server_ids, partial_created_servers_ids, changed + + def _enforce_count(self, module, clc): + """ + Enforce that there is the right number of servers in the provided group. + Creates or deletes servers as necessary. + :param module: the AnsibleModule object + :param clc: the clc-sdk instance to use + :return: a list of dictionaries with server information about the servers that were created or deleted + """ + p = module.params + changed = False + count_group = p.get('count_group') + datacenter = ClcServer._find_datacenter(clc, module) + exact_count = p.get('exact_count') + server_dict_array = [] + partial_servers_ids = [] + changed_server_ids = [] + + # fail here if the exact count was specified without filtering + # on a group, as this may lead to an undesired removal of instances + if exact_count and count_group is None: + return module.fail_json( + msg="you must use the 'count_group' option with exact_count") + + servers, running_servers = ClcServer._find_running_servers_by_group( + module, datacenter, count_group) + + if len(running_servers) == exact_count: + changed = False + + elif len(running_servers) < exact_count: + to_create = exact_count - len(running_servers) + server_dict_array, changed_server_ids, partial_servers_ids, changed \ + = self._create_servers(module, clc, override_count=to_create) + + for server in server_dict_array: + running_servers.append(server) + + elif len(running_servers) > exact_count: + to_remove = len(running_servers) - exact_count + all_server_ids = sorted([x.id for x in running_servers]) + remove_ids = all_server_ids[0:to_remove] + + (changed, server_dict_array, changed_server_ids) \ + = ClcServer._delete_servers(module, clc, remove_ids) + + return server_dict_array, changed_server_ids, partial_servers_ids, changed + + @staticmethod + def _wait_for_requests(module, request_list): + """ + Block until server provisioning requests are completed.
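+ Only blocks when the module's 'wait' parameter is true; each WaitUntilComplete() call returns the number of failed sub-requests, and any failure fails the module.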
+ :param module: the AnsibleModule object + :param request_list: a list of clc-sdk.Request instances + :return: none + """ + wait = module.params.get('wait') + if wait: + # Requests.WaitUntilComplete() returns the count of failed requests + failed_requests_count = sum( + [request.WaitUntilComplete() for request in request_list]) + + if failed_requests_count > 0: + module.fail_json( + msg='Unable to process server request') + + @staticmethod + def _refresh_servers(module, servers): + """ + Loop through a list of servers and refresh them. + :param module: the AnsibleModule object + :param servers: list of clc-sdk.Server instances to refresh + :return: none + """ + for server in servers: + try: + server.Refresh() + except CLCException as ex: + module.fail_json(msg='Unable to refresh the server {0}. {1}'.format( + server.id, ex.message + )) + + @staticmethod + def _add_public_ip_to_servers( + module, + should_add_public_ip, + servers, + public_ip_protocol, + public_ip_ports): + """ + Create a public IP for servers + :param module: the AnsibleModule object + :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False + :param servers: List of servers to add public ips to + :param public_ip_protocol: a protocol to allow for the public ips + :param public_ip_ports: list of ports to allow for the public ips + :return: none + """ + failed_servers = [] + if not should_add_public_ip: + return failed_servers + + ports_lst = [] + request_list = [] + server = None + + for port in public_ip_ports: + ports_lst.append( + {'protocol': public_ip_protocol, 'port': port}) + try: + if not module.check_mode: + for server in servers: + request = server.PublicIPs().Add(ports_lst) + request_list.append(request) + except APIFailedResponse: + failed_servers.append(server) + ClcServer._wait_for_requests(module, request_list) + return failed_servers + + @staticmethod + def _add_alert_policy_to_servers(clc, module, servers): + """ + Associate the alert policy to servers + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param servers: List of servers to add alert policy to + :return: failed_servers: the list of servers which failed while associating alert policy + """ + failed_servers = [] + p = module.params + alert_policy_id = p.get('alert_policy_id') + alias = p.get('alias') + + if alert_policy_id and not module.check_mode: + for server in servers: + try: + ClcServer._add_alert_policy_to_server( + clc=clc, + alias=alias, + server_id=server.id, + alert_policy_id=alert_policy_id) + except CLCException: + failed_servers.append(server) + return failed_servers + + @staticmethod + def _add_alert_policy_to_server( + clc, alias, server_id, alert_policy_id): + """ + Associate an alert policy to a clc server + :param clc: the clc-sdk instance to use + :param alias: the clc account alias + :param server_id: The clc server id + :param alert_policy_id: the alert policy id to be associated to the server + :return: none + """ + try: + clc.v2.API.Call( + method='POST', + url='servers/%s/%s/alertPolicies' % (alias, server_id), + payload=json.dumps( + { + 'id': alert_policy_id + })) + except APIFailedResponse as e: + raise CLCException( + 'Failed to associate alert policy to the server : {0} with Error {1}'.format( + server_id, str(e.response_text))) + + @staticmethod + def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): + """ + Returns the alert policy id for the given alert policy name + :param clc: the clc-sdk instance to use + 
:param module: the AnsibleModule object + :param alias: the clc account alias + :param alert_policy_name: the name of the alert policy + :return: alert_policy_id: the alert policy id + """ + alert_policy_id = None + policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias) + if not policies: + return alert_policy_id + for policy in policies.get('items'): + if policy.get('name') == alert_policy_name: + if not alert_policy_id: + alert_policy_id = policy.get('id') + else: + return module.fail_json( + msg='multiple alert policies were found with policy name : %s' % alert_policy_name) + return alert_policy_id + + @staticmethod + def _delete_servers(module, clc, server_ids): + """ + Delete the servers on the provided list + :param module: the AnsibleModule object + :param clc: the clc-sdk instance to use + :param server_ids: list of servers to delete + :return: a list of dictionaries with server information about the servers that were deleted + """ + terminated_server_ids = [] + server_dict_array = [] + request_list = [] + + if not isinstance(server_ids, list) or len(server_ids) < 1: + return module.fail_json( + msg='server_ids should be a list of servers, aborting') + + servers = clc.v2.Servers(server_ids).Servers() + for server in servers: + if not module.check_mode: + request_list.append(server.Delete()) + ClcServer._wait_for_requests(module, request_list) + + for server in servers: + terminated_server_ids.append(server.id) + + return True, server_dict_array, terminated_server_ids + + @staticmethod + def _start_stop_servers(module, clc, server_ids): + """ + Start or Stop the servers on the provided list + :param module: the AnsibleModule object + :param clc: the clc-sdk instance to use + :param server_ids: list of servers to start or stop + :return: a list of dictionaries with server information about the servers that were started or stopped + """ + p = module.params + state = p.get('state') + changed = False + changed_servers = [] + server_dict_array = [] + result_server_ids = [] + request_list = [] + + if not isinstance(server_ids, list) or len(server_ids) < 1: + return module.fail_json( + msg='server_ids should be a list of servers, aborting') + + servers = clc.v2.Servers(server_ids).Servers() + for server in servers: + if server.powerState != state: + changed_servers.append(server) + if not module.check_mode: + request_list.append( + ClcServer._change_server_power_state( + module, + server, + state)) + changed = True + + ClcServer._wait_for_requests(module, request_list) + ClcServer._refresh_servers(module, changed_servers) + + for server in set(changed_servers + servers): + try: + server.data['ipaddress'] = server.details[ + 'ipAddresses'][0]['internal'] + server.data['publicip'] = str( + server.PublicIPs().public_ips[0]) + except (KeyError, IndexError): + pass + + server_dict_array.append(server.data) + result_server_ids.append(server.id) + + return changed, server_dict_array, result_server_ids + + @staticmethod + def _change_server_power_state(module, server, state): + """ + Change the server powerState + :param module: the module to check for intended state + :param server: the server to start or stop + :param state: the intended powerState for the server + :return: the request object from clc-sdk call + """ + result = None + try: + if state == 'started': + result = server.PowerOn() + else: + # Try to shut down the server and fall back to power off when unable to shut down. 
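+ # ShutDown() requests a graceful, OS-level stop; when it does not queue a request, fall back to PowerOff(), which forces the power state change.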
+ result = server.ShutDown() + if result and hasattr(result, 'requests') and result.requests[0]: + return result + else: + result = server.PowerOff() + except CLCException: + module.fail_json( + msg='Unable to change power state for server {0}'.format( + server.id)) + return result + + @staticmethod + def _find_running_servers_by_group(module, datacenter, count_group): + """ + Find a list of running servers in the provided group + :param module: the AnsibleModule object + :param datacenter: the clc-sdk.Datacenter instance to use to look up the group + :param count_group: the group to count the servers + :return: list of servers, and list of running servers + """ + group = ClcServer._find_group( + module=module, + datacenter=datacenter, + lookup_group=count_group) + + servers = group.Servers().Servers() + running_servers = [] + + for server in servers: + if server.status == 'active' and server.powerState == 'started': + running_servers.append(server) + + return servers, running_servers + + @staticmethod + def _find_group(module, datacenter, lookup_group=None): + """ + Find a server group in a datacenter by calling the CLC API + :param module: the AnsibleModule instance + :param datacenter: clc-sdk.Datacenter instance to search for the group + :param lookup_group: string name of the group to search for + :return: clc-sdk.Group instance + """ + if not lookup_group: + lookup_group = module.params.get('group') + try: + return datacenter.Groups().Get(lookup_group) + except CLCException: + pass + + # The search above only acts on the main group; fall back to a + # recursive search of the subgroup tree + result = ClcServer._find_group_recursive( + module, + datacenter.Groups(), + lookup_group) + + if result is None: + module.fail_json( + msg=str( + "Unable to find group: " + + lookup_group + + " in location: " + + datacenter.id)) + + return result + + @staticmethod + def _find_group_recursive(module, group_list, lookup_group): + """ + Find a server group by recursively walking the tree + :param module: the AnsibleModule instance to use + :param group_list: a list of groups to search + :param lookup_group: the group to look for + :return: the matching clc-sdk.Group instance, or None if not found + """ + result = None + for group in group_list.groups: + subgroups = group.Subgroups() + try: + return subgroups.Get(lookup_group) + except CLCException: + result = ClcServer._find_group_recursive( + module, + subgroups, + lookup_group) + + if result is not None: + break + + return result + + @staticmethod + def _create_clc_server( + clc, + module, + server_params): + """ + Call the CLC REST API to create a server + :param clc: the clc-python-sdk instance to use + :param module: the AnsibleModule instance to use + :param server_params: a dictionary of params to use to create the servers + :return: clc-sdk.Request object linked to the queued server request + """ + + try: + res = clc.v2.API.Call( + method='POST', + url='servers/%s' % + (server_params.get('alias')), + payload=json.dumps( + { + 'name': server_params.get('name'), + 'description': server_params.get('description'), + 'groupId': server_params.get('group_id'), + 'sourceServerId': server_params.get('template'), + 'isManagedOS': server_params.get('managed_os'), + 'primaryDNS': server_params.get('primary_dns'), + 'secondaryDNS': server_params.get('secondary_dns'), + 'networkId': server_params.get('network_id'), + 'ipAddress': server_params.get('ip_address'), + 'password': server_params.get('password'), + 'sourceServerPassword': server_params.get('source_server_password'), + 'cpu': server_params.get('cpu'), + 'cpuAutoscalePolicyId':
server_params.get('cpu_autoscale_policy_id'), + 'memoryGB': server_params.get('memory'), + 'type': server_params.get('type'), + 'storageType': server_params.get('storage_type'), + 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'), + 'customFields': server_params.get('custom_fields'), + 'additionalDisks': server_params.get('additional_disks'), + 'ttl': server_params.get('ttl'), + 'packages': server_params.get('packages'), + 'configurationId': server_params.get('configuration_id'), + 'osType': server_params.get('os_type')})) + + result = clc.v2.Requests(res) + except APIFailedResponse as ex: + return module.fail_json(msg='Unable to create the server: {0}. {1}'.format( + server_params.get('name'), + ex.response_text + )) + + # + # Patch the Request object so that it returns a valid server + + # Find the server's UUID from the API response + server_uuid = [obj['id'] + for obj in res['links'] if obj['rel'] == 'self'][0] + + # Change the request server method to a _find_server_by_uuid closure so + # that it will work + result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry( + clc, + module, + server_uuid, + server_params.get('alias')) + + return result + + @staticmethod + def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name): + """ + Retrieves the anti affinity policy id of the server based on the name of the policy + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param alias: the CLC account alias + :param aa_policy_name: the anti affinity policy name + :return: aa_policy_id: The anti affinity policy id + """ + aa_policy_id = None + try: + aa_policies = clc.v2.API.Call(method='GET', + url='antiAffinityPolicies/%s' % alias) + except APIFailedResponse as ex: + return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format( + alias, ex.response_text)) + for aa_policy in aa_policies.get('items'): + if aa_policy.get('name') == aa_policy_name: + if not aa_policy_id: + aa_policy_id = aa_policy.get('id') + else: + return module.fail_json( + msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) + return aa_policy_id + + # + # This is the function that gets patched to the Request.server object using a lambda closure + # + + @staticmethod + def _find_server_by_uuid_w_retry( + clc, module, svr_uuid, alias=None, retries=5, back_out=2): + """ + Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned. + :param clc: the clc-sdk instance to use + :param module: the AnsibleModule object + :param svr_uuid: UUID of the server + :param retries: the number of retry attempts to make prior to failing;
default is 5 + :param alias: the Account Alias to search + :return: a clc-sdk.Server instance + """ + if not alias: + alias = clc.v2.Account.GetAlias() + + # Wait and retry if the API returns a 404 + while True: + retries -= 1 + try: + server_obj = clc.v2.API.Call( + method='GET', url='servers/%s/%s?uuid=true' % + (alias, svr_uuid)) + server_id = server_obj['id'] + server = clc.v2.Server( + id=server_id, + alias=alias, + server_obj=server_obj) + return server + + except APIFailedResponse as e: + if e.response_status_code != 404: + return module.fail_json( + msg='A failure response was received from CLC API when ' + 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' % + (svr_uuid, e.response_status_code, e.message)) + if retries == 0: + return module.fail_json( + msg='Unable to reach the CLC API after 5 attempts') + time.sleep(back_out) + back_out *= 2 + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + argument_dict = ClcServer._define_module_argument_spec() + module = AnsibleModule(supports_check_mode=True, **argument_dict) + clc_server = ClcServer(module) + clc_server.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py b/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py new file mode 100644 index 000000000..82b2a9956 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py @@ -0,0 +1,419 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015 CenturyLink +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: clc_server_snapshot +short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud +description: + - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + server_ids: + description: + - The list of CLC server Ids. + type: list + required: true + elements: str + expiration_days: + description: + - The number of days to keep the server snapshot before it expires. + type: int + default: 7 + required: false + state: + description: + - The state to ensure that the provided resources are in. + type: str + default: 'present' + required: false + choices: ['present', 'absent', 'restore'] + wait: + description: + - Whether to wait for the provisioning tasks to finish before returning.
+ default: 'True' + required: false + type: str +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables, which enable access to the + CenturyLink Cloud + - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud + - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples + +- name: Create server snapshot + community.general.clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + expiration_days: 10 + wait: true + state: present + +- name: Restore server snapshot + community.general.clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + wait: true + state: restore + +- name: Delete server snapshot + community.general.clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + wait: true + state: absent +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +''' + +__version__ = '${version}' + +import os +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +REQUESTS_IMP_ERR = None +try: + import requests +except ImportError: + REQUESTS_IMP_ERR = traceback.format_exc() + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk.
+# sudo pip install clc-sdk +# +CLC_IMP_ERR = None +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_IMP_ERR = traceback.format_exc() + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +class ClcSnapshot: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + + if not CLC_FOUND: + self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) + if not REQUESTS_FOUND: + self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + server_ids = p['server_ids'] + expiration_days = p['expiration_days'] + state = p['state'] + request_list = [] + changed = False + changed_servers = [] + + self._set_clc_credentials_from_env() + if state == 'present': + changed, request_list, changed_servers = self.ensure_server_snapshot_present( + server_ids=server_ids, + expiration_days=expiration_days) + elif state == 'absent': + changed, request_list, changed_servers = self.ensure_server_snapshot_absent( + server_ids=server_ids) + elif state == 'restore': + changed, request_list, changed_servers = self.ensure_server_snapshot_restore( + server_ids=server_ids) + + self._wait_for_requests_to_complete(request_list) + return self.module.exit_json( + changed=changed, + server_ids=changed_servers) + + def ensure_server_snapshot_present(self, server_ids, expiration_days): + """ + Ensures the given set of server_ids have the snapshots created + :param server_ids: The list of server_ids to create the snapshot + :param expiration_days: The number of days to keep the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of server ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) == 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._create_server_snapshot(server, expiration_days) + request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _create_server_snapshot(self, server, expiration_days): + """ + Create the snapshot for the CLC server + :param server: the CLC server object + :param expiration_days: The number of days to keep the snapshot + :return: the create request object from CLC API Call + """ + result = None + try: + result = server.CreateSnapshot( + delete_existing=True, + expiration_days=expiration_days) + except CLCException as ex: + self.module.fail_json(msg='Failed to create snapshot for server : {0}.
{1}'.format( + server.id, ex.response_text + )) + return result + + def ensure_server_snapshot_absent(self, server_ids): + """ + Ensures the given set of server_ids have the snapshots removed + :param server_ids: The list of server_ids to delete the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of server ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) > 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._delete_server_snapshot(server) + request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _delete_server_snapshot(self, server): + """ + Delete snapshot for the CLC server + :param server: the CLC server object + :return: the delete snapshot request object from CLC API + """ + result = None + try: + result = server.DeleteSnapshot() + except CLCException as ex: + self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format( + server.id, ex.response_text + )) + return result + + def ensure_server_snapshot_restore(self, server_ids): + """ + Ensures the given set of server_ids have the snapshots restored + :param server_ids: The list of server_ids to restore the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of server ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) > 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._restore_server_snapshot(server) + request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _restore_server_snapshot(self, server): + """ + Restore snapshot for the CLC server + :param server: the CLC server object + :return: the restore snapshot request object from CLC API + """ + result = None + try: + result = server.RestoreSnapshot() + except CLCException as ex: + self.module.fail_json(msg='Failed to restore snapshot for server : {0}.
{1}'.format( + server.id, ex.response_text + )) + return result + + def _wait_for_requests_to_complete(self, requests_lst): + """ + Waits until the CLC requests are complete if the wait argument is True + :param requests_lst: The list of CLC request objects + :return: none + """ + if not self.module.params['wait']: + return + for request in requests_lst: + request.WaitUntilComplete() + for request_details in request.requests: + if request_details.Status() != 'succeeded': + self.module.fail_json( + msg='Unable to process server snapshot request') + + @staticmethod + def define_argument_spec(): + """ + This function defines the dictionary object required for + package module + :return: the package dictionary object + """ + argument_spec = dict( + server_ids=dict(type='list', required=True, elements='str'), + expiration_days=dict(default=7, type='int'), + wait=dict(default=True), + state=dict( + default='present', + choices=[ + 'present', + 'absent', + 'restore']), + ) + return argument_spec + + def _get_servers_from_clc(self, server_list, message): + """ + Internal function to fetch list of CLC server objects from a list of server ids + :param server_list: The list of server ids + :param message: The error message to throw in case of any error + :return the list of CLC server objects + """ + try: + return self.clc.v2.Servers(server_list).servers + except CLCException as ex: + return self.module.fail_json(msg=message + ': %s' % ex) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + Main function + :return: None + """ + module = AnsibleModule( + argument_spec=ClcSnapshot.define_argument_spec(), + supports_check_mode=True + ) + clc_snapshot = ClcSnapshot(module) + clc_snapshot.process_request() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py b/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py new file mode 100644 index 000000000..d8209cc61 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, 
print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: cloud_init_data_facts +short_description: Retrieve facts of cloud-init +description: + - Gathers facts by reading the status.json and result.json of cloud-init. +author: René Moser (@resmo) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + filter: + description: + - Filter facts + type: str + choices: [ status, result ] +notes: + - See http://cloudinit.readthedocs.io/ for more information about cloud-init. +''' + +EXAMPLES = ''' +- name: Gather all facts of cloud init + community.general.cloud_init_data_facts: + register: result + +- ansible.builtin.debug: + var: result + +- name: Wait for cloud init to finish + community.general.cloud_init_data_facts: + filter: status + register: res + until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" + retries: 50 + delay: 5 +''' + +RETURN = ''' +--- +cloud_init_data_facts: + description: Facts of result and status. + returned: success + type: dict + sample: '{ + "status": { + "v1": { + "datasource": "DataSourceCloudStack", + "errors": [] + }, + "result": { + "v1": { + "datasource": "DataSourceCloudStack", + "init": { + "errors": [], + "finished": 1522066377.0185432, + "start": 1522066375.2648022 + }, + "init-local": { + "errors": [], + "finished": 1522066373.70919, + "start": 1522066373.4726632 + }, + "modules-config": { + "errors": [], + "finished": 1522066380.9097016, + "start": 1522066379.0011985 + }, + "modules-final": { + "errors": [], + "finished": 1522066383.56594, + "start": 1522066382.3449218 + }, + "stage": null + } + }' +''' + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text + + +CLOUD_INIT_PATH = "/var/lib/cloud/data" + + +def gather_cloud_init_data_facts(module): + res = { + 'cloud_init_data_facts': dict() + } + + for i in ['result', 'status']: + filter = module.params.get('filter') + if filter is None or filter == i: + res['cloud_init_data_facts'][i] = dict() + json_file = os.path.join(CLOUD_INIT_PATH, i + '.json') + + if os.path.exists(json_file): + f = open(json_file, 'rb') + contents = to_text(f.read(), errors='surrogate_or_strict') + f.close() + + if contents: + res['cloud_init_data_facts'][i] = module.from_json(contents) + return res + + +def main(): + module = AnsibleModule( + argument_spec=dict( + filter=dict(choices=['result', 'status']), + ), + supports_check_mode=True, + ) + + facts = gather_cloud_init_data_facts(module) + result = dict(changed=False, ansible_facts=facts, **facts) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py new file mode 100644 index 000000000..8f45fcef3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py @@ -0,0 +1,893 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016 Michael Gruener +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: cloudflare_dns +author: +- Michael Gruener (@mgruener) +requirements: + - 
python >= 2.6 +short_description: Manage Cloudflare DNS records +description: + - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_token: + description: + - API token. + - Required for api token authentication. + - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." + - Can be specified in C(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. + type: str + required: false + version_added: '0.2.0' + account_api_key: + description: + - Account API key. + - Required for api keys authentication. + - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." + type: str + required: false + aliases: [ account_api_token ] + account_email: + description: + - Account email. Required for API keys authentication. + type: str + required: false + algorithm: + description: + - Algorithm number. + - Required for I(type=DS) and I(type=SSHFP) when I(state=present). + type: int + cert_usage: + description: + - Certificate usage number. + - Required for I(type=TLSA) when I(state=present). + type: int + choices: [ 0, 1, 2, 3 ] + hash_type: + description: + - Hash type number. + - Required for I(type=DS), I(type=SSHFP) and I(type=TLSA) when I(state=present). + type: int + choices: [ 1, 2 ] + key_tag: + description: + - DNSSEC key tag. + - Needed for I(type=DS) when I(state=present). + type: int + port: + description: + - Service port. + - Required for I(type=SRV) and I(type=TLSA). + type: int + priority: + description: + - Record priority. + - Required for I(type=MX) and I(type=SRV) + default: 1 + type: int + proto: + description: + - Service protocol. Required for I(type=SRV) and I(type=TLSA). + - Common values are TCP and UDP. + - Before Ansible 2.6 only TCP and UDP were available. + type: str + proxied: + description: + - Proxy through Cloudflare network or just use DNS. + type: bool + default: false + record: + description: + - Record to add. + - Required if I(state=present). + - Default is C(@) (e.g. the zone name). + type: str + default: '@' + aliases: [ name ] + selector: + description: + - Selector number. + - Required for I(type=TLSA) when I(state=present). + choices: [ 0, 1 ] + type: int + service: + description: + - Record service. + - Required for I(type=SRV). + type: str + solo: + description: + - Whether the record should be the only one for that record type and record name. + - Only use with I(state=present). + - This will delete all other records with the same record name and type. + type: bool + state: + description: + - Whether the record(s) should exist or not. + type: str + choices: [ absent, present ] + default: present + timeout: + description: + - Timeout for Cloudflare API calls. + type: int + default: 30 + ttl: + description: + - The TTL to give the new record. + - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic. + type: int + default: 1 + type: + description: + - The type of DNS record to create. Required if I(state=present). + - I(type=DS), I(type=SSHFP) and I(type=TLSA) added in Ansible 2.7. + type: str + choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ] + value: + description: + - The record value. + - Required for I(state=present). 
+ type: str + aliases: [ content ] + weight: + description: + - Service weight. + - Required for I(type=SRV). + type: int + default: 1 + zone: + description: + - The name of the Zone to work with (e.g. "example.com"). + - The Zone must already exist. + type: str + required: true + aliases: [ domain ] +''' + +EXAMPLES = r''' +- name: Create a test.example.net A record to point to 127.0.0.1 + community.general.cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + account_email: test@example.com + account_api_key: dummyapitoken + register: record + +- name: Create a record using api token + community.general.cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + api_token: dummyapitoken + +- name: Create a example.net CNAME record to example.com + community.general.cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Change its TTL + community.general.cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + ttl: 600 + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Delete the record + community.general.cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + account_email: test@example.com + account_api_key: dummyapitoken + state: absent + +- name: Create a example.net CNAME record to example.com and proxy through Cloudflare's network + community.general.cloudflare_dns: + zone: example.net + type: CNAME + value: example.com + proxied: true + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +# This deletes all other TXT records named "test.example.net" +- name: Create TXT record "test.example.net" with value "unique value" + community.general.cloudflare_dns: + domain: example.net + record: test + type: TXT + value: unique value + solo: true + account_email: test@example.com + account_api_key: dummyapitoken + state: present + +- name: Create an SRV record _foo._tcp.example.net + community.general.cloudflare_dns: + domain: example.net + service: foo + proto: tcp + port: 3500 + priority: 10 + weight: 20 + type: SRV + value: fooserver.example.net + +- name: Create a SSHFP record login.example.com + community.general.cloudflare_dns: + zone: example.com + record: login + type: SSHFP + algorithm: 4 + hash_type: 2 + value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1 + +- name: Create a TLSA record _25._tcp.mail.example.com + community.general.cloudflare_dns: + zone: example.com + record: mail + port: 25 + proto: tcp + type: TLSA + cert_usage: 3 + selector: 1 + hash_type: 1 + value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3 + +- name: Create a DS record for subdomain.example.com + community.general.cloudflare_dns: + zone: example.com + record: subdomain + type: DS + key_tag: 5464 + algorithm: 8 + hash_type: 2 + value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB +''' + +RETURN = r''' +record: + description: A dictionary containing the record data. + returned: success, except on record deletion + type: complex + contains: + content: + description: The record content (details depend on record type). + returned: success + type: str + sample: 192.0.2.91 + created_on: + description: The record creation date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + data: + description: Additional record data. 
+ returned: success, if type is SRV, DS, SSHFP or TLSA + type: dict + sample: { + name: "jabber", + port: 8080, + priority: 10, + proto: "_tcp", + service: "_xmpp", + target: "jabberhost.sample.com", + weight: 5, + } + id: + description: The record ID. + returned: success + type: str + sample: f9efb0549e96abcb750de63b38c9576e + locked: + description: No documentation available. + returned: success + type: bool + sample: false + meta: + description: No documentation available. + returned: success + type: dict + sample: { auto_added: false } + modified_on: + description: Record modification date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + name: + description: The record name as FQDN (including _service and _proto for SRV). + returned: success + type: str + sample: www.sample.com + priority: + description: Priority of the MX record. + returned: success, if type is MX + type: int + sample: 10 + proxiable: + description: Whether this record can be proxied through Cloudflare. + returned: success + type: bool + sample: false + proxied: + description: Whether the record is proxied through Cloudflare. + returned: success + type: bool + sample: false + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + zone_id: + description: The ID of the zone containing the record. + returned: success + type: str + sample: abcede0bf9f0066f94029d2e6b73856a + zone_name: + description: The name of the zone containing the record. + returned: success + type: str + sample: sample.com +''' + +import json + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.urls import fetch_url + + +def lowercase_string(param): + if not isinstance(param, str): + return param + return param.lower() + + +class CloudflareAPI(object): + + cf_api_endpoint = 'https://api.cloudflare.com/client/v4' + changed = False + + def __init__(self, module): + self.module = module + self.api_token = module.params['api_token'] + self.account_api_key = module.params['account_api_key'] + self.account_email = module.params['account_email'] + self.algorithm = module.params['algorithm'] + self.cert_usage = module.params['cert_usage'] + self.hash_type = module.params['hash_type'] + self.key_tag = module.params['key_tag'] + self.port = module.params['port'] + self.priority = module.params['priority'] + self.proto = lowercase_string(module.params['proto']) + self.proxied = module.params['proxied'] + self.selector = module.params['selector'] + self.record = lowercase_string(module.params['record']) + self.service = lowercase_string(module.params['service']) + self.is_solo = module.params['solo'] + self.state = module.params['state'] + self.timeout = module.params['timeout'] + self.ttl = module.params['ttl'] + self.type = module.params['type'] + self.value = module.params['value'] + self.weight = module.params['weight'] + self.zone = lowercase_string(module.params['zone']) + + if self.record == '@': + self.record = self.zone + + if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None): + self.value = self.value.rstrip('.').lower() + + if (self.type == 'AAAA') and (self.value is not None): + self.value = self.value.lower() + + if (self.type == 'SRV'): + if (self.proto is not None) and (not 
self.proto.startswith('_')): + self.proto = '_' + self.proto + if (self.service is not None) and (not self.service.startswith('_')): + self.service = '_' + self.service + + if (self.type == 'TLSA'): + if (self.proto is not None) and (not self.proto.startswith('_')): + self.proto = '_' + self.proto + if (self.port is not None): + self.port = '_' + str(self.port) + + if not self.record.endswith(self.zone): + self.record = self.record + '.' + self.zone + + if (self.type == 'DS'): + if self.record == self.zone: + self.module.fail_json(msg="DS records only apply to subdomains.") + + def _cf_simple_api_call(self, api_call, method='GET', payload=None): + if self.api_token: + headers = { + 'Authorization': 'Bearer ' + self.api_token, + 'Content-Type': 'application/json', + } + else: + headers = { + 'X-Auth-Email': self.account_email, + 'X-Auth-Key': self.account_api_key, + 'Content-Type': 'application/json', + } + data = None + if payload: + try: + data = json.dumps(payload) + except Exception as e: + self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e)) + + resp, info = fetch_url(self.module, + self.cf_api_endpoint + api_call, + headers=headers, + data=data, + method=method, + timeout=self.timeout) + + if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]: + self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg'))) + + error_msg = '' + if info['status'] == 401: + # Unauthorized + error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 403: + # Forbidden + error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 429: + # Too many requests + error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 405: + # Method not allowed + error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 415: + # Unsupported Media Type + error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + elif info['status'] == 400: + # Bad Request + error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call) + + result = None + try: + content = resp.read() + except AttributeError: + if info['body']: + content = info['body'] + else: + error_msg += "; The API response was empty" + + if content: + try: + result = json.loads(to_text(content, errors='surrogate_or_strict')) + except (getattr(json, 'JSONDecodeError', ValueError)) as e: + error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content) + + # Without a valid/parsed JSON response no more error processing can be done + if result is None: + self.module.fail_json(msg=error_msg) + + if 'success' not in result: + error_msg += "; Unexpected error details: {0}".format(result.get('error')) + self.module.fail_json(msg=error_msg) + + if not result['success']: + error_msg += "; Error details: " + for error in result['errors']: + error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message']) + if 'error_chain' in error: + for chain_error in error['error_chain']: + error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message']) 
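+            # By now error_msg has flattened every top-level error and any nested
+            # error_chain entries into "code: X, error: Y; " pairs, for example
+            # (payload shape assumed from the Cloudflare v4 API documentation):
+            #   "; Error details: code: 1004, error: DNS Validation Error; code: 9021, error: Invalid TTL; "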
+ self.module.fail_json(msg=error_msg) + + return result, info['status'] + + def _cf_api_call(self, api_call, method='GET', payload=None): + result, status = self._cf_simple_api_call(api_call, method, payload) + + data = result['result'] + + if 'result_info' in result: + pagination = result['result_info'] + if pagination['total_pages'] > 1: + next_page = int(pagination['page']) + 1 + parameters = ['page={0}'.format(next_page)] + # strip "page" parameter from call parameters (if there are any) + if '?' in api_call: + raw_api_call, query = api_call.split('?', 1) + parameters += [param for param in query.split('&') if not param.startswith('page')] + else: + raw_api_call = api_call + while next_page <= pagination['total_pages']: + raw_api_call += '?' + '&'.join(parameters) + result, status = self._cf_simple_api_call(raw_api_call, method, payload) + data += result['result'] + next_page += 1 + + return data, status + + def _get_zone_id(self, zone=None): + if not zone: + zone = self.zone + + zones = self.get_zones(zone) + if len(zones) > 1: + self.module.fail_json(msg="More than one zone matches {0}".format(zone)) + + if len(zones) < 1: + self.module.fail_json(msg="No zone found with name {0}".format(zone)) + + return zones[0]['id'] + + def get_zones(self, name=None): + if not name: + name = self.zone + param = '' + if name: + param = '?' + urlencode({'name': name}) + zones, status = self._cf_api_call('/zones' + param) + return zones + + def get_dns_records(self, zone_name=None, type=None, record=None, value=''): + if not zone_name: + zone_name = self.zone + if not type: + type = self.type + if not record: + record = self.record + # necessary because None as value means to override user + # set module value + if (not value) and (value is not None): + value = self.value + + zone_id = self._get_zone_id() + api_call = '/zones/{0}/dns_records'.format(zone_id) + query = {} + if type: + query['type'] = type + if record: + query['name'] = record + if value: + query['content'] = value + if query: + api_call += '?' + urlencode(query) + + records, status = self._cf_api_call(api_call) + return records + + def delete_dns_records(self, **kwargs): + params = {} + for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone', + 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: + if param in kwargs: + params[param] = kwargs[param] + else: + params[param] = getattr(self, param) + + records = [] + content = params['value'] + search_record = params['record'] + if params['type'] == 'SRV': + if not (params['value'] is None or params['value'] == ''): + content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] + search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] + elif params['type'] == 'DS': + if not (params['value'] is None or params['value'] == ''): + content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + elif params['type'] == 'SSHFP': + if not (params['value'] is None or params['value'] == ''): + content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + elif params['type'] == 'TLSA': + if not (params['value'] is None or params['value'] == ''): + content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_record = params['port'] + '.' + params['proto'] + '.' 
+ params['record'] + if params['solo']: + search_value = None + else: + search_value = content + + records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + + for rr in records: + if params['solo']: + if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)): + self.changed = True + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + else: + self.changed = True + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + return self.changed + + def ensure_dns_record(self, **kwargs): + params = {} + for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone', + 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: + if param in kwargs: + params[param] = kwargs[param] + else: + params[param] = getattr(self, param) + + search_value = params['value'] + search_record = params['record'] + new_record = None + if (params['type'] is None) or (params['record'] is None): + self.module.fail_json(msg="You must provide a type and a record to create a new record") + + if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']): + if not params['value']: + self.module.fail_json(msg="You must provide a non-empty value to create this record type") + + # there can only be one CNAME per record + # ignoring the value when searching for existing + # CNAME records allows us to update the value if it + # changes + if params['type'] == 'CNAME': + search_value = None + + new_record = { + "type": params['type'], + "name": params['record'], + "content": params['value'], + "ttl": params['ttl'] + } + + if (params['type'] in ['A', 'AAAA', 'CNAME']): + new_record["proxied"] = params["proxied"] + + if params['type'] == 'MX': + for attr in [params['priority'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide priority and a value to create this record type") + new_record = { + "type": params['type'], + "name": params['record'], + "content": params['value'], + "priority": params['priority'], + "ttl": params['ttl'] + } + + if params['type'] == 'SRV': + for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") + srv_data = { + "target": params['value'], + "port": params['port'], + "weight": params['weight'], + "priority": params['priority'], + "name": params['record'], + "proto": params['proto'], + "service": params['service'] + } + + new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data} + search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] + search_record = params['service'] + '.' + params['proto'] + '.' 
+ params['record'] + + if params['type'] == 'DS': + for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type") + ds_data = { + "key_tag": params['key_tag'], + "algorithm": params['algorithm'], + "digest_type": params['hash_type'], + "digest": params['value'], + } + new_record = { + "type": params['type'], + "name": params['record'], + 'data': ds_data, + "ttl": params['ttl'], + } + search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + + if params['type'] == 'SSHFP': + for attr in [params['algorithm'], params['hash_type'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type") + sshfp_data = { + "fingerprint": params['value'], + "type": params['hash_type'], + "algorithm": params['algorithm'], + } + new_record = { + "type": params['type'], + "name": params['record'], + 'data': sshfp_data, + "ttl": params['ttl'], + } + search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + + if params['type'] == 'TLSA': + for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type") + search_record = params['port'] + '.' + params['proto'] + '.' + params['record'] + tlsa_data = { + "usage": params['cert_usage'], + "selector": params['selector'], + "matching_type": params['hash_type'], + "certificate": params['value'], + } + new_record = { + "type": params['type'], + "name": search_record, + 'data': tlsa_data, + "ttl": params['ttl'], + } + search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + + zone_id = self._get_zone_id(params['zone']) + records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + # in theory this should be impossible as cloudflare does not allow + # the creation of duplicate records but lets cover it anyways + if len(records) > 1: + self.module.fail_json(msg="More than one record already exists for the given attributes. 
That should be impossible, please open an issue!") + # record already exists, check if it must be updated + if len(records) == 1: + cur_record = records[0] + do_update = False + if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']): + do_update = True + if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']): + do_update = True + if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']): + do_update = True + if ('data' in new_record) and ('data' in cur_record): + if (cur_record['data'] != new_record['data']): + do_update = True + if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']): + do_update = True + if do_update: + if self.module.check_mode: + result = new_record + else: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record) + self.changed = True + return result, self.changed + else: + return records, self.changed + if self.module.check_mode: + result = new_record + else: + result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record) + self.changed = True + return result, self.changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_token=dict( + type="str", + required=False, + no_log=True, + fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]), + ), + account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']), + account_email=dict(type='str', required=False), + algorithm=dict(type='int'), + cert_usage=dict(type='int', choices=[0, 1, 2, 3]), + hash_type=dict(type='int', choices=[1, 2]), + key_tag=dict(type='int', no_log=False), + port=dict(type='int'), + priority=dict(type='int', default=1), + proto=dict(type='str'), + proxied=dict(type='bool', default=False), + record=dict(type='str', default='@', aliases=['name']), + selector=dict(type='int', choices=[0, 1]), + service=dict(type='str'), + solo=dict(type='bool'), + state=dict(type='str', default='present', choices=['absent', 'present']), + timeout=dict(type='int', default=30), + ttl=dict(type='int', default=1), + type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']), + value=dict(type='str', aliases=['content']), + weight=dict(type='int', default=1), + zone=dict(type='str', required=True, aliases=['domain']), + ), + supports_check_mode=True, + required_if=[ + ('state', 'present', ['record', 'type', 'value']), + ('state', 'absent', ['record']), + ('type', 'SRV', ['proto', 'service']), + ('type', 'TLSA', ['proto', 'port']), + ], + ) + + if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']): + module.fail_json(msg="Either api_token or account_api_key and account_email params are required.") + if module.params['type'] == 'SRV': + if not ((module.params['weight'] is not None and module.params['port'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['weight'] is None and module.params['port'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.") + + if module.params['type'] == 'SSHFP': + if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or 
module.params['value'] == '')) + or (module.params['algorithm'] is None and module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.") + + if module.params['type'] == 'TLSA': + if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.") + + if module.params['type'] == 'DS': + if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.") + + changed = False + cf_api = CloudflareAPI(module) + + # sanity checks + if cf_api.is_solo and cf_api.state == 'absent': + module.fail_json(msg="solo=true can only be used with state=present") + + # perform add, delete or update (only the TTL can be updated) of one or + # more records + if cf_api.state == 'present': + # delete all records matching record name + type + if cf_api.is_solo: + changed = cf_api.delete_dns_records(solo=cf_api.is_solo) + result, changed = cf_api.ensure_dns_record() + if isinstance(result, list): + module.exit_json(changed=changed, result={'record': result[0]}) + + module.exit_json(changed=changed, result={'record': result}) + else: + # force solo to False, just to be sure + changed = cf_api.delete_dns_records(solo=False) + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/cobbler_sync.py b/ansible_collections/community/general/plugins/modules/cobbler_sync.py new file mode 100644 index 000000000..d7acf4be6 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/cobbler_sync.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Dag Wieers (dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: cobbler_sync +short_description: Sync Cobbler +description: + - Sync Cobbler to commit changes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + host: + description: + - The name or IP address of the Cobbler system. + default: 127.0.0.1 + type: str + port: + description: + - Port number to be used for REST connection. + - The default value depends on parameter C(use_ssl). + type: int + username: + description: + - The username to log in to Cobbler. 
+ default: cobbler + type: str + password: + description: + - The password to log in to Cobbler. + type: str + use_ssl: + description: + - If C(false), an HTTP connection will be used instead of the default HTTPS connection. + type: bool + default: true + validate_certs: + description: + - If C(false), SSL certificates will not be validated. + - This should only set to C(false) when used on personally controlled sites using self-signed certificates. + type: bool + default: true +author: +- Dag Wieers (@dagwieers) +todo: +notes: +- Concurrently syncing Cobbler is bound to fail with weird errors. +- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. + More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). +''' + +EXAMPLES = r''' +- name: Commit Cobbler changes + community.general.cobbler_sync: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + run_once: true + delegate_to: localhost +''' + +RETURN = r''' +# Default return values +''' + +import datetime +import ssl + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client +from ansible.module_utils.common.text.converters import to_text + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', default='127.0.0.1'), + port=dict(type='int'), + username=dict(type='str', default='cobbler'), + password=dict(type='str', no_log=True), + use_ssl=dict(type='bool', default=True), + validate_certs=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + username = module.params['username'] + password = module.params['password'] + port = module.params['port'] + use_ssl = module.params['use_ssl'] + validate_certs = module.params['validate_certs'] + + module.params['proto'] = 'https' if use_ssl else 'http' + if not port: + module.params['port'] = '443' if use_ssl else '80' + + result = dict( + changed=True, + ) + + start = datetime.datetime.utcnow() + + ssl_context = None + if not validate_certs: + try: + ssl_context = ssl._create_unverified_context() + except AttributeError: + # Legacy Python that doesn't verify HTTPS certificates by default + pass + else: + # Handle target environment that doesn't support HTTPS verification + ssl._create_default_https_context = ssl._create_unverified_context + + url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) + if ssl_context: + conn = xmlrpc_client.ServerProxy(url, context=ssl_context) + else: + conn = xmlrpc_client.Server(url) + + try: + token = conn.login(username, password) + except xmlrpc_client.Fault as e: + module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) + except Exception as e: + module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e))) + + if not module.check_mode: + try: + conn.sync(token) + except Exception as e: + module.fail_json(msg="Failed to sync Cobbler. 
{error}".format(error=to_text(e))) + + elapsed = datetime.datetime.utcnow() - start + module.exit_json(elapsed=elapsed.seconds, **result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/cobbler_system.py b/ansible_collections/community/general/plugins/modules/cobbler_system.py new file mode 100644 index 000000000..c30b4f1c1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/cobbler_system.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Dag Wieers (dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: cobbler_system +short_description: Manage system objects in Cobbler +description: + - Add, modify or remove systems in Cobbler +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + host: + description: + - The name or IP address of the Cobbler system. + default: 127.0.0.1 + type: str + port: + description: + - Port number to be used for REST connection. + - The default value depends on parameter C(use_ssl). + type: int + username: + description: + - The username to log in to Cobbler. + default: cobbler + type: str + password: + description: + - The password to log in to Cobbler. + type: str + use_ssl: + description: + - If C(false), an HTTP connection will be used instead of the default HTTPS connection. + type: bool + default: true + validate_certs: + description: + - If C(false), SSL certificates will not be validated. + - This should only set to C(false) when used on personally controlled sites using self-signed certificates. + type: bool + default: true + name: + description: + - The system name to manage. + type: str + properties: + description: + - A dictionary with system properties. + type: dict + interfaces: + description: + - A list of dictionaries containing interface options. + type: dict + sync: + description: + - Sync on changes. + - Concurrently syncing Cobbler is bound to fail. + type: bool + default: false + state: + description: + - Whether the system should be present, absent or a query is made. + choices: [ absent, present, query ] + default: present + type: str +author: +- Dag Wieers (@dagwieers) +notes: +- Concurrently syncing Cobbler is bound to fail with weird errors. +- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. + More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). +''' + +EXAMPLES = r''' +- name: Ensure the system exists in Cobbler + community.general.cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + name: myhost + properties: + profile: CentOS6-x86_64 + name_servers: [ 2.3.4.5, 3.4.5.6 ] + name_servers_search: foo.com, bar.com + interfaces: + eth0: + macaddress: 00:01:02:03:04:05 + ipaddress: 1.2.3.4 + delegate_to: localhost + +- name: Enable network boot in Cobbler + community.general.cobbler_system: + host: bdsol-aci-cobbler-01 + username: cobbler + password: ins3965! 
+ name: bdsol-aci51-apic1.cisco.com + properties: + netboot_enabled: true + state: present + delegate_to: localhost + +- name: Query all systems in Cobbler + community.general.cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + state: query + register: cobbler_systems + delegate_to: localhost + +- name: Query a specific system in Cobbler + community.general.cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + name: '{{ inventory_hostname }}' + state: query + register: cobbler_properties + delegate_to: localhost + +- name: Ensure the system does not exist in Cobbler + community.general.cobbler_system: + host: cobbler01 + username: cobbler + password: MySuperSecureP4sswOrd + name: myhost + state: absent + delegate_to: localhost +''' + +RETURN = r''' +systems: + description: List of systems + returned: I(state=query) and I(name) is not provided + type: list +system: + description: (Resulting) information about the system we are working with + returned: when I(name) is provided + type: dict +''' + +import datetime +import ssl + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import xmlrpc_client +from ansible.module_utils.common.text.converters import to_text + +IFPROPS_MAPPING = dict( + bondingopts='bonding_opts', + bridgeopts='bridge_opts', + connected_mode='connected_mode', + cnames='cnames', + dhcptag='dhcp_tag', + dnsname='dns_name', + ifgateway='if_gateway', + interfacetype='interface_type', + interfacemaster='interface_master', + ipaddress='ip_address', + ipv6address='ipv6_address', + ipv6defaultgateway='ipv6_default_gateway', + ipv6mtu='ipv6_mtu', + ipv6prefix='ipv6_prefix', + ipv6secondaries='ipv6_secondariesu', + ipv6staticroutes='ipv6_static_routes', + macaddress='mac_address', + management='management', + mtu='mtu', + netmask='netmask', + static='static', + staticroutes='static_routes', + virtbridge='virt_bridge', +) + + +def getsystem(conn, name, token): + system = dict() + if name: + # system = conn.get_system(name, token) + systems = conn.find_system(dict(name=name), token) + if systems: + system = systems[0] + return system + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', default='127.0.0.1'), + port=dict(type='int'), + username=dict(type='str', default='cobbler'), + password=dict(type='str', no_log=True), + use_ssl=dict(type='bool', default=True), + validate_certs=dict(type='bool', default=True), + name=dict(type='str'), + interfaces=dict(type='dict'), + properties=dict(type='dict'), + sync=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + ), + supports_check_mode=True, + ) + + username = module.params['username'] + password = module.params['password'] + port = module.params['port'] + use_ssl = module.params['use_ssl'] + validate_certs = module.params['validate_certs'] + + name = module.params['name'] + state = module.params['state'] + + module.params['proto'] = 'https' if use_ssl else 'http' + if not port: + module.params['port'] = '443' if use_ssl else '80' + + result = dict( + changed=False, + ) + + start = datetime.datetime.utcnow() + + ssl_context = None + if not validate_certs: + try: + ssl_context = ssl._create_unverified_context() + except AttributeError: + # Legacy Python that doesn't verify HTTPS certificates by default + pass + else: + # Handle target environment that doesn't support HTTPS 
verification + ssl._create_default_https_context = ssl._create_unverified_context + + url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) + if ssl_context: + conn = xmlrpc_client.ServerProxy(url, context=ssl_context) + else: + conn = xmlrpc_client.Server(url) + + try: + token = conn.login(username, password) + except xmlrpc_client.Fault as e: + module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params)) + except Exception as e: + module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params)) + + system = getsystem(conn, name, token) + # result['system'] = system + + if state == 'query': + if name: + result['system'] = system + else: + # Turn it into a dictionary of dictionaries + # all_systems = conn.get_systems() + # result['systems'] = { system['name']: system for system in all_systems } + + # Return a list of dictionaries + result['systems'] = conn.get_systems() + + elif state == 'present': + + if system: + # Update existing entry + system_id = conn.get_system_handle(name, token) + + for key, value in iteritems(module.params['properties']): + if key not in system: + module.warn("Property '{0}' is not a valid system property.".format(key)) + if system[key] != value: + try: + conn.modify_system(system_id, key, value, token) + result['changed'] = True + except Exception as e: + module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) + + else: + # Create a new entry + system_id = conn.new_system(token) + conn.modify_system(system_id, 'name', name, token) + result['changed'] = True + + if module.params['properties']: + for key, value in iteritems(module.params['properties']): + try: + conn.modify_system(system_id, key, value, token) + except Exception as e: + module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e)) + + # Add interface properties + interface_properties = dict() + if module.params['interfaces']: + for device, values in iteritems(module.params['interfaces']): + for key, value in iteritems(values): + if key == 'name': + continue + if key not in IFPROPS_MAPPING: + module.warn("Property '{0}' is not a valid system property.".format(key)) + if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value: + result['changed'] = True + interface_properties['{0}-{1}'.format(key, device)] = value + + if result['changed'] is True: + conn.modify_system(system_id, "modify_interface", interface_properties, token) + + # Only save when the entry was changed + if not module.check_mode and result['changed']: + conn.save_system(system_id, token) + + elif state == 'absent': + + if system: + if not module.check_mode: + conn.remove_system(name, token) + result['changed'] = True + + if not module.check_mode and module.params['sync'] and result['changed']: + try: + conn.sync(token) + except Exception as e: + module.fail_json(msg="Failed to sync Cobbler. 
{0}".format(to_text(e))) + + if state in ('absent', 'present'): + result['system'] = getsystem(conn, name, token) + + if module._diff: + result['diff'] = dict(before=system, after=result['system']) + + elapsed = datetime.datetime.utcnow() - start + module.exit_json(elapsed=elapsed.seconds, **result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/composer.py b/ansible_collections/community/general/plugins/modules/composer.py new file mode 100644 index 000000000..793abcda1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/composer.py @@ -0,0 +1,275 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Dimitrios Tydeas Mengidis +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: composer +author: + - "Dimitrios Tydeas Mengidis (@dmtrs)" + - "René Moser (@resmo)" +short_description: Dependency Manager for PHP +description: + - > + Composer is a tool for dependency management in PHP. It allows you to + declare the dependent libraries your project needs and it will install + them in your project for you. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + command: + type: str + description: + - Composer command like "install", "update" and so on. + default: install + arguments: + type: str + description: + - Composer arguments like required package, version and so on. + default: '' + executable: + type: path + description: + - Path to PHP Executable on the remote host, if PHP is not in PATH. + aliases: [ php_path ] + working_dir: + type: path + description: + - Directory of your project (see --working-dir). This is required when + the command is not run globally. + - Will be ignored if I(global_command=true). + global_command: + description: + - Runs the specified command globally. + type: bool + default: false + prefer_source: + description: + - Forces installation from package sources when possible (see --prefer-source). + default: false + type: bool + prefer_dist: + description: + - Forces installation from package dist even for dev versions (see --prefer-dist). + default: false + type: bool + no_dev: + description: + - Disables installation of require-dev packages (see --no-dev). + default: true + type: bool + no_scripts: + description: + - Skips the execution of all scripts defined in composer.json (see --no-scripts). + default: false + type: bool + no_plugins: + description: + - Disables all plugins (see --no-plugins). + default: false + type: bool + optimize_autoloader: + description: + - Optimize autoloader during autoloader dump (see --optimize-autoloader). + - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: true + type: bool + classmap_authoritative: + description: + - Autoload classes from classmap only. + - Implicitly enable optimize_autoloader. + - Recommended especially for production, but can take a bit of time to run. 
+ default: false + type: bool + apcu_autoloader: + description: + - Uses APCu to cache found/not-found classes + default: false + type: bool + ignore_platform_reqs: + description: + - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these. + default: false + type: bool + composer_executable: + type: path + description: + - Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed. + version_added: 3.2.0 +requirements: + - php + - composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable) +notes: + - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available. + - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues. +''' + +EXAMPLES = ''' +- name: Download and installs all libs and dependencies outlined in the /path/to/project/composer.lock + community.general.composer: + command: install + working_dir: /path/to/project + +- name: Install a new package + community.general.composer: + command: require + arguments: my/package + working_dir: /path/to/project + +- name: Clone and install a project with all dependencies + community.general.composer: + command: create-project + arguments: package/package /path/to/project ~1.0 + working_dir: /path/to/project + prefer_dist: true + +- name: Install a package globally + community.general.composer: + command: require + global_command: true + arguments: my/package +''' + +import re +from ansible.module_utils.basic import AnsibleModule + + +def parse_out(string): + return re.sub(r"\s+", " ", string).strip() + + +def has_changed(string): + for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]: + if no_change in string: + return False + + return True + + +def get_available_options(module, command='install'): + # get all available options from a composer command using composer help to json + rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json") + if rc != 0: + output = parse_out(err) + module.fail_json(msg=output) + + command_help_json = module.from_json(out) + return command_help_json['definition']['options'] + + +def composer_command(module, command, arguments="", options=None, global_command=False): + if options is None: + options = [] + + if module.params['executable'] is None: + php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) + else: + php_path = module.params['executable'] + + if module.params['composer_executable'] is None: + composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + else: + composer_path = module.params['composer_executable'] + + cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments) + return module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + command=dict(default="install", type="str"), + arguments=dict(default="", type="str"), + executable=dict(type="path", aliases=["php_path"]), + working_dir=dict(type="path"), + global_command=dict(default=False, type="bool"), + prefer_source=dict(default=False, type="bool"), + prefer_dist=dict(default=False, type="bool"), + no_dev=dict(default=True, type="bool"), + no_scripts=dict(default=False, type="bool"), + no_plugins=dict(default=False, type="bool"), + 
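+            # These booleans, like the ones above, are forwarded to composer as the
+            # matching --flag only when "composer help <command>" advertises that
+            # flag for the chosen command (see the option_params mapping below).
+            # Note that Composer itself spells the APCu flag --apcu-autoloader.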
+            apcu_autoloader=dict(default=False, type="bool"),
+            optimize_autoloader=dict(default=True, type="bool"),
+            classmap_authoritative=dict(default=False, type="bool"),
+            ignore_platform_reqs=dict(default=False, type="bool"),
+            composer_executable=dict(type="path"),
+        ),
+        required_if=[('global_command', False, ['working_dir'])],
+        supports_check_mode=True
+    )
+
+    # Get the composer command; arguments must be passed separately
+    command = module.params['command']
+    if re.search(r"\s", command):
+        module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
+
+    arguments = module.params['arguments']
+    global_command = module.params['global_command']
+    available_options = get_available_options(module=module, command=command)
+
+    options = []
+
+    # Default options
+    default_options = [
+        'no-ansi',
+        'no-interaction',
+        'no-progress',
+    ]
+
+    for option in default_options:
+        if option in available_options:
+            option = "--%s" % option
+            options.append(option)
+
+    if not global_command:
+        options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
+    option_params = {
+        'prefer_source': 'prefer-source',
+        'prefer_dist': 'prefer-dist',
+        'no_dev': 'no-dev',
+        'no_scripts': 'no-scripts',
+        'no_plugins': 'no-plugins',
+        'apcu_autoloader': 'apcu-autoloader',
+        'optimize_autoloader': 'optimize-autoloader',
+        'classmap_authoritative': 'classmap-authoritative',
+        'ignore_platform_reqs': 'ignore-platform-reqs',
+    }
+
+    for param, option in option_params.items():
+        if module.params.get(param) and option in available_options:
+            option = "--%s" % option
+            options.append(option)
+
+    if module.check_mode:
+        if 'dry-run' in available_options:
+            options.append('--dry-run')
+        else:
+            module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
+
+    rc, out, err = composer_command(module, command, arguments, options, global_command)
+
+    if rc != 0:
+        output = parse_out(err)
+        module.fail_json(msg=output, stdout=err)
+    else:
+        # Composer versions > 1.0.0-alpha9 use stderr for standard notification messages
+        output = parse_out(out + err)
+        module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
+
+
+if __name__ == '__main__':
+    main()
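The option filtering above relies on Composer describing its own commands. A minimal standalone sketch of that discovery step (assuming `composer` is on PATH; the helper names here are illustrative, not part of the module):

    import json
    import subprocess

    def available_options(command="install"):
        # "composer help <command> --format=json" describes a command,
        # including the options it accepts, as machine-readable JSON.
        out = subprocess.check_output(
            ["composer", "help", command, "--no-interaction", "--format=json"])
        return json.loads(out)["definition"]["options"]

    def supported_flags(command, wanted=("no-ansi", "no-interaction", "no-progress")):
        # Mirror the module's filtering: only pass a flag if this Composer
        # version actually supports it for the given command.
        options = available_options(command)
        return ["--%s" % flag for flag in wanted if flag in options]

    if __name__ == "__main__":
        print(supported_flags("install"))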
diff --git a/ansible_collections/community/general/plugins/modules/consul.py b/ansible_collections/community/general/plugins/modules/consul.py
new file mode 100644
index 000000000..cc599be36
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul.py
@@ -0,0 +1,635 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, Steve Gargan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul
+short_description: Add, modify & delete services within a consul cluster
+description:
+ - Registers services and checks for an agent with a consul cluster.
+   A service is some process running on the agent node that should be advertised by
+   consul's discovery mechanism. It may optionally supply a check definition,
+   a periodic service test to notify the consul cluster of the service's health.
+ - "Checks may also be registered per node, e.g. disk usage or cpu usage, and
+   notify the health of the entire node to the cluster.
+   Service level checks do not require a check name or id, as these are derived
+   by Consul from the service name and id respectively by prefixing them with 'service:'.
+   Node level checks require a I(check_name) and optionally a I(check_id)."
+ - Currently, there is no complete way to retrieve the script, interval or ttl
+   metadata for a registered check. Without this metadata it is not possible to
+   tell if the data supplied with ansible represents a change to a check. As a
+   result this module does not attempt to determine changes and will always report
+   that a change occurred. An API method is planned to supply this metadata, at
+   which point change management will be added.
+ - "See U(http://consul.io) for more details."
+requirements:
+  - python-consul
+  - requests
+author: "Steve Gargan (@sgargan)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+    state:
+        type: str
+        description:
+          - Register or deregister the consul service, defaults to present.
+        default: present
+        choices: ['present', 'absent']
+    service_name:
+        type: str
+        description:
+          - Unique name for the service on a node; must be unique per node.
+            Required if registering a service. May be omitted if registering
+            a node level check.
+    service_id:
+        type: str
+        description:
+          - The ID for the service; must be unique per node. If I(state=absent),
+            defaults to the service name if supplied.
+    host:
+        type: str
+        description:
+          - Host of the consul agent; defaults to localhost.
+        default: localhost
+    port:
+        type: int
+        description:
+          - The port on which the consul agent is running.
+        default: 8500
+    scheme:
+        type: str
+        description:
+          - The protocol scheme on which the consul agent is running.
+        default: http
+    validate_certs:
+        description:
+          - Whether to verify the TLS certificate of the consul agent.
+        type: bool
+        default: true
+    notes:
+        type: str
+        description:
+          - Notes to attach to the check when registering it.
+    service_port:
+        type: int
+        description:
+          - The port on which the service is listening. Can optionally be supplied for
+            registration of a service, i.e. if I(service_name) or I(service_id) is set.
+    service_address:
+        type: str
+        description:
+          - The address to advertise that the service will be listening on.
+            This value will be passed as the I(address) parameter to Consul's
+            C(/v1/agent/service/register) API method, so refer to the Consul API
+            documentation for further details.
+    tags:
+        type: list
+        elements: str
+        description:
+          - Tags that will be attached to the service registration.
+    script:
+        type: str
+        description:
+          - The script/command that will be run periodically to check the health of the service.
+          - Requires I(interval) to be provided.
+    interval:
+        type: str
+        description:
+          - The interval at which the service check will be run.
+            This is a number with a C(s) or C(m) suffix to signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+            If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
+          - Required if one of the parameters I(script), I(http), or I(tcp) is specified.
+    check_id:
+        type: str
+        description:
+          - An ID for the service check. If I(state=absent), defaults to
+            I(check_name). Ignored if part of a service definition.
+    check_name:
+        type: str
+        description:
+          - Name for the service check. Required if standalone; ignored if
+            part of a service definition.
+    ttl:
+        type: str
+        description:
+          - Checks can be registered with a TTL instead of a I(script) and I(interval).
+            This means that the service will check in with the agent before the
+            TTL expires; if it does not, the check will be considered failed.
+            Required if registering a check when I(script) and I(interval) are missing.
+            Similar to I(interval), this is a number with a C(s) or C(m) suffix to
+            signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+            If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
+    tcp:
+        type: str
+        description:
+          - Checks can be registered with a TCP port. This means that consul
+            will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
+            The format is C(host:port), for example C(localhost:80).
+          - Requires I(interval) to be provided.
+        version_added: '1.3.0'
+    http:
+        type: str
+        description:
+          - Checks can be registered with an HTTP endpoint. This means that consul
+            will check that the http endpoint returns a successful HTTP status.
+          - Requires I(interval) to be provided.
+    timeout:
+        type: str
+        description:
+          - A custom HTTP check timeout. The consul default is 10 seconds.
+            Similar to I(interval), this is a number with a C(s) or C(m) suffix to
+            signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+            If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
+    token:
+        type: str
+        description:
+          - The token key identifying an ACL rule set. May be required to register services.
+    ack_params_state_absent:
+        type: bool
+        description:
+          - Disable deprecation warning when using parameters incompatible with I(state=absent).
+'''
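The suffix rule that I(interval), I(ttl) and I(timeout) share boils down to one small normalization step. A standalone sketch of that behavior (it mirrors the validate_duration helper further down in this module):

    def normalize_duration(duration):
        # Bare numbers default to seconds; explicit units are kept as-is.
        units = ('ns', 'us', 'ms', 's', 'm', 'h')
        if duration and not any(duration.endswith(u) for u in units):
            duration = "{0}s".format(duration)
        return duration

    assert normalize_duration("10") == "10s"
    assert normalize_duration("15s") == "15s"
    assert normalize_duration("1m") == "1m"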
+
+EXAMPLES = '''
+- name: Register nginx service with the local consul agent
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+
+- name: Register nginx service with curl check
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    script: curl http://localhost
+    interval: 60s
+
+- name: Register nginx with a TCP check
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    interval: 60s
+    tcp: localhost:80
+
+- name: Register nginx with an HTTP check
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    interval: 60s
+    http: http://localhost:80/status
+
+- name: Register external service nginx available at 10.1.5.23
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    service_address: 10.1.5.23
+
+- name: Register nginx with some service tags
+  community.general.consul:
+    service_name: nginx
+    service_port: 80
+    tags:
+      - prod
+      - webservers
+
+- name: Remove nginx service
+  community.general.consul:
+    service_name: nginx
+    state: absent
+
+- name: Register celery worker service
+  community.general.consul:
+    service_name: celery-worker
+    tags:
+      - prod
+      - worker
+
+- name: Create a node level check to test disk usage
+  community.general.consul:
+    check_name: Disk usage
+    check_id: disk_usage
+    script: /opt/disk_usage.py
+    interval: 5m
+
+- name: Register an HTTP check against a service that's already registered
+  community.general.consul:
+    check_name: nginx-check2
+    check_id: nginx-check2
+    service_id: nginx
+    interval: 60s
+    http: http://localhost:80/morestatus
+'''
+
+try:
+    import consul
+    from requests.exceptions import ConnectionError
+
+    class PatchedConsulAgentService(consul.Consul.Agent.Service):
+        def deregister(self, service_id, token=None):
+            params = {}
+            if token:
+                params['token'] = token
+            return
self.agent.http.put(consul.base.CB.bool(), + '/v1/agent/service/deregister/%s' % service_id, + params=params) + + python_consul_installed = True +except ImportError: + python_consul_installed = False + +import re +from ansible.module_utils.basic import AnsibleModule + + +def register_with_consul(module): + state = module.params['state'] + + if state == 'present': + add(module) + else: + remove(module) + + +def add(module): + ''' adds a service or a check depending on supplied configuration''' + check = parse_check(module) + service = parse_service(module) + + if not service and not check: + module.fail_json(msg='a name and port are required to register a service') + + if service: + if check: + service.add_check(check) + add_service(module, service) + elif check: + add_check(module, check) + + +def remove(module): + ''' removes a service or a check ''' + service_id = module.params['service_id'] or module.params['service_name'] + check_id = module.params['check_id'] or module.params['check_name'] + if service_id: + remove_service(module, service_id) + else: + remove_check(module, check_id) + + +def add_check(module, check): + ''' registers a check with the given agent. currently there is no way + retrieve the full metadata of an existing check through the consul api. + Without this we can't compare to the supplied check and so we must assume + a change. ''' + if not check.name and not check.service_id: + module.fail_json(msg='a check name is required for a node level check, one not attached to a service') + + consul_api = get_consul_api(module) + check.register(consul_api) + + module.exit_json(changed=True, + check_id=check.check_id, + check_name=check.name, + script=check.script, + interval=check.interval, + ttl=check.ttl, + tcp=check.tcp, + http=check.http, + timeout=check.timeout, + service_id=check.service_id) + + +def remove_check(module, check_id): + ''' removes a check using its id ''' + consul_api = get_consul_api(module) + + if check_id in consul_api.agent.checks(): + consul_api.agent.check.deregister(check_id) + module.exit_json(changed=True, id=check_id) + + module.exit_json(changed=False, id=check_id) + + +def add_service(module, service): + ''' registers a service with the current agent ''' + result = service + changed = False + + consul_api = get_consul_api(module) + existing = get_service_by_id_or_name(consul_api, service.id) + + # there is no way to retrieve the details of checks so if a check is present + # in the service it must be re-registered + if service.has_checks() or not existing or not existing == service: + + service.register(consul_api) + # check that it registered correctly + registered = get_service_by_id_or_name(consul_api, service.id) + if registered: + result = registered + changed = True + + module.exit_json(changed=changed, + service_id=result.id, + service_name=result.name, + service_port=result.port, + checks=[check.to_dict() for check in service.checks()], + tags=result.tags) + + +def remove_service(module, service_id): + ''' deregister a service from the given agent using its service id ''' + consul_api = get_consul_api(module) + service = get_service_by_id_or_name(consul_api, service_id) + if service: + consul_api.agent.service.deregister(service_id, token=module.params['token']) + module.exit_json(changed=True, id=service_id) + + module.exit_json(changed=False, id=service_id) + + +def get_consul_api(module): + consulClient = consul.Consul(host=module.params['host'], + port=module.params['port'], + scheme=module.params['scheme'], + 
verify=module.params['validate_certs'], + token=module.params['token']) + consulClient.agent.service = PatchedConsulAgentService(consulClient) + return consulClient + + +def get_service_by_id_or_name(consul_api, service_id_or_name): + ''' iterate the registered services and find one with the given id ''' + for dummy, service in consul_api.agent.services().items(): + if service_id_or_name in (service['ID'], service['Service']): + return ConsulService(loaded=service) + + +def parse_check(module): + _checks = [module.params[p] for p in ('script', 'ttl', 'tcp', 'http') if module.params[p]] + + if len(_checks) > 1: + module.fail_json( + msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense') + + if module.params['check_id'] or _checks: + return ConsulCheck( + module.params['check_id'], + module.params['check_name'], + module.params['check_node'], + module.params['check_host'], + module.params['script'], + module.params['interval'], + module.params['ttl'], + module.params['notes'], + module.params['tcp'], + module.params['http'], + module.params['timeout'], + module.params['service_id'], + ) + + +def parse_service(module): + return ConsulService( + module.params['service_id'], + module.params['service_name'], + module.params['service_address'], + module.params['service_port'], + module.params['tags'], + ) + + +class ConsulService(object): + + def __init__(self, service_id=None, name=None, address=None, port=-1, + tags=None, loaded=None): + self.id = self.name = name + if service_id: + self.id = service_id + self.address = address + self.port = port + self.tags = tags + self._checks = [] + if loaded: + self.id = loaded['ID'] + self.name = loaded['Service'] + self.port = loaded['Port'] + self.tags = loaded['Tags'] + + def register(self, consul_api): + optional = {} + + if self.port: + optional['port'] = self.port + + if len(self._checks) > 0: + optional['check'] = self._checks[0].check + + consul_api.agent.service.register( + self.name, + service_id=self.id, + address=self.address, + tags=self.tags, + **optional) + + def add_check(self, check): + self._checks.append(check) + + def checks(self): + return self._checks + + def has_checks(self): + return len(self._checks) > 0 + + def __eq__(self, other): + return (isinstance(other, self.__class__) and + self.id == other.id and + self.name == other.name and + self.port == other.port and + self.tags == other.tags) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + data = {'id': self.id, "name": self.name} + if self.port: + data['port'] = self.port + if self.tags and len(self.tags) > 0: + data['tags'] = self.tags + if len(self._checks) > 0: + data['check'] = self._checks[0].to_dict() + return data + + +class ConsulCheck(object): + + def __init__(self, check_id, name, node=None, host='localhost', + script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None): + self.check_id = self.name = name + if check_id: + self.check_id = check_id + self.service_id = service_id + self.notes = notes + self.node = node + self.host = host + + self.interval = self.validate_duration('interval', interval) + self.ttl = self.validate_duration('ttl', ttl) + self.script = script + self.tcp = tcp + self.http = http + self.timeout = self.validate_duration('timeout', timeout) + + self.check = None + + if script: + self.check = consul.Check.script(script, self.interval) + + if ttl: + self.check = consul.Check.ttl(self.ttl) + + if http: + if interval is None: + 
+                raise Exception('http check must specify interval')
+
+            self.check = consul.Check.http(http, self.interval, self.timeout)
+
+        if tcp:
+            if interval is None:
+                raise Exception('tcp check must specify interval')
+
+            regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
+            match = re.match(regex, tcp)
+
+            if not match:
+                raise Exception('tcp check must be in host:port format')
+
+            self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
+
+    def validate_duration(self, name, duration):
+        if duration:
+            duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+            if not any(duration.endswith(suffix) for suffix in duration_units):
+                duration = "{0}s".format(duration)
+        return duration
+
+    def register(self, consul_api):
+        consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
+                                        notes=self.notes,
+                                        check=self.check)
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__) and
+                self.check_id == other.check_id and
+                self.service_id == other.service_id and
+                self.name == other.name and
+                self.script == other.script and
+                self.interval == other.interval)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def to_dict(self):
+        data = {}
+        self._add(data, 'id', attr='check_id')
+        self._add(data, 'name')
+        self._add(data, 'script')
+        self._add(data, 'node')
+        self._add(data, 'notes')
+        self._add(data, 'host')
+        self._add(data, 'interval')
+        self._add(data, 'ttl')
+        self._add(data, 'tcp')
+        self._add(data, 'http')
+        self._add(data, 'timeout')
+        self._add(data, 'service_id')
+        return data
+
+    def _add(self, data, key, attr=None):
+        try:
+            if attr is None:
+                attr = key
+            data[key] = getattr(self, attr)
+        except Exception:
+            pass
+
+
+def test_dependencies(module):
+    if not python_consul_installed:
+        module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            host=dict(default='localhost'),
+            port=dict(default=8500, type='int'),
+            scheme=dict(default='http'),
+            validate_certs=dict(default=True, type='bool'),
+            check_id=dict(),
+            check_name=dict(),
+            check_node=dict(),
+            check_host=dict(),
+            notes=dict(),
+            script=dict(),
+            service_id=dict(),
+            service_name=dict(),
+            service_address=dict(type='str'),
+            service_port=dict(type='int'),
+            state=dict(default='present', choices=['present', 'absent']),
+            interval=dict(type='str'),
+            ttl=dict(type='str'),
+            tcp=dict(type='str'),
+            http=dict(type='str'),
+            timeout=dict(type='str'),
+            tags=dict(type='list', elements='str'),
+            token=dict(no_log=True),
+            ack_params_state_absent=dict(type='bool'),
+        ),
+        required_if=[
+            ('state', 'present', ['service_name']),
+            ('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True),
+        ],
+        supports_check_mode=False,
+    )
+    p = module.params
+
+    test_dependencies(module)
+    if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']) and not p['ack_params_state_absent']:
+        module.deprecate(
+            "The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is deprecated. "
+            "In community.general 8.0.0 their use will become an error.
" + "To suppress this deprecation notice, set parameter ack_params_state_absent=true.", + version="8.0.0", + collection_name="community.general", + ) + # When reaching c.g 8.0.0: + # - Replace the deprecation with a fail_json(), remove the "ack_params_state_absent" condition from the "if" + # - Add mutually_exclusive for ('script', 'ttl', 'tcp', 'http'), then remove that validation from parse_check() + # - Add required_by {'script': 'interval', 'http': 'interval', 'tcp': 'interval'}, then remove checks for 'interval' in ConsulCheck.__init__() + # - Deprecate the parameter ack_params_state_absent + + try: + register_with_consul(module) + except SystemExit: + raise + except ConnectionError as e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (p['host'], p['port'], str(e))) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/consul_acl.py b/ansible_collections/community/general/plugins/modules/consul_acl.py new file mode 100644 index 000000000..91f955228 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/consul_acl.py @@ -0,0 +1,691 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015, Steve Gargan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: consul_acl +short_description: Manipulate Consul ACL keys and rules +description: + - Allows the addition, modification and deletion of ACL keys and associated + rules in a consul cluster via the agent. For more details on using and + configuring ACLs, see https://www.consul.io/docs/guides/acl.html. +author: + - Steve Gargan (@sgargan) + - Colin Nolan (@colin-nolan) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + mgmt_token: + description: + - a management token is required to manipulate the acl lists + required: true + type: str + state: + description: + - whether the ACL pair should be present or absent + required: false + choices: ['present', 'absent'] + default: present + type: str + token_type: + description: + - the type of token that should be created + choices: ['client', 'management'] + default: client + type: str + name: + description: + - the name that should be associated with the acl key, this is opaque + to Consul + required: false + type: str + token: + description: + - the token key identifying an ACL rule set. 
If generated by consul + this will be a UUID + required: false + type: str + rules: + type: list + elements: dict + description: + - rules that should be associated with a given token + required: false + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + type: str + port: + type: int + description: + - the port on which the consul agent is running + required: false + default: 8500 + scheme: + description: + - the protocol scheme on which the consul agent is running + required: false + default: http + type: str + validate_certs: + type: bool + description: + - whether to verify the tls certificate of the consul agent + required: false + default: true +requirements: + - python-consul + - pyhcl + - requests +''' + +EXAMPLES = """ +- name: Create an ACL with rules + community.general.consul_acl: + host: consul1.example.com + mgmt_token: some_management_acl + name: Foo access + rules: + - key: "foo" + policy: read + - key: "private/foo" + policy: deny + +- name: Create an ACL with a specific token + community.general.consul_acl: + host: consul1.example.com + mgmt_token: some_management_acl + name: Foo access + token: my-token + rules: + - key: "foo" + policy: read + +- name: Update the rules associated to an ACL token + community.general.consul_acl: + host: consul1.example.com + mgmt_token: some_management_acl + name: Foo access + token: some_client_token + rules: + - event: "bbq" + policy: write + - key: "foo" + policy: read + - key: "private" + policy: deny + - keyring: write + - node: "hgs4" + policy: write + - operator: read + - query: "" + policy: write + - service: "consul" + policy: write + - session: "standup" + policy: write + +- name: Remove a token + community.general.consul_acl: + host: consul1.example.com + mgmt_token: some_management_acl + token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e + state: absent +""" + +RETURN = """ +token: + description: the token associated to the ACL (the ACL's ID) + returned: success + type: str + sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da +rules: + description: the HCL JSON representation of the rules associated to the ACL, in the format described in the + Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification). 
+ returned: I(status) == "present" + type: dict + sample: { + "key": { + "foo": { + "policy": "write" + }, + "bar": { + "policy": "deny" + } + } + } +operation: + description: the operation performed on the ACL + returned: changed + type: str + sample: update +""" + + +try: + import consul + python_consul_installed = True +except ImportError: + python_consul_installed = False + +try: + import hcl + pyhcl_installed = True +except ImportError: + pyhcl_installed = False + +try: + from requests.exceptions import ConnectionError + has_requests = True +except ImportError: + has_requests = False + +from collections import defaultdict +from ansible.module_utils.basic import to_text, AnsibleModule + + +RULE_SCOPES = [ + "agent", + "agent_prefix", + "event", + "event_prefix", + "key", + "key_prefix", + "keyring", + "node", + "node_prefix", + "operator", + "query", + "query_prefix", + "service", + "service_prefix", + "session", + "session_prefix", +] + +MANAGEMENT_PARAMETER_NAME = "mgmt_token" +HOST_PARAMETER_NAME = "host" +SCHEME_PARAMETER_NAME = "scheme" +VALIDATE_CERTS_PARAMETER_NAME = "validate_certs" +NAME_PARAMETER_NAME = "name" +PORT_PARAMETER_NAME = "port" +RULES_PARAMETER_NAME = "rules" +STATE_PARAMETER_NAME = "state" +TOKEN_PARAMETER_NAME = "token" +TOKEN_TYPE_PARAMETER_NAME = "token_type" + +PRESENT_STATE_VALUE = "present" +ABSENT_STATE_VALUE = "absent" + +CLIENT_TOKEN_TYPE_VALUE = "client" +MANAGEMENT_TOKEN_TYPE_VALUE = "management" + +REMOVE_OPERATION = "remove" +UPDATE_OPERATION = "update" +CREATE_OPERATION = "create" + +_POLICY_JSON_PROPERTY = "policy" +_RULES_JSON_PROPERTY = "Rules" +_TOKEN_JSON_PROPERTY = "ID" +_TOKEN_TYPE_JSON_PROPERTY = "Type" +_NAME_JSON_PROPERTY = "Name" +_POLICY_YML_PROPERTY = "policy" +_POLICY_HCL_PROPERTY = "policy" + +_ARGUMENT_SPEC = { + MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True), + HOST_PARAMETER_NAME: dict(default='localhost'), + SCHEME_PARAMETER_NAME: dict(default='http'), + VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True), + NAME_PARAMETER_NAME: dict(), + PORT_PARAMETER_NAME: dict(default=8500, type='int'), + RULES_PARAMETER_NAME: dict(type='list', elements='dict'), + STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), + TOKEN_PARAMETER_NAME: dict(no_log=False), + TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], + default=CLIENT_TOKEN_TYPE_VALUE) +} + + +def set_acl(consul_client, configuration): + """ + Sets an ACL based on the given configuration. 
+ :param consul_client: the consul client + :param configuration: the run configuration + :return: the output of setting the ACL + """ + acls_as_json = decode_acls_as_json(consul_client.acl.list()) + existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None) + existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json) + if None in existing_acls_mapped_by_token: + raise AssertionError("expecting ACL list to be associated to a token: %s" % + existing_acls_mapped_by_token[None]) + + if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name: + # No token but name given so can get token from name + configuration.token = existing_acls_mapped_by_name[configuration.name].token + + if configuration.token and configuration.token in existing_acls_mapped_by_token: + return update_acl(consul_client, configuration) + else: + if configuration.token in existing_acls_mapped_by_token: + raise AssertionError() + if configuration.name in existing_acls_mapped_by_name: + raise AssertionError() + return create_acl(consul_client, configuration) + + +def update_acl(consul_client, configuration): + """ + Updates an ACL. + :param consul_client: the consul client + :param configuration: the run configuration + :return: the output of the update + """ + existing_acl = load_acl_with_token(consul_client, configuration.token) + changed = existing_acl.rules != configuration.rules + + if changed: + name = configuration.name if configuration.name is not None else existing_acl.name + rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) + updated_token = consul_client.acl.update( + configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl) + if updated_token != configuration.token: + raise AssertionError() + + return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION) + + +def create_acl(consul_client, configuration): + """ + Creates an ACL. + :param consul_client: the consul client + :param configuration: the run configuration + :return: the output of the creation + """ + rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None + token = consul_client.acl.create( + name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token) + rules = configuration.rules + return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION) + + +def remove_acl(consul, configuration): + """ + Removes an ACL. + :param consul: the consul client + :param configuration: the run configuration + :return: the output of the removal + """ + token = configuration.token + changed = consul.acl.info(token) is not None + if changed: + consul.acl.destroy(token) + return Output(changed=changed, token=token, operation=REMOVE_OPERATION) + + +def load_acl_with_token(consul, token): + """ + Loads the ACL with the given token (token == rule ID). + :param consul: the consul client + :param token: the ACL "token"/ID (not name) + :return: the ACL associated to the given token + :exception ConsulACLTokenNotFoundException: raised if the given token does not exist + """ + acl_as_json = consul.acl.info(token) + if acl_as_json is None: + raise ConsulACLNotFoundException(token) + return decode_acl_as_json(acl_as_json) + + +def encode_rules_as_hcl_string(rules): + """ + Converts the given rules into the equivalent HCL (string) representation. 
+ :param rules: the rules + :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal + note for justification) + """ + if len(rules) == 0: + # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty + # string if there is no rules... + return None + rules_as_hcl = "" + for rule in rules: + rules_as_hcl += encode_rule_as_hcl_string(rule) + return rules_as_hcl + + +def encode_rule_as_hcl_string(rule): + """ + Converts the given rule into the equivalent HCL (string) representation. + :param rule: the rule + :return: the equivalent HCL (string) representation of the rule + """ + if rule.pattern is not None: + return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy) + else: + return '%s = "%s"\n' % (rule.scope, rule.policy) + + +def decode_rules_as_hcl_string(rules_as_hcl): + """ + Converts the given HCL (string) representation of rules into a list of rule domain models. + :param rules_as_hcl: the HCL (string) representation of a collection of rules + :return: the equivalent domain model to the given rules + """ + rules_as_hcl = to_text(rules_as_hcl) + rules_as_json = hcl.loads(rules_as_hcl) + return decode_rules_as_json(rules_as_json) + + +def decode_rules_as_json(rules_as_json): + """ + Converts the given JSON representation of rules into a list of rule domain models. + :param rules_as_json: the JSON representation of a collection of rules + :return: the equivalent domain model to the given rules + """ + rules = RuleCollection() + for scope in rules_as_json: + if not isinstance(rules_as_json[scope], dict): + rules.add(Rule(scope, rules_as_json[scope])) + else: + for pattern, policy in rules_as_json[scope].items(): + rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern)) + return rules + + +def encode_rules_as_json(rules): + """ + Converts the given rules into the equivalent JSON representation according to the documentation: + https://www.consul.io/docs/guides/acl.html#rule-specification. + :param rules: the rules + :return: JSON representation of the given rules + """ + rules_as_json = defaultdict(dict) + for rule in rules: + if rule.pattern is not None: + if rule.pattern in rules_as_json[rule.scope]: + raise AssertionError() + rules_as_json[rule.scope][rule.pattern] = { + _POLICY_JSON_PROPERTY: rule.policy + } + else: + if rule.scope in rules_as_json: + raise AssertionError() + rules_as_json[rule.scope] = rule.policy + return rules_as_json + + +def decode_rules_as_yml(rules_as_yml): + """ + Converts the given YAML representation of rules into a list of rule domain models. + :param rules_as_yml: the YAML representation of a collection of rules + :return: the equivalent domain model to the given rules + """ + rules = RuleCollection() + if rules_as_yml: + for rule_as_yml in rules_as_yml: + rule_added = False + for scope in RULE_SCOPES: + if scope in rule_as_yml: + if rule_as_yml[scope] is None: + raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope) + policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \ + else rule_as_yml[scope] + pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None + rules.add(Rule(scope, policy, pattern)) + rule_added = True + break + if not rule_added: + raise ValueError("A rule requires one of %s and a policy." 
% ('/'.join(RULE_SCOPES))) + return rules + + +def decode_acl_as_json(acl_as_json): + """ + Converts the given JSON representation of an ACL into the equivalent domain model. + :param acl_as_json: the JSON representation of an ACL + :return: the equivalent domain model to the given ACL + """ + rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY] + rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \ + else RuleCollection() + return ACL( + rules=rules, + token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY], + token=acl_as_json[_TOKEN_JSON_PROPERTY], + name=acl_as_json[_NAME_JSON_PROPERTY] + ) + + +def decode_acls_as_json(acls_as_json): + """ + Converts the given JSON representation of ACLs into a list of ACL domain models. + :param acls_as_json: the JSON representation of a collection of ACLs + :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same) + """ + return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json] + + +class ConsulACLNotFoundException(Exception): + """ + Exception raised if an ACL with is not found. + """ + + +class Configuration: + """ + Configuration for this module. + """ + + def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None, + rules=None, state=None, token=None, token_type=None): + self.management_token = management_token # type: str + self.host = host # type: str + self.scheme = scheme # type: str + self.validate_certs = validate_certs # type: bool + self.name = name # type: str + self.port = port # type: int + self.rules = rules # type: RuleCollection + self.state = state # type: str + self.token = token # type: str + self.token_type = token_type # type: str + + +class Output: + """ + Output of an action of this module. + """ + + def __init__(self, changed=None, token=None, rules=None, operation=None): + self.changed = changed # type: bool + self.token = token # type: str + self.rules = rules # type: RuleCollection + self.operation = operation # type: str + + +class ACL: + """ + Consul ACL. See: https://www.consul.io/docs/guides/acl.html. + """ + + def __init__(self, rules, token_type, token, name): + self.rules = rules + self.token_type = token_type + self.token = token + self.name = name + + def __eq__(self, other): + return other \ + and isinstance(other, self.__class__) \ + and self.rules == other.rules \ + and self.token_type == other.token_type \ + and self.token == other.token \ + and self.name == other.name + + def __hash__(self): + return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name) + + +class Rule: + """ + ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope. + """ + + def __init__(self, scope, policy, pattern=None): + self.scope = scope + self.policy = policy + self.pattern = pattern + + def __eq__(self, other): + return isinstance(other, self.__class__) \ + and self.scope == other.scope \ + and self.policy == other.policy \ + and self.pattern == other.pattern + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern) + + def __str__(self): + return encode_rule_as_hcl_string(self) + + +class RuleCollection: + """ + Collection of ACL rules, which are part of a Consul ACL. 
+ """ + + def __init__(self): + self._rules = {} + for scope in RULE_SCOPES: + self._rules[scope] = {} + + def __iter__(self): + all_rules = [] + for scope, pattern_keyed_rules in self._rules.items(): + for pattern, rule in pattern_keyed_rules.items(): + all_rules.append(rule) + return iter(all_rules) + + def __len__(self): + count = 0 + for scope in RULE_SCOPES: + count += len(self._rules[scope]) + return count + + def __eq__(self, other): + return isinstance(other, self.__class__) \ + and set(self) == set(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __str__(self): + return encode_rules_as_hcl_string(self) + + def add(self, rule): + """ + Adds the given rule to this collection. + :param rule: model of a rule + :raises ValueError: raised if there already exists a rule for a given scope and pattern + """ + if rule.pattern in self._rules[rule.scope]: + patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else "" + raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info)) + self._rules[rule.scope][rule.pattern] = rule + + +def get_consul_client(configuration): + """ + Gets a Consul client for the given configuration. + + Does not check if the Consul client can connect. + :param configuration: the run configuration + :return: Consul client + """ + token = configuration.management_token + if token is None: + token = configuration.token + if token is None: + raise AssertionError("Expecting the management token to always be set") + return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme, + verify=configuration.validate_certs, token=token) + + +def check_dependencies(): + """ + Checks that the required dependencies have been imported. + :exception ImportError: if it is detected that any of the required dependencies have not been imported + """ + if not python_consul_installed: + raise ImportError("python-consul required for this module. " + "See: https://python-consul.readthedocs.io/en/latest/#installation") + + if not pyhcl_installed: + raise ImportError("pyhcl required for this module. " + "See: https://pypi.org/project/pyhcl/") + + if not has_requests: + raise ImportError("requests required for this module. See https://pypi.org/project/requests/") + + +def main(): + """ + Main method. 
+ """ + module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False) + + try: + check_dependencies() + except ImportError as e: + module.fail_json(msg=str(e)) + + configuration = Configuration( + management_token=module.params.get(MANAGEMENT_PARAMETER_NAME), + host=module.params.get(HOST_PARAMETER_NAME), + scheme=module.params.get(SCHEME_PARAMETER_NAME), + validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME), + name=module.params.get(NAME_PARAMETER_NAME), + port=module.params.get(PORT_PARAMETER_NAME), + rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)), + state=module.params.get(STATE_PARAMETER_NAME), + token=module.params.get(TOKEN_PARAMETER_NAME), + token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME) + ) + consul_client = get_consul_client(configuration) + + try: + if configuration.state == PRESENT_STATE_VALUE: + output = set_acl(consul_client, configuration) + else: + output = remove_acl(consul_client, configuration) + except ConnectionError as e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + configuration.host, configuration.port, str(e))) + raise + + return_values = dict(changed=output.changed, token=output.token, operation=output.operation) + if output.rules is not None: + return_values["rules"] = encode_rules_as_json(output.rules) + module.exit_json(**return_values) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/consul_kv.py b/ansible_collections/community/general/plugins/modules/consul_kv.py new file mode 100644 index 000000000..a4457f244 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/consul_kv.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015, Steve Gargan +# Copyright (c) 2018 Genome Research Ltd. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: consul_kv +short_description: Manipulate entries in the key/value store of a consul cluster +description: + - Allows the retrieval, addition, modification and deletion of key/value entries in a + consul cluster via the agent. The entire contents of the record, including + the indices, flags and session are returned as C(value). + - If the C(key) represents a prefix then note that when a value is removed, the existing + value if any is returned as part of the results. + - See http://www.consul.io/docs/agent/http.html#kv for more details. +requirements: + - python-consul + - requests +author: + - Steve Gargan (@sgargan) + - Colin Nolan (@colin-nolan) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - The action to take with the supplied key and value. If the state is C(present) and I(value) is set, the key + contents will be set to the value supplied and C(changed) will be set to C(true) only if the value was + different to the current contents. If the state is C(present) and I(value) is not set, the existing value + associated to the key will be returned. The state C(absent) will remove the key/value pair, + again C(changed) will be set to true only if the key actually existed + prior to the removal. 
+        An attempt can be made to obtain or free the lock associated with a
+        key/value pair with the states C(acquire) or C(release) respectively.
+        A valid session must be supplied to make the attempt; C(changed) will be
+        C(true) if the attempt is successful, C(false) otherwise.
+    type: str
+    choices: [ absent, acquire, present, release ]
+    default: present
+  key:
+    description:
+      - The key at which the value should be stored.
+    type: str
+    required: true
+  value:
+    description:
+      - The value to associate with the given key; required if I(state)
+        is C(present).
+    type: str
+  recurse:
+    description:
+      - If the key represents a prefix, each entry with the prefix can be
+        retrieved by setting this to C(true).
+    type: bool
+  retrieve:
+    description:
+      - If the I(state) is C(present) and I(value) is set, perform a
+        read after setting the value and return this value.
+    default: true
+    type: bool
+  session:
+    description:
+      - The session that should be used to acquire or release a lock
+        associated with a key/value pair.
+    type: str
+  token:
+    description:
+      - The token key identifying an ACL rule set that controls access to
+        the key value pair.
+    type: str
+  cas:
+    description:
+      - Used when acquiring a lock with a session. If the C(cas) is C(0), then
+        Consul will only put the key if it does not already exist. If the
+        C(cas) value is non-zero, then the key is only set if the index matches
+        the ModifyIndex of that key.
+    type: str
+  flags:
+    description:
+      - Opaque positive integer value that can be passed when setting a value.
+    type: str
+  host:
+    description:
+      - Host of the consul agent.
+    type: str
+    default: localhost
+  port:
+    description:
+      - The port on which the consul agent is running.
+    type: int
+    default: 8500
+  scheme:
+    description:
+      - The protocol scheme on which the consul agent is running.
+    type: str
+    default: http
+  validate_certs:
+    description:
+      - Whether to verify the TLS certificate of the consul agent.
+    type: bool
+    default: true
+'''
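The C(cas) semantics described above map directly onto python-consul's kv.put. A brief sketch of the two cases (assumes a reachable local agent; the key and values are examples only):

    import consul

    c = consul.Consul()  # defaults to localhost:8500
    index, entry = c.kv.get("somekey")
    if entry is None:
        # cas=0: the put succeeds only if the key does not exist yet.
        c.kv.put("somekey", "initial", cas=0)
    else:
        # Non-zero cas: the put succeeds only if the key is unchanged since
        # our read, i.e. the index still matches the key's ModifyIndex.
        c.kv.put("somekey", "updated", cas=entry["ModifyIndex"])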
+
+
+EXAMPLES = '''
+# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
+# If the key value is an empty string, `retrieved_key["data"]["Value"]` will be `None`
+- name: Retrieve a value from the key/value store
+  community.general.consul_kv:
+    key: somekey
+  register: retrieved_key
+
+- name: Add or update the value associated with a key in the key/value store
+  community.general.consul_kv:
+    key: somekey
+    value: somevalue
+
+- name: Remove a key from the store
+  community.general.consul_kv:
+    key: somekey
+    state: absent
+
+- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
+  community.general.consul_kv:
+    key: ansible/groups/dc1/somenode
+    value: top_secret
+
+- name: Register a key/value pair with an associated session
+  community.general.consul_kv:
+    key: stg/node/server_birthday
+    value: 20160509
+    session: "{{ sessionid }}"
+    state: acquire
+'''
+
+from ansible.module_utils.common.text.converters import to_text
+
+try:
+    import consul
+    from requests.exceptions import ConnectionError
+    python_consul_installed = True
+except ImportError:
+    python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
+# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequent API call,
+# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
+NOT_SET = None
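A detail worth keeping in mind when reading the change-detection helper below: python-consul hands KV values back as bytes, while module parameters arrive as text, so a naive comparison would report a change on every run. An illustration with made-up values:

    from ansible.module_utils.common.text.converters import to_text

    stored = {"Value": b"somevalue"}   # shape of an entry returned by kv.get()
    target = u"somevalue"
    # Under Python 3, bytes never compare equal to text, so coerce first:
    assert stored["Value"] != target
    assert to_text(stored["Value"], errors="surrogate_or_strict") == target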
+def _has_value_changed(consul_client, key, target_value):
+    """
+    Uses the given Consul client to determine if the value associated to the given key is different to the given target
+    value.
+    :param consul_client: Consul connected client
+    :param key: key in Consul
+    :param target_value: value to be associated to the key
+    :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
+    value has changed (i.e. the stored value is not the target value)
+    """
+    index, existing = consul_client.kv.get(key)
+    if not existing:
+        return index, True
+    try:
+        changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
+        return index, changed
+    except UnicodeError:
+        # Existing value was not decodable but all values we set are valid utf-8
+        return index, True
+
+
+def execute(module):
+    state = module.params.get('state')
+
+    if state == 'acquire' or state == 'release':
+        lock(module, state)
+    elif state == 'present':
+        if module.params.get('value') is NOT_SET:
+            get_value(module)
+        else:
+            set_value(module)
+    elif state == 'absent':
+        remove_value(module)
+    else:
+        module.fail_json(msg="Unsupported state: %s" % (state, ))
+
+
+def lock(module, state):
+
+    consul_api = get_consul_api(module)
+
+    session = module.params.get('session')
+    key = module.params.get('key')
+    value = module.params.get('value')
+
+    if not session:
+        module.fail_json(
+            msg='%s of lock for %s requested but no session supplied' %
+            (state, key))
+
+    index, changed = _has_value_changed(consul_api, key, value)
+
+    if changed and not module.check_mode:
+        if state == 'acquire':
+            changed = consul_api.kv.put(key, value,
+                                        cas=module.params.get('cas'),
+                                        acquire=session,
+                                        flags=module.params.get('flags'))
+        else:
+            changed = consul_api.kv.put(key, value,
+                                        cas=module.params.get('cas'),
+                                        release=session,
+                                        flags=module.params.get('flags'))
+
+    module.exit_json(changed=changed,
+                     index=index,
+                     key=key)
+
+
+def get_value(module):
+    consul_api = get_consul_api(module)
+    key = module.params.get('key')
+
+    index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
+
+    module.exit_json(changed=False, index=index, data=existing_value)
+
+
+def set_value(module):
+    consul_api = get_consul_api(module)
+
+    key = module.params.get('key')
+    value = module.params.get('value')
+
+    if value is NOT_SET:
+        raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
+
+    index, changed = _has_value_changed(consul_api, key, value)
+
+    if changed and not module.check_mode:
+        changed = consul_api.kv.put(key, value,
+                                    cas=module.params.get('cas'),
+                                    flags=module.params.get('flags'))
+
+    stored = None
+    if module.params.get('retrieve'):
+        index, stored = consul_api.kv.get(key)
+
+    module.exit_json(changed=changed,
+                     index=index,
+                     key=key,
+                     data=stored)
+
+
+def remove_value(module):
+    ''' remove the value associated with the given key. if the recurse parameter
+     is set then any key prefixed with the given key will be removed. '''
+    consul_api = get_consul_api(module)
+
+    key = module.params.get('key')
+
+    index, existing = consul_api.kv.get(
+        key, recurse=module.params.get('recurse'))
+
+    changed = existing is not None
+    if changed and not module.check_mode:
+        consul_api.kv.delete(key, module.params.get('recurse'))
+
+    module.exit_json(changed=changed,
+                     index=index,
+                     key=key,
+                     data=existing)
+
+
+def get_consul_api(module):
+    return consul.Consul(host=module.params.get('host'),
+                         port=module.params.get('port'),
+                         scheme=module.params.get('scheme'),
+                         verify=module.params.get('validate_certs'),
+                         token=module.params.get('token'))
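For context on how C(acquire) and C(release) are typically used together, a compact end-to-end sketch with python-consul (assumes a reachable local agent; the key name and TTL are arbitrary):

    import consul

    c = consul.Consul()
    session = c.session.create(ttl=60)   # identity of the lock holder
    try:
        if c.kv.put("locks/demo", "owner-1", acquire=session):
            # ... critical section: we hold the lock ...
            c.kv.put("locks/demo", "owner-1", release=session)
    finally:
        c.session.destroy(session)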
" + "see https://python-consul.readthedocs.io/en/latest/#installation") + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cas=dict(type='str'), + flags=dict(type='str'), + key=dict(type='str', required=True, no_log=False), + host=dict(type='str', default='localhost'), + scheme=dict(type='str', default='http'), + validate_certs=dict(type='bool', default=True), + port=dict(type='int', default=8500), + recurse=dict(type='bool'), + retrieve=dict(type='bool', default=True), + state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']), + token=dict(type='str', no_log=True), + value=dict(type='str', default=NOT_SET), + session=dict(type='str'), + ), + supports_check_mode=True + ) + + test_dependencies(module) + + try: + execute(module) + except ConnectionError as e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), e)) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/consul_session.py b/ansible_collections/community/general/plugins/modules/consul_session.py new file mode 100644 index 000000000..246d13846 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/consul_session.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Steve Gargan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: consul_session +short_description: Manipulate consul sessions +description: + - Allows the addition, modification and deletion of sessions in a consul + cluster. These sessions can then be used in conjunction with key value pairs + to implement distributed locks. In depth documentation for working with + sessions can be found at http://www.consul.io/docs/internals/sessions.html +requirements: + - python-consul + - requests +author: + - Steve Gargan (@sgargan) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + id: + description: + - ID of the session, required when I(state) is either C(info) or + C(remove). + type: str + state: + description: + - Whether the session should be present i.e. created if it doesn't + exist, or absent, removed if present. If created, the I(id) for the + session is returned in the output. If C(absent), I(id) is + required to remove the session. Info for a single session, all the + sessions for a node or all available sessions can be retrieved by + specifying C(info), C(node) or C(list) for the I(state); for C(node) + or C(info), the node I(name) or session I(id) is required as parameter. + choices: [ absent, info, list, node, present ] + type: str + default: present + name: + description: + - The name that should be associated with the session. Required when + I(state=node) is used. + type: str + delay: + description: + - The optional lock delay that can be attached to the session when it + is created. Locks for invalidated sessions ar blocked from being + acquired until this delay has expired. Durations are in seconds. + type: int + default: 15 + node: + description: + - The name of the node that with which the session will be associated. 
+        By default this is the name of the agent.
+    type: str
+  datacenter:
+    description:
+      - The name of the datacenter in which the session exists or should be
+        created.
+    type: str
+  checks:
+    description:
+      - Checks that will be used to verify the session health. If
+        all the checks fail, the session will be invalidated and any locks
+        associated with the session will be released and can be acquired once
+        the associated lock delay has expired.
+    type: list
+    elements: str
+  host:
+    description:
+      - The host of the consul agent; defaults to localhost.
+    type: str
+    default: localhost
+  port:
+    description:
+      - The port on which the consul agent is running.
+    type: int
+    default: 8500
+  scheme:
+    description:
+      - The protocol scheme on which the consul agent is running.
+    type: str
+    default: http
+  validate_certs:
+    description:
+      - Whether to verify the TLS certificate of the consul agent.
+    type: bool
+    default: true
+  behavior:
+    description:
+      - The optional behavior that can be attached to the session when it
+        is created. This controls the behavior when a session is invalidated.
+    choices: [ delete, release ]
+    type: str
+    default: release
+  ttl:
+    description:
+      - Specifies the duration of a session in seconds (between 10 and 86400).
+    type: int
+    version_added: 5.4.0
+  token:
+    description:
+      - The token key identifying an ACL rule set that controls access to
+        the key value pair.
+    type: str
+    version_added: 5.6.0
+'''
+
+EXAMPLES = '''
+- name: Register basic session with consul
+  community.general.consul_session:
+    name: session1
+
+- name: Register a session with an existing check
+  community.general.consul_session:
+    name: session_with_check
+    checks:
+      - existing_check_name
+
+- name: Register a session with lock_delay
+  community.general.consul_session:
+    name: session_with_delay
+    delay: 20
+
+- name: Retrieve info about session by id
+  community.general.consul_session:
+    id: session_id
+    state: info
+
+- name: Retrieve active sessions
+  community.general.consul_session:
+    state: list
+
+- name: Register session with a ttl
+  community.general.consul_session:
+    name: session-with-ttl
+    ttl: 600  # sec
+'''
+
+try:
+    import consul
+    from requests.exceptions import ConnectionError
+    python_consul_installed = True
+except ImportError:
+    python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def execute(module):
+
+    state = module.params.get('state')
+
+    if state in ['info', 'list', 'node']:
+        lookup_sessions(module)
+    elif state == 'present':
+        update_session(module)
+    else:
+        remove_session(module)
+
+
+def lookup_sessions(module):
+
+    datacenter = module.params.get('datacenter')
+
+    state = module.params.get('state')
+    consul_client = get_consul_api(module)
+    try:
+        if state == 'list':
+            sessions_list = consul_client.session.list(dc=datacenter)
+            # Ditch the index, this can be grabbed from the results
+            if sessions_list and len(sessions_list) >= 2:
+                sessions_list = sessions_list[1]
+            module.exit_json(changed=True,
+                             sessions=sessions_list)
+        elif state == 'node':
+            node = module.params.get('node')
+            sessions = consul_client.session.node(node, dc=datacenter)
+            module.exit_json(changed=True,
+                             node=node,
+                             sessions=sessions)
+        elif state == 'info':
+            session_id = module.params.get('id')
+
+            session_by_id = consul_client.session.info(session_id, dc=datacenter)
+            module.exit_json(changed=True,
+                             session_id=session_id,
+                             sessions=session_by_id)
+
+    except Exception as e:
+        module.fail_json(msg="Could not retrieve session info %s" % e)
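Before the create/update logic below, a short illustration of the two invalidation behaviors documented above, using python-consul directly (assumes a reachable local agent; the TTL is arbitrary):

    import consul

    c = consul.Consul()
    # "release" (the default): locks held by the session are released when
    # the session is invalidated; the locked keys themselves survive.
    s_release = c.session.create(behavior="release", ttl=30)
    # "delete": keys locked by the session are deleted on invalidation.
    s_delete = c.session.create(behavior="delete", ttl=30)
    for session_id in (s_release, s_delete):
        c.session.destroy(session_id)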
+def update_session(module):
+
+    name = module.params.get('name')
+    delay = module.params.get('delay')
+    checks = module.params.get('checks')
+    datacenter = module.params.get('datacenter')
+    node = module.params.get('node')
+    behavior = module.params.get('behavior')
+    ttl = module.params.get('ttl')
+
+    consul_client = get_consul_api(module)
+
+    try:
+        session = consul_client.session.create(
+            name=name,
+            behavior=behavior,
+            ttl=ttl,
+            node=node,
+            lock_delay=delay,
+            dc=datacenter,
+            checks=checks
+        )
+        module.exit_json(changed=True,
+                         session_id=session,
+                         name=name,
+                         behavior=behavior,
+                         ttl=ttl,
+                         delay=delay,
+                         checks=checks,
+                         node=node)
+    except Exception as e:
+        module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def remove_session(module):
+    session_id = module.params.get('id')
+
+    consul_client = get_consul_api(module)
+
+    try:
+        consul_client.session.destroy(session_id)
+
+        module.exit_json(changed=True,
+                         session_id=session_id)
+    except Exception as e:
+        module.fail_json(msg="Could not remove session with id '%s' %s" % (
+                         session_id, e))
+
+
+def get_consul_api(module):
+    return consul.Consul(host=module.params.get('host'),
+                         port=module.params.get('port'),
+                         scheme=module.params.get('scheme'),
+                         verify=module.params.get('validate_certs'),
+                         token=module.params.get('token'))
+
+
+def test_dependencies(module):
+    if not python_consul_installed:
+        module.fail_json(msg="python-consul required for this module. "
+                             "See https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+    argument_spec = dict(
+        checks=dict(type='list', elements='str'),
+        delay=dict(type='int', default=15),
+        behavior=dict(type='str', default='release', choices=['release', 'delete']),
+        ttl=dict(type='int'),
+        host=dict(type='str', default='localhost'),
+        port=dict(type='int', default=8500),
+        scheme=dict(type='str', default='http'),
+        validate_certs=dict(type='bool', default=True),
+        id=dict(type='str'),
+        name=dict(type='str'),
+        node=dict(type='str'),
+        state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
+        datacenter=dict(type='str'),
+        token=dict(type='str', no_log=True),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=[
+            ('state', 'node', ['node']),
+            ('state', 'info', ['id']),
+            ('state', 'absent', ['id']),
+        ],
+        supports_check_mode=False
+    )
+
+    test_dependencies(module)
+
+    try:
+        execute(module)
+    except ConnectionError as e:
+        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+            module.params.get('host'), module.params.get('port'), e))
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/copr.py b/ansible_collections/community/general/plugins/modules/copr.py
new file mode 100644
index 000000000..965c2a935
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/copr.py
@@ -0,0 +1,500 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Silvie Chlupova
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: copr
+short_description: Manage one of the Copr repositories
+version_added: 2.0.0
+description: This module can enable, disable or remove the specified repository.
+author: Silvie Chlupova (@schlupov) +requirements: + - dnf + - dnf-plugins-core +notes: + - Supports C(check_mode). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + host: + description: The Copr host to work with. + default: copr.fedorainfracloud.org + type: str + protocol: + description: This indicate which protocol to use with the host. + default: https + type: str + name: + description: Copr directory name, for example C(@copr/copr-dev). + required: true + type: str + state: + description: + - Whether to set this project as C(enabled), C(disabled) or C(absent). + default: enabled + type: str + choices: [absent, enabled, disabled] + chroot: + description: + - The name of the chroot that you want to enable/disable/remove in the project, + for example C(epel-7-x86_64). Default chroot is determined by the operating system, + version of the operating system, and architecture on which the module is run. + type: str +""" + +EXAMPLES = r""" +- name: Enable project Test of the user schlupov + community.general.copr: + host: copr.fedorainfracloud.org + state: enabled + name: schlupov/Test + chroot: fedora-31-x86_64 + +- name: Remove project integration_tests of the group copr + community.general.copr: + state: absent + name: '@copr/integration_tests' +""" + +RETURN = r""" +repo_filename: + description: The name of the repo file in which the copr project information is stored. + returned: success + type: str + sample: _copr:copr.fedorainfracloud.org:group_copr:integration_tests.repo + +repo: + description: Path to the project on the host. + returned: success + type: str + sample: copr.fedorainfracloud.org/group_copr/integration_tests +""" + +import stat +import os +import traceback + +try: + import dnf + import dnf.cli + import dnf.repodict + from dnf.conf import Conf + HAS_DNF_PACKAGES = True + DNF_IMP_ERR = None +except ImportError: + DNF_IMP_ERR = traceback.format_exc() + HAS_DNF_PACKAGES = False + +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils.basic import missing_required_lib +from ansible.module_utils import distro # pylint: disable=import-error +from ansible.module_utils.basic import AnsibleModule # pylint: disable=import-error +from ansible.module_utils.urls import open_url # pylint: disable=import-error + + +class CoprModule(object): + """The class represents a copr module. + + The class contains methods that take care of the repository state of a project, + whether the project is enabled, disabled or missing. 
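+
+    A typical flow is to instantiate the class with host/name/state and call
+    run(), which dispatches to _enable_repo(), _disable_repo() or _remove_repo()
+    and returns the result dictionary that the Ansible module reports back.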
+ """ + + ansible_module = None + + def __init__(self, host, name, state, protocol, chroot=None, check_mode=False): + self.host = host + self.name = name + self.state = state + self.chroot = chroot + self.protocol = protocol + self.check_mode = check_mode + if not chroot: + self.chroot = self.chroot_conf() + else: + self.chroot = chroot + self.get_base() + + @property + def short_chroot(self): + """str: Chroot (distribution-version-architecture) shorten to distribution-version.""" + return self.chroot.rsplit('-', 1)[0] + + @property + def arch(self): + """str: Target architecture.""" + chroot_parts = self.chroot.split("-") + return chroot_parts[-1] + + @property + def user(self): + """str: Copr user (this can also be the name of the group).""" + return self._sanitize_username(self.name.split("/")[0]) + + @property + def project(self): + """str: The name of the copr project.""" + return self.name.split("/")[1] + + @classmethod + def need_root(cls): + """Check if the module was run as root.""" + if os.geteuid() != 0: + cls.raise_exception("This command has to be run under the root user.") + + @classmethod + def get_base(cls): + """Initialize the configuration from dnf. + + Returns: + An instance of the BaseCli class. + """ + cls.base = dnf.cli.cli.BaseCli(Conf()) + return cls.base + + @classmethod + def raise_exception(cls, msg): + """Raise either an ansible exception or a python exception. + + Args: + msg: The message to be displayed when an exception is thrown. + """ + if cls.ansible_module: + raise cls.ansible_module.fail_json(msg=msg, changed=False) + raise Exception(msg) + + def _get(self, chroot): + """Send a get request to the server to obtain the necessary data. + + Args: + chroot: Chroot in the form of distribution-version. + + Returns: + Info about a repository and status code of the get request. + """ + repo_info = None + url = "{0}://{1}/coprs/{2}/repo/{3}/dnf.repo?arch={4}".format( + self.protocol, self.host, self.name, chroot, self.arch + ) + try: + r = open_url(url) + status_code = r.getcode() + repo_info = r.read().decode("utf-8") + except HTTPError as e: + status_code = e.getcode() + return repo_info, status_code + + def _download_repo_info(self): + """Download information about the repository. + + Returns: + Information about the repository. + """ + distribution, version = self.short_chroot.split('-', 1) + chroot = self.short_chroot + while True: + repo_info, status_code = self._get(chroot) + if repo_info: + return repo_info + if distribution == "rhel": + chroot = "centos-stream-8" + distribution = "centos" + elif distribution == "centos": + if version == "stream-8": + version = "8" + elif version == "stream-9": + version = "9" + chroot = "epel-{0}".format(version) + distribution = "epel" + else: + if str(status_code) != "404": + self.raise_exception( + "This repository does not have any builds yet so you cannot enable it now." + ) + else: + self.raise_exception( + "Chroot {0} does not exist in {1}".format(self.chroot, self.name) + ) + + def _enable_repo(self, repo_filename_path, repo_content=None): + """Write information to a repo file. + + Args: + repo_filename_path: Path to repository. + repo_content: Repository information from the host. + + Returns: + True, if the information in the repo file matches that stored on the host, + False otherwise. 
+ """ + if not repo_content: + repo_content = self._download_repo_info() + if self._compare_repo_content(repo_filename_path, repo_content): + return False + if not self.check_mode: + with open(repo_filename_path, "w+") as file: + file.write(repo_content) + os.chmod( + repo_filename_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH, + ) + return True + + def _get_repo_with_old_id(self): + """Try to get a repository with the old name.""" + repo_id = "{0}-{1}".format(self.user, self.project) + if repo_id in self.base.repos and "_copr" in self.base.repos[repo_id].repofile: + file_name = self.base.repos[repo_id].repofile.split("/")[-1] + try: + copr_hostname = file_name.rsplit(":", 2)[0].split(":", 1)[1] + if copr_hostname != self.host: + return None + return file_name + except IndexError: + return file_name + return None + + def _read_all_repos(self, repo_id=None): + """The method is used to initialize the base variable by + repositories using the RepoReader class from dnf. + + Args: + repo_id: Repo id of the repository we want to work with. + """ + reader = dnf.conf.read.RepoReader(self.base.conf, None) + for repo in reader: + try: + if repo_id: + if repo.id == repo_id: + self.base.repos.add(repo) + break + else: + self.base.repos.add(repo) + except dnf.exceptions.ConfigError as e: + self.raise_exception(str(e)) + + def _get_copr_repo(self): + """Return one specific repository from all repositories on the system. + + Returns: + The repository that a user wants to enable, disable, or remove. + """ + repo_id = "copr:{0}:{1}:{2}".format(self.host, self.user, self.project) + if repo_id not in self.base.repos: + if self._get_repo_with_old_id() is None: + return None + return self.base.repos[repo_id] + + def _disable_repo(self, repo_filename_path): + """Disable the repository. + + Args: + repo_filename_path: Path to repository. + + Returns: + False, if the repository is already disabled on the system, + True otherwise. + """ + self._read_all_repos() + repo = self._get_copr_repo() + if repo is None: + if self.check_mode: + return True + self._enable_repo(repo_filename_path) + self._read_all_repos("copr:{0}:{1}:{2}".format(self.host, self.user, self.project)) + repo = self._get_copr_repo() + for repo_id in repo.cfg.sections(): + repo_content_api = self._download_repo_info() + with open(repo_filename_path, "r") as file: + repo_content_file = file.read() + if repo_content_file != repo_content_api: + if not self.resolve_differences( + repo_content_file, repo_content_api, repo_filename_path + ): + return False + if not self.check_mode: + self.base.conf.write_raw_configfile( + repo.repofile, repo_id, self.base.conf.substitutions, {"enabled": "0"}, + ) + return True + + def resolve_differences(self, repo_content_file, repo_content_api, repo_filename_path): + """Detect differences between the contents of the repository stored on the + system and the information about the repository on the server. + + Args: + repo_content_file: The contents of the repository stored on the system. + repo_content_api: The information about the repository from the server. + repo_filename_path: Path to repository. + + Returns: + False, if the contents of the repo file and the information on the server match, + True otherwise. 
+ """ + repo_file_lines = repo_content_file.split("\n") + repo_api_lines = repo_content_api.split("\n") + repo_api_lines.remove("enabled=1") + if "enabled=0" in repo_file_lines: + repo_file_lines.remove("enabled=0") + if " ".join(repo_api_lines) == " ".join(repo_file_lines): + return False + if not self.check_mode: + os.remove(repo_filename_path) + self._enable_repo(repo_filename_path, repo_content_api) + else: + repo_file_lines.remove("enabled=1") + if " ".join(repo_api_lines) != " ".join(repo_file_lines): + if not self.check_mode: + os.remove(repo_filename_path) + self._enable_repo(repo_filename_path, repo_content_api) + return True + + def _remove_repo(self): + """Remove the required repository. + + Returns: + True, if the repository has been removed, False otherwise. + """ + self._read_all_repos() + repo = self._get_copr_repo() + if not repo: + return False + if not self.check_mode: + try: + os.remove(repo.repofile) + except OSError as e: + self.raise_exception(str(e)) + return True + + def run(self): + """The method uses methods of the CoprModule class to change the state of the repository. + + Returns: + Dictionary with information that the ansible module displays to the user at the end of the run. + """ + self.need_root() + state = dict() + repo_filename = "_copr:{0}:{1}:{2}.repo".format(self.host, self.user, self.project) + state["repo"] = "{0}/{1}/{2}".format(self.host, self.user, self.project) + state["repo_filename"] = repo_filename + repo_filename_path = "{0}/_copr:{1}:{2}:{3}.repo".format( + self.base.conf.get_reposdir, self.host, self.user, self.project + ) + if self.state == "enabled": + enabled = self._enable_repo(repo_filename_path) + state["msg"] = "enabled" + state["state"] = bool(enabled) + elif self.state == "disabled": + disabled = self._disable_repo(repo_filename_path) + state["msg"] = "disabled" + state["state"] = bool(disabled) + elif self.state == "absent": + removed = self._remove_repo() + state["msg"] = "absent" + state["state"] = bool(removed) + return state + + @staticmethod + def _compare_repo_content(repo_filename_path, repo_content_api): + """Compare the contents of the stored repository with the information from the server. + + Args: + repo_filename_path: Path to repository. + repo_content_api: The information about the repository from the server. + + Returns: + True, if the information matches, False otherwise. + """ + if not os.path.isfile(repo_filename_path): + return False + with open(repo_filename_path, "r") as file: + repo_content_file = file.read() + return repo_content_file == repo_content_api + + @staticmethod + def chroot_conf(): + """Obtain information about the distribution, version, and architecture of the target. + + Returns: + Chroot info in the form of distribution-version-architecture. + """ + (distribution, version, codename) = distro.linux_distribution(full_distribution_name=False) + base = CoprModule.get_base() + return "{0}-{1}-{2}".format(distribution, version, base.conf.arch) + + @staticmethod + def _sanitize_username(user): + """Modify the group name. + + Args: + user: User name. + + Returns: + Modified user name if it is a group name with @. 
+ """ + if user[0] == "@": + return "group_{0}".format(user[1:]) + return user + + +def run_module(): + """The function takes care of the functioning of the whole ansible copr module.""" + module_args = dict( + host=dict(type="str", default="copr.fedorainfracloud.org"), + protocol=dict(type="str", default="https"), + name=dict(type="str", required=True), + state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"), + chroot=dict(type="str"), + ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + params = module.params + + if not HAS_DNF_PACKAGES: + module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR) + + CoprModule.ansible_module = module + copr_module = CoprModule( + host=params["host"], + name=params["name"], + state=params["state"], + protocol=params["protocol"], + chroot=params["chroot"], + check_mode=module.check_mode, + ) + state = copr_module.run() + + info = "Please note that this repository is not part of the main distribution" + + if params["state"] == "enabled" and state["state"]: + module.exit_json( + changed=state["state"], + msg=state["msg"], + repo=state["repo"], + repo_filename=state["repo_filename"], + info=info, + ) + module.exit_json( + changed=state["state"], + msg=state["msg"], + repo=state["repo"], + repo_filename=state["repo_filename"], + ) + + +def main(): + """Launches ansible Copr module.""" + run_module() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/cpanm.py b/ansible_collections/community/general/plugins/modules/cpanm.py new file mode 100644 index 000000000..6260992df --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/cpanm.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2012, Franck Cuny +# Copyright (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: cpanm +short_description: Manages Perl library dependencies +description: + - Manage Perl library dependencies using cpanminus. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + description: + - The Perl library to install. Valid values change according to the I(mode), see notes for more details. + - Note that for installing from a local path the parameter I(from_path) should be used. + aliases: [pkg] + from_path: + type: path + description: + - The local directory or C(tar.gz) file to install from. + notest: + description: + - Do not run unit tests. + type: bool + default: false + locallib: + description: + - Specify the install base to install modules. + type: path + mirror: + description: + - Specifies the base URL for the CPAN mirror to use. + type: str + mirror_only: + description: + - Use the mirror's index file instead of the CPAN Meta DB. + type: bool + default: false + installdeps: + description: + - Only install dependencies. + type: bool + default: false + version: + description: + - Version specification for the perl module. When I(mode) is C(new), C(cpanm) version operators are accepted. + type: str + executable: + description: + - Override the path to the cpanm executable. 
+ type: path + mode: + description: + - Controls the module behavior. See notes below for more details. + type: str + choices: [compatibility, new] + default: compatibility + version_added: 3.0.0 + name_check: + description: + - When in C(new) mode, this parameter can be used to check if there is a module I(name) installed (at I(version), when specified). + type: str + version_added: 3.0.0 +notes: + - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. + - "This module now comes with a choice of execution I(mode): C(compatibility) or C(new)." + - "C(compatibility) mode:" + - When using C(compatibility) mode, the module will keep backward compatibility. This is the default mode. + - I(name) must be either a module name or a distribution file. + - > + If the perl module given by I(name) is installed (at the exact I(version) when specified), then nothing happens. + Otherwise, it will be installed using the C(cpanm) executable. + - I(name) cannot be an URL, or a git URL. + - C(cpanm) version specifiers do not work in this mode. + - "C(new) mode:" + - "When using C(new) mode, the module will behave differently" + - > + The I(name) parameter may refer to a module name, a distribution file, + a HTTP URL or a git repository URL as described in C(cpanminus) documentation. + - C(cpanm) version specifiers are recognized. +author: + - "Franck Cuny (@fcuny)" + - "Alexei Znamensky (@russoz)" +''' + +EXAMPLES = ''' +- name: Install Dancer perl package + community.general.cpanm: + name: Dancer + +- name: Install version 0.99_05 of the Plack perl package + community.general.cpanm: + name: MIYAGAWA/Plack-0.99_05.tar.gz + +- name: Install Dancer into the specified locallib + community.general.cpanm: + name: Dancer + locallib: /srv/webapps/my_app/extlib + +- name: Install perl dependencies from local directory + community.general.cpanm: + from_path: /srv/webapps/my_app/src/ + +- name: Install Dancer perl package without running the unit tests in indicated locallib + community.general.cpanm: + name: Dancer + notest: true + locallib: /srv/webapps/my_app/extlib + +- name: Install Dancer perl package from a specific mirror + community.general.cpanm: + name: Dancer + mirror: 'http://cpan.cpantesters.org/' + +- name: Install Dancer perl package into the system root path + become: true + community.general.cpanm: + name: Dancer + +- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0 + community.general.cpanm: + name: Dancer + version: '1.0' +''' + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +class CPANMinus(ModuleHelper): + output_params = ['name', 'version'] + module = dict( + argument_spec=dict( + name=dict(type='str', aliases=['pkg']), + version=dict(type='str'), + from_path=dict(type='path'), + notest=dict(type='bool', default=False), + locallib=dict(type='path'), + mirror=dict(type='str'), + mirror_only=dict(type='bool', default=False), + installdeps=dict(type='bool', default=False), + executable=dict(type='path'), + mode=dict(type='str', choices=['compatibility', 'new'], default='compatibility'), + name_check=dict(type='str') + ), + required_one_of=[('name', 'from_path')], + + ) + command = 'cpanm' + command_args_formats = dict( + notest=cmd_runner_fmt.as_bool("--notest"), + 
locallib=cmd_runner_fmt.as_opt_val('--local-lib'), + mirror=cmd_runner_fmt.as_opt_val('--mirror'), + mirror_only=cmd_runner_fmt.as_bool("--mirror-only"), + installdeps=cmd_runner_fmt.as_bool("--installdeps"), + pkg_spec=cmd_runner_fmt.as_list(), + ) + + def __init_module__(self): + v = self.vars + if v.mode == "compatibility": + if v.name_check: + self.do_raise("Parameter name_check can only be used with mode=new") + else: + if v.name and v.from_path: + self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") + + self.command = self.get_bin_path(v.executable if v.executable else self.command) + self.vars.set("binary", self.command) + + def _is_package_installed(self, name, locallib, version): + def process(rc, out, err): + return rc == 0 + + if name is None or name.endswith('.tar.gz'): + return False + version = "" if version is None else " " + version + + env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {} + runner = CmdRunner(self.module, ["perl", "-le"], {"mod": cmd_runner_fmt.as_list()}, check_rc=False, environ_update=env) + with runner("mod", output_process=process) as ctx: + return ctx.run(mod='use %s%s;' % (name, version)) + + def sanitize_pkg_spec_version(self, pkg_spec, version): + if version is None: + return pkg_spec + if pkg_spec.endswith('.tar.gz'): + self.do_raise(msg="parameter 'version' must not be used when installing from a file") + if os.path.isdir(pkg_spec): + self.do_raise(msg="parameter 'version' must not be used when installing from a directory") + if pkg_spec.endswith('.git'): + if version.startswith('~'): + self.do_raise(msg="operator '~' not allowed in version parameter when installing from git repository") + version = version if version.startswith('@') else '@' + version + elif version[0] not in ('@', '~'): + version = '~' + version + return pkg_spec + version + + def __run__(self): + def process(rc, out, err): + if self.vars.mode == "compatibility" and rc != 0: + self.do_raise(msg=err, cmd=self.vars.cmd_args) + return 'is up to date' not in err and 'is up to date' not in out + + runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True) + + v = self.vars + pkg_param = 'from_path' if v.from_path else 'name' + + if v.mode == 'compatibility': + if self._is_package_installed(v.name, v.locallib, v.version): + return + pkg_spec = v[pkg_param] + else: + installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False + if installed: + return + pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) + + with runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx: + self.changed = ctx.run(pkg_spec=pkg_spec) + + +def main(): + CPANMinus.execute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/cronvar.py b/ansible_collections/community/general/plugins/modules/cronvar.py new file mode 100644 index 000000000..7effed2ae --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/cronvar.py @@ -0,0 +1,431 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Cronvar Plugin: The goal of this plugin is to provide an idempotent +# method for set cron variable values. 
It should play well with the +# existing cron module as well as allow for manually added variables. +# Each variable entered will be preceded with a comment describing the +# variable so that it can be found later. This is required to be +# present in order for this plugin to find/modify the variable + +# This module is based on the crontab module. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: cronvar +short_description: Manage variables in crontabs +description: + - Use this module to manage crontab variables. + - This module allows you to create, update, or delete cron variable definitions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of the crontab variable. + type: str + required: true + value: + description: + - The value to set this variable to. + - Required if I(state=present). + type: str + insertafter: + description: + - If specified, the variable will be inserted after the variable specified. + - Used with I(state=present). + type: str + insertbefore: + description: + - Used with I(state=present). If specified, the variable will be inserted + just before the variable specified. + type: str + state: + description: + - Whether to ensure that the variable is present or absent. + type: str + choices: [ absent, present ] + default: present + user: + description: + - The specific user whose crontab should be modified. + - This parameter defaults to C(root) when unset. + type: str + cron_file: + description: + - If specified, uses this file instead of an individual user's crontab. + - Without a leading C(/), this is assumed to be in I(/etc/cron.d). + - With a leading C(/), this is taken as absolute. + type: str + backup: + description: + - If set, create a backup of the crontab before it is modified. + The location of the backup is returned in the C(backup) variable by this module. + type: bool + default: false +requirements: + - cron +author: +- Doug Luce (@dougluce) +''' + +EXAMPLES = r''' +- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists + community.general.cronvar: + name: EMAIL + value: doug@ansibmod.con.com + +- name: Ensure a variable does not exist. This may remove any variable named "LEGACY" + community.general.cronvar: + name: LEGACY + state: absent + +- name: Add a variable to a file under /etc/cron.d + community.general.cronvar: + name: LOGFILE + value: /var/log/yum-autoupdate.log + user: root + cron_file: ansible_yum-autoupdate +''' + +import os +import platform +import pwd +import re +import shlex +import sys +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +class CronVarError(Exception): + pass + + +class CronVar(object): + """ + CronVar object to write variables to crontabs. 
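+
+    Variables are located by tokenizing each crontab line with shlex and
+    treating any NAME=value line as a variable definition (see parse_for_var).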
+ + user - the user of the crontab (defaults to root) + cron_file - a cron file under /etc/cron.d + """ + + def __init__(self, module, user=None, cron_file=None): + self.module = module + self.user = user + self.lines = None + self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',)) + self.cron_cmd = self.module.get_bin_path('crontab', required=True) + + if cron_file: + self.cron_file = "" + if os.path.isabs(cron_file): + self.cron_file = cron_file + else: + self.cron_file = os.path.join('/etc/cron.d', cron_file) + else: + self.cron_file = None + + self.read() + + def read(self): + # Read in the crontab from the system + self.lines = [] + if self.cron_file: + # read the cronfile + try: + f = open(self.cron_file, 'r') + self.lines = f.read().splitlines() + f.close() + except IOError: + # cron file does not exist + return + except Exception: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + else: + # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME + (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) + + if rc != 0 and rc != 1: # 1 can mean that there are no jobs. + raise CronVarError("Unable to read crontab") + + lines = out.splitlines() + count = 0 + for l in lines: + if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l + ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)): + self.lines.append(l) + count += 1 + + def log_message(self, message): + self.module.debug('ansible: "%s"' % message) + + def write(self, backup_file=None): + """ + Write the crontab to the system. Saves all information. + """ + if backup_file: + fileh = open(backup_file, 'w') + elif self.cron_file: + fileh = open(self.cron_file, 'w') + else: + filed, path = tempfile.mkstemp(prefix='crontab') + fileh = os.fdopen(filed, 'w') + + fileh.write(self.render()) + fileh.close() + + # return if making a backup + if backup_file: + return + + # Add the entire crontab back to the user crontab + if not self.cron_file: + # quoting shell args for now but really this should be two non-shell calls. FIXME + (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) + os.unlink(path) + + if rc != 0: + self.module.fail_json(msg=err) + + def remove_variable_file(self): + try: + os.unlink(self.cron_file) + return True + except OSError: + # cron file does not exist + return False + except Exception: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + + def parse_for_var(self, line): + lexer = shlex.shlex(line) + lexer.wordchars = self.wordchars + varname = lexer.get_token() + is_env_var = lexer.get_token() == '=' + value = ''.join(lexer) + if is_env_var: + return (varname, value) + raise CronVarError("Not a variable.") + + def find_variable(self, name): + for l in self.lines: + try: + (varname, value) = self.parse_for_var(l) + if varname == name: + return value + except CronVarError: + pass + return None + + def get_var_names(self): + var_names = [] + for l in self.lines: + try: + var_name, dummy = self.parse_for_var(l) + var_names.append(var_name) + except CronVarError: + pass + return var_names + + def add_variable(self, name, value, insertbefore, insertafter): + if insertbefore is None and insertafter is None: + # Add the variable to the top of the file. 
+ self.lines.insert(0, "%s=%s" % (name, value)) + else: + newlines = [] + for l in self.lines: + try: + varname, dummy = self.parse_for_var(l) # Throws if not a var line + if varname == insertbefore: + newlines.append("%s=%s" % (name, value)) + newlines.append(l) + elif varname == insertafter: + newlines.append(l) + newlines.append("%s=%s" % (name, value)) + else: + raise CronVarError # Append. + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def remove_variable(self, name): + self.update_variable(name, None, remove=True) + + def update_variable(self, name, value, remove=False): + newlines = [] + for l in self.lines: + try: + varname, dummy = self.parse_for_var(l) # Throws if not a var line + if varname != name: + raise CronVarError # Append. + if not remove: + newlines.append("%s=%s" % (name, value)) + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def render(self): + """ + Render a proper crontab + """ + result = '\n'.join(self.lines) + if result and result[-1] not in ['\n', '\r']: + result += '\n' + return result + + def _read_user_execute(self): + """ + Returns the command line for reading a crontab + """ + user = '' + + if self.user: + if platform.system() == 'SunOS': + return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd)) + elif platform.system() == 'AIX': + return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user)) + elif platform.system() == 'HP-UX': + return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user)) + elif pwd.getpwuid(os.getuid())[0] != self.user: + user = '-u %s' % shlex_quote(self.user) + return "%s %s %s" % (self.cron_cmd, user, '-l') + + def _write_execute(self, path): + """ + Return the command line for writing a crontab + """ + user = '' + if self.user: + if platform.system() in ['SunOS', 'HP-UX', 'AIX']: + return "chown %s %s ; su '%s' -c '%s %s'" % ( + shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path)) + elif pwd.getpwuid(os.getuid())[0] != self.user: + user = '-u %s' % shlex_quote(self.user) + return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path)) + + +# ================================================== + +def main(): + # The following example playbooks: + # + # - community.general.cronvar: name="SHELL" value="/bin/bash" + # + # - name: Set the email + # community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com" + # + # - name: Get rid of the old new host variable + # community.general.cronvar: name="NEW_HOST" state=absent + # + # Would produce: + # SHELL = /bin/bash + # EMAILTO = doug@ansibmod.con.com + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + value=dict(type='str'), + user=dict(type='str'), + cron_file=dict(type='str'), + insertafter=dict(type='str'), + insertbefore=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + backup=dict(type='bool', default=False), + ), + mutually_exclusive=[['insertbefore', 'insertafter']], + supports_check_mode=False, + ) + + name = module.params['name'] + value = module.params['value'] + user = module.params['user'] + cron_file = module.params['cron_file'] + insertafter = module.params['insertafter'] + insertbefore = module.params['insertbefore'] + state = module.params['state'] + backup = module.params['backup'] + ensure_present = state == 'present' + + changed = False + res_args = dict() + + # Ensure all files generated are only writable by the owning user. 
Primarily relevant for the cron_file option.
+    os.umask(int('022', 8))
+    cronvar = CronVar(module, user, cron_file)
+
+    module.debug('cronvar instantiated - name: "%s"' % name)
+
+    # --- user input validation ---
+
+    if name is None and ensure_present:
+        module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+    if value is None and ensure_present:
+        module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+    if name is None and not ensure_present:
+        module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+    # if requested make a backup before making a change
+    if backup:
+        dummy, backup_file = tempfile.mkstemp(prefix='cronvar')
+        cronvar.write(backup_file)
+
+    if cronvar.cron_file and not name and not ensure_present:
+        changed = cronvar.remove_variable_file()
+        module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+    old_value = cronvar.find_variable(name)
+
+    if ensure_present:
+        if old_value is None:
+            cronvar.add_variable(name, value, insertbefore, insertafter)
+            changed = True
+        elif old_value != value:
+            cronvar.update_variable(name, value)
+            changed = True
+    else:
+        if old_value is not None:
+            cronvar.remove_variable(name)
+            changed = True
+
+    res_args = {
+        "vars": cronvar.get_var_names(),
+        "changed": changed
+    }
+
+    if changed:
+        cronvar.write()
+
+    # retain the backup only if crontab or cron file have changed
+    if backup:
+        if changed:
+            res_args['backup_file'] = backup_file
+        else:
+            os.unlink(backup_file)
+
+    if cron_file:
+        res_args['cron_file'] = cron_file
+
+    module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/crypttab.py b/ansible_collections/community/general/plugins/modules/crypttab.py
new file mode 100644
index 000000000..6aea362e7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/crypttab.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Steve
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: crypttab
+short_description: Encrypted Linux block devices
+description:
+  - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
+        optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
+        will be stripped from I(name).
+    type: str
+    required: true
+  state:
+    description:
+      - Use I(present) to add a line to C(/etc/crypttab) or update its definition
+        if already present.
+      - Use I(absent) to remove a line with matching I(name).
+      - Use I(opts_present) to add options to those already present; options with
+        different values will be updated.
+      - Use I(opts_absent) to remove options from the existing set.
+    type: str
+    required: true
+    choices: [ absent, opts_absent, opts_present, present ]
+  backing_device:
+    description:
+      - Path to the underlying block device or file, or the UUID of a block device
+        prefixed with I(UUID=).
+ type: str + password: + description: + - Encryption password, the path to a file containing the password, or + C(-) or unset if the password should be entered at boot. + type: path + opts: + description: + - A comma-delimited list of options. See C(crypttab(5) ) for details. + type: str + path: + description: + - Path to file to use instead of C(/etc/crypttab). + - This might be useful in a chroot environment. + type: path + default: /etc/crypttab +author: +- Steve (@groks) +''' + +EXAMPLES = r''' +- name: Set the options explicitly a device which must already exist + community.general.crypttab: + name: luks-home + state: present + opts: discard,cipher=aes-cbc-essiv:sha256 + +- name: Add the 'discard' option to any existing options for all devices + community.general.crypttab: + name: '{{ item.device }}' + state: opts_present + opts: discard + loop: '{{ ansible_mounts }}' + when: "'/dev/mapper/luks-' in {{ item.device }}" +''' + +import os +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']), + backing_device=dict(type='str'), + password=dict(type='path'), + opts=dict(type='str'), + path=dict(type='path', default='/etc/crypttab') + ), + supports_check_mode=True, + ) + + backing_device = module.params['backing_device'] + password = module.params['password'] + opts = module.params['opts'] + state = module.params['state'] + path = module.params['path'] + name = module.params['name'] + if name.startswith('/dev/mapper/'): + name = name[len('/dev/mapper/'):] + + if state != 'absent' and backing_device is None and password is None and opts is None: + module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", + **module.params) + + if 'opts' in state and (backing_device is not None or password is not None): + module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state, + **module.params) + + for arg_name, arg in (('name', name), + ('backing_device', backing_device), + ('password', password), + ('opts', opts)): + if (arg is not None and (' ' in arg or '\t' in arg or arg == '')): + module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name, + **module.params) + + try: + crypttab = Crypttab(path) + existing_line = crypttab.match(name) + except Exception as e: + module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e), + exception=traceback.format_exc(), **module.params) + + if 'present' in state and existing_line is None and backing_device is None: + module.fail_json(msg="'backing_device' required to add a new entry", + **module.params) + + changed, reason = False, '?' 
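+
+    # Dispatch on the requested state: 'present' rewrites or adds the whole
+    # line, 'opts_present' / 'opts_absent' merge or strip individual options,
+    # and 'absent' removes the matching line entirely.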
+ + if state == 'absent': + if existing_line is not None: + changed, reason = existing_line.remove() + + elif state == 'present': + if existing_line is not None: + changed, reason = existing_line.set(backing_device, password, opts) + else: + changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) + + elif state == 'opts_present': + if existing_line is not None: + changed, reason = existing_line.opts.add(opts) + else: + changed, reason = crypttab.add(Line(None, name, backing_device, password, opts)) + + elif state == 'opts_absent': + if existing_line is not None: + changed, reason = existing_line.opts.remove(opts) + + if changed and not module.check_mode: + try: + f = open(path, 'wb') + f.write(to_bytes(crypttab, errors='surrogate_or_strict')) + finally: + f.close() + + module.exit_json(changed=changed, msg=reason, **module.params) + + +class Crypttab(object): + _lines = [] + + def __init__(self, path): + self.path = path + if not os.path.exists(path): + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) + open(path, 'a').close() + + try: + f = open(path, 'r') + for line in f.readlines(): + self._lines.append(Line(line)) + finally: + f.close() + + def add(self, line): + self._lines.append(line) + return True, 'added line' + + def lines(self): + for line in self._lines: + if line.valid(): + yield line + + def match(self, name): + for line in self.lines(): + if line.name == name: + return line + return None + + def __str__(self): + lines = [] + for line in self._lines: + lines.append(str(line)) + crypttab = '\n'.join(lines) + if len(crypttab) == 0: + crypttab += '\n' + if crypttab[-1] != '\n': + crypttab += '\n' + return crypttab + + +class Line(object): + def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None): + self.line = line + self.name = name + self.backing_device = backing_device + self.password = password + self.opts = Options(opts) + + if line is not None: + self.line = self.line.rstrip('\n') + if self._line_valid(line): + self.name, backing_device, password, opts = self._split_line(line) + + self.set(backing_device, password, opts) + + def set(self, backing_device, password, opts): + changed = False + + if backing_device is not None and self.backing_device != backing_device: + self.backing_device = backing_device + changed = True + + if password is not None and self.password != password: + self.password = password + changed = True + + if opts is not None: + opts = Options(opts) + if opts != self.opts: + self.opts = opts + changed = True + + return changed, 'updated line' + + def _line_valid(self, line): + if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4): + return False + return True + + def _split_line(self, line): + fields = line.split() + try: + field2 = fields[2] + except IndexError: + field2 = None + try: + field3 = fields[3] + except IndexError: + field3 = None + + return (fields[0], + fields[1], + field2, + field3) + + def remove(self): + self.line, self.name, self.backing_device = '', None, None + return True, 'removed line' + + def valid(self): + if self.name is not None and self.backing_device is not None: + return True + return False + + def __str__(self): + if self.valid(): + fields = [self.name, self.backing_device] + if self.password is not None or self.opts: + if self.password is not None: + fields.append(self.password) + else: + fields.append('none') + if self.opts: + fields.append(str(self.opts)) + return ' '.join(fields) + return self.line + + 
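+
+# For illustration only (not executed by the module): the Options mapping below
+# preserves insertion order and renders back to crypttab's comma-separated
+# option syntax, so
+#
+#   opts = Options('discard,cipher=aes-cbc-essiv:sha256')
+#   opts.add('discard,tries=3')   # -> (True, 'updated options')
+#   str(opts)                     # -> 'discard,cipher=aes-cbc-essiv:sha256,tries=3'
+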
+class Options(dict): + """opts_string looks like: 'discard,foo=bar,baz=greeble' """ + + def __init__(self, opts_string): + super(Options, self).__init__() + self.itemlist = [] + if opts_string is not None: + for opt in opts_string.split(','): + kv = opt.split('=') + if len(kv) > 1: + k, v = (kv[0], kv[1]) + else: + k, v = (kv[0], None) + self[k] = v + + def add(self, opts_string): + changed = False + for k, v in Options(opts_string).items(): + if k in self: + if self[k] != v: + changed = True + else: + changed = True + self[k] = v + return changed, 'updated options' + + def remove(self, opts_string): + changed = False + for k in Options(opts_string): + if k in self: + del self[k] + changed = True + return changed, 'removed options' + + def keys(self): + return self.itemlist + + def values(self): + return [self[key] for key in self] + + def items(self): + return [(key, self[key]) for key in self] + + def __iter__(self): + return iter(self.itemlist) + + def __setitem__(self, key, value): + if key not in self: + self.itemlist.append(key) + super(Options, self).__setitem__(key, value) + + def __delitem__(self, key): + self.itemlist.remove(key) + super(Options, self).__delitem__(key) + + def __ne__(self, obj): + return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items())) + + def __str__(self): + ret = [] + for k, v in self.items(): + if v is None: + ret.append(k) + else: + ret.append('%s=%s' % (k, v)) + return ','.join(ret) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/datadog_downtime.py b/ansible_collections/community/general/plugins/modules/datadog_downtime.py new file mode 100644 index 000000000..6e506eb85 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/datadog_downtime.py @@ -0,0 +1,315 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Datadog, Inc +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: datadog_downtime +short_description: Manages Datadog downtimes +version_added: 2.0.0 +description: + - Manages downtimes within Datadog. + - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/). +author: + - Datadog (@Datadog) +requirements: + - datadog-api-client + - Python 3.6+ +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + api_key: + description: + - Your Datadog API key. + required: true + type: str + api_host: + description: + - The URL to the Datadog API. + - This value can also be set with the C(DATADOG_HOST) environment variable. + required: false + default: https://api.datadoghq.com + type: str + app_key: + description: + - Your Datadog app key. + required: true + type: str + state: + description: + - The designated state of the downtime. + required: false + choices: ["present", "absent"] + default: present + type: str + id: + description: + - The identifier of the downtime. + - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the C(state). + - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup. + type: int + monitor_tags: + description: + - A list of monitor tags to which the downtime applies. 
+ - The resulting downtime applies to monitors that match ALL provided monitor tags. + type: list + elements: str + scope: + description: + - A list of scopes to which the downtime applies. + - The resulting downtime applies to sources that matches ALL provided scopes. + type: list + elements: str + monitor_id: + description: + - The ID of the monitor to mute. If not provided, the downtime applies to all monitors. + type: int + downtime_message: + description: + - A message to include with notifications for this downtime. + - Email notifications can be sent to specific users by using the same "@username" notation as events. + type: str + start: + type: int + description: + - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created. + end: + type: int + description: + - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it. + timezone: + description: + - The timezone for the downtime. + type: str + rrule: + description: + - The C(RRULE) standard for defining recurring events. + - For example, to have a recurring event on the first day of each month, + select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1). + - Most common rrule options from the iCalendar Spec are supported. + - Attributes specifying the duration in C(RRULE) are not supported (e.g. C(DTSTART), C(DTEND), C(DURATION)). + type: str +""" + +EXAMPLES = """ + - name: Create a downtime + register: downtime_var + community.general.datadog_downtime: + state: present + monitor_tags: + - "foo:bar" + downtime_message: "Downtime for foo:bar" + scope: "test" + api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + # Lookup the id in the file and ignore errors if the file doesn't exits, so downtime gets created + id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}" + - name: Save downtime id to file for later updates and idempotence + delegate_to: localhost + copy: + content: "{{ downtime.downtime.id }}" + dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}" +""" + +RETURN = """ +# Returns the downtime JSON dictionary from the API response under the C(downtime) key. +# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details. +downtime: + description: The downtime returned by the API. 
+ type: dict + returned: always + sample: { + "active": true, + "canceled": null, + "creator_id": 1445416, + "disabled": false, + "downtime_type": 2, + "end": null, + "id": 1055751000, + "message": "Downtime for foo:bar", + "monitor_id": null, + "monitor_tags": [ + "foo:bar" + ], + "parent_id": null, + "recurrence": null, + "scope": [ + "test" + ], + "start": 1607015009, + "timezone": "UTC", + "updater_id": null + } +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +# Import Datadog + +DATADOG_IMP_ERR = None +HAS_DATADOG = True +try: + from datadog_api_client.v1 import Configuration, ApiClient, ApiException + from datadog_api_client.v1.api.downtimes_api import DowntimesApi + from datadog_api_client.v1.model.downtime import Downtime + from datadog_api_client.v1.model.downtime_recurrence import DowntimeRecurrence +except ImportError: + DATADOG_IMP_ERR = traceback.format_exc() + HAS_DATADOG = False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + api_host=dict(required=False, default="https://api.datadoghq.com"), + app_key=dict(required=True, no_log=True), + state=dict(required=False, choices=["present", "absent"], default="present"), + monitor_tags=dict(required=False, type="list", elements="str"), + scope=dict(required=False, type="list", elements="str"), + monitor_id=dict(required=False, type="int"), + downtime_message=dict(required=False, no_log=True), + start=dict(required=False, type="int"), + end=dict(required=False, type="int"), + timezone=dict(required=False, type="str"), + rrule=dict(required=False, type="str"), + id=dict(required=False, type="int"), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg=missing_required_lib("datadog-api-client"), exception=DATADOG_IMP_ERR) + + configuration = Configuration( + host=module.params["api_host"], + api_key={ + "apiKeyAuth": module.params["api_key"], + "appKeyAuth": module.params["app_key"] + } + ) + with ApiClient(configuration) as api_client: + api_client.user_agent = "ansible_collection/community_general (module_name datadog_downtime) {0}".format( + api_client.user_agent + ) + api_instance = DowntimesApi(api_client) + + # Validate api and app keys + try: + api_instance.list_downtimes(current_only=True) + except ApiException as e: + module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key: {0}".format(e)) + + if module.params["state"] == "present": + schedule_downtime(module, api_client) + elif module.params["state"] == "absent": + cancel_downtime(module, api_client) + + +def _get_downtime(module, api_client): + api = DowntimesApi(api_client) + downtime = None + if module.params["id"]: + try: + downtime = api.get_downtime(module.params["id"]) + except ApiException as e: + module.fail_json(msg="Failed to retrieve downtime with id {0}: {1}".format(module.params["id"], e)) + return downtime + + +def build_downtime(module): + downtime = Downtime() + if module.params["monitor_tags"]: + downtime.monitor_tags = module.params["monitor_tags"] + if module.params["scope"]: + downtime.scope = module.params["scope"] + if module.params["monitor_id"]: + downtime.monitor_id = module.params["monitor_id"] + if module.params["downtime_message"]: + downtime.message = module.params["downtime_message"] + if module.params["start"]: + downtime.start = module.params["start"] + if module.params["end"]: + downtime.end = module.params["end"] + if module.params["timezone"]: + downtime.timezone = 
module.params["timezone"] + if module.params["rrule"]: + downtime.recurrence = DowntimeRecurrence( + rrule=module.params["rrule"] + ) + return downtime + + +def _post_downtime(module, api_client): + api = DowntimesApi(api_client) + downtime = build_downtime(module) + try: + resp = api.create_downtime(downtime) + module.params["id"] = resp.id + module.exit_json(changed=True, downtime=resp.to_dict()) + except ApiException as e: + module.fail_json(msg="Failed to create downtime: {0}".format(e)) + + +def _equal_dicts(a, b, ignore_keys): + ka = set(a).difference(ignore_keys) + kb = set(b).difference(ignore_keys) + return ka == kb and all(a[k] == b[k] for k in ka) + + +def _update_downtime(module, current_downtime, api_client): + api = DowntimesApi(api_client) + downtime = build_downtime(module) + try: + if current_downtime.disabled: + resp = api.create_downtime(downtime) + else: + resp = api.update_downtime(module.params["id"], downtime) + if _equal_dicts( + resp.to_dict(), + current_downtime.to_dict(), + ["active", "creator_id", "updater_id"] + ): + module.exit_json(changed=False, downtime=resp.to_dict()) + else: + module.exit_json(changed=True, downtime=resp.to_dict()) + except ApiException as e: + module.fail_json(msg="Failed to update downtime: {0}".format(e)) + + +def schedule_downtime(module, api_client): + downtime = _get_downtime(module, api_client) + if downtime is None: + _post_downtime(module, api_client) + else: + _update_downtime(module, downtime, api_client) + + +def cancel_downtime(module, api_client): + downtime = _get_downtime(module, api_client) + api = DowntimesApi(api_client) + if downtime is None: + module.exit_json(changed=False) + try: + api.cancel_downtime(downtime["id"]) + except ApiException as e: + module.fail_json(msg="Failed to create downtime: {0}".format(e)) + + module.exit_json(changed=True) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/datadog_event.py b/ansible_collections/community/general/plugins/modules/datadog_event.py new file mode 100644 index 000000000..b8161eca6 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/datadog_event.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Author: Artūras 'arturaz' Šlajus +# Author: Naoya Nakazawa +# +# This module is proudly sponsored by iGeolise (www.igeolise.com) and +# Tiny Lab Productions (www.tinylabproductions.com). +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: datadog_event +short_description: Posts events to Datadog service +description: + - "Allows to post events to Datadog (www.datadoghq.com) service." + - "Uses http://docs.datadoghq.com/api/#events API." 
+author: + - "Artūras 'arturaz' Šlajus (@arturaz)" + - "Naoya Nakazawa (@n0ts)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + api_key: + type: str + description: ["Your DataDog API key."] + required: true + app_key: + type: str + description: ["Your DataDog app key."] + required: true + title: + type: str + description: ["The event title."] + required: true + text: + type: str + description: ["The body of the event."] + required: true + date_happened: + type: int + description: + - POSIX timestamp of the event. + - Default value is now. + priority: + type: str + description: ["The priority of the event."] + default: normal + choices: [normal, low] + host: + type: str + description: + - Host name to associate with the event. + - If not specified, it defaults to the remote system's hostname. + api_host: + type: str + description: + - DataDog API endpoint URL. + version_added: '3.3.0' + tags: + type: list + elements: str + description: ["Comma separated list of tags to apply to the event."] + alert_type: + type: str + description: ["Type of alert."] + default: info + choices: ['error', 'warning', 'info', 'success'] + aggregation_key: + type: str + description: ["An arbitrary string to use for aggregation."] + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: true +''' + +EXAMPLES = ''' +- name: Post an event with low priority + community.general.datadog_event: + title: Testing from ansible + text: Test + priority: low + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + +- name: Post an event with several tags + community.general.datadog_event: + title: Testing from ansible + text: Test + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + tags: 'aa,bb,#host:{{ inventory_hostname }}' + +- name: Post an event with several tags to another endpoint + community.general.datadog_event: + title: Testing from ansible + text: Test + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + api_host: 'https://example.datadoghq.eu' + tags: + - aa + - b + - '#host:{{ inventory_hostname }}' + +''' + +import platform +import traceback + +# Import Datadog +DATADOG_IMP_ERR = None +try: + from datadog import initialize, api + HAS_DATADOG = True +except Exception: + DATADOG_IMP_ERR = traceback.format_exc() + HAS_DATADOG = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + app_key=dict(required=True, no_log=True), + api_host=dict(type='str'), + title=dict(required=True), + text=dict(required=True), + date_happened=dict(type='int'), + priority=dict(default='normal', choices=['normal', 'low']), + host=dict(), + tags=dict(type='list', elements='str'), + alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']), + aggregation_key=dict(no_log=False), + validate_certs=dict(default=True, type='bool'), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) + + options = { + 'api_key': module.params['api_key'], + 'app_key': 
module.params['app_key'], + } + if module.params['api_host'] is not None: + options['api_host'] = module.params['api_host'] + + initialize(**options) + + _post_event(module) + + +def _post_event(module): + try: + if module.params['host'] is None: + module.params['host'] = platform.node().split('.')[0] + msg = api.Event.create(title=module.params['title'], + text=module.params['text'], + host=module.params['host'], + tags=module.params['tags'], + priority=module.params['priority'], + alert_type=module.params['alert_type'], + aggregation_key=module.params['aggregation_key'], + source_type_name='ansible') + if msg['status'] != 'ok': + module.fail_json(msg=msg) + + module.exit_json(changed=True, msg=msg) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/datadog_monitor.py b/ansible_collections/community/general/plugins/modules/datadog_monitor.py new file mode 100644 index 000000000..f58df358b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/datadog_monitor.py @@ -0,0 +1,428 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Sebastian Kornehl +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: datadog_monitor +short_description: Manages Datadog monitors +description: + - Manages monitors within Datadog. + - Options as described on https://docs.datadoghq.com/api/. + - The type C(event-v2) was added in community.general 4.8.0. +author: Sebastian Kornehl (@skornehl) +requirements: [datadog] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + api_key: + description: + - Your Datadog API key. + required: true + type: str + api_host: + description: + - The URL to the Datadog API. Default value is C(https://api.datadoghq.com). + - This value can also be set with the C(DATADOG_HOST) environment variable. + required: false + type: str + version_added: '0.2.0' + app_key: + description: + - Your Datadog app key. + required: true + type: str + state: + description: + - The designated state of the monitor. + required: true + choices: ['present', 'absent', 'mute', 'unmute'] + type: str + tags: + description: + - A list of tags to associate with your monitor when creating or updating. + - This can help you categorize and filter monitors. + type: list + elements: str + type: + description: + - The type of the monitor. + - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0. + - The type C(composite) was added in community.general 3.4.0. + choices: + - metric alert + - service check + - event alert + - event-v2 alert + - process alert + - log alert + - query alert + - trace-analytics alert + - rum alert + - composite + type: str + query: + description: + - The monitor query to notify on. + - Syntax varies depending on what type of monitor you are creating. + type: str + name: + description: + - The name of the alert. + required: true + type: str + notification_message: + description: + - A message to include with notifications for this monitor. 
+      - Email notifications can be sent to specific users by using the same '@username' notation as events.
+      - Monitor message template variables can be accessed by using double square brackets, i.e. '[[' and ']]'.
+    type: str
+  silenced:
+    type: dict
+    description:
+      - Dictionary of scopes to silence, with timestamps or None.
+      - Each scope will be muted until the given POSIX timestamp, or forever if the value is None.
+  notify_no_data:
+    description:
+      - Whether this monitor will notify when data stops reporting.
+    type: bool
+    default: false
+  no_data_timeframe:
+    description:
+      - The number of minutes before a monitor will notify when data stops reporting.
+      - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
+      - If not specified, it defaults to 2x the timeframe for metric alerts and 2 minutes for service checks.
+    type: str
+  timeout_h:
+    description:
+      - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
+    type: str
+  renotify_interval:
+    description:
+      - The number of minutes after the last notification before a monitor will re-notify on the current status.
+      - It will only re-notify if it is not resolved.
+    type: str
+  escalation_message:
+    description:
+      - A message to include with a re-notification. Supports the same '@username' notification syntax as events.
+      - Not applicable if I(renotify_interval=None).
+    type: str
+  notify_audit:
+    description:
+      - Whether tagged users will be notified on changes to this monitor.
+    type: bool
+    default: false
+  thresholds:
+    type: dict
+    description:
+      - A dictionary of thresholds by status.
+      - Only available for service checks and metric alerts.
+      - Because each of them can have multiple thresholds, we do not define them directly in the query.
+      - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})."
+  locked:
+    description:
+      - Whether changes to this monitor should be restricted to the creator or admins.
+    type: bool
+    default: false
+  require_full_window:
+    description:
+      - Whether this monitor needs a full window of data before it gets evaluated.
+      - We highly recommend you set this to false for sparse metrics, otherwise some evaluations will be skipped.
+    type: bool
+  new_host_delay:
+    description:
+      - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
+      - This gives the host time to fully initialize.
+    type: str
+  evaluation_delay:
+    description:
+      - Time to delay evaluation (in seconds).
+      - Effective for sparse values.
+    type: str
+  id:
+    description:
+      - The ID of the alert.
+      - If set, it will be used instead of the name to locate the alert.
+    type: str
+  include_tags:
+    description:
+      - Whether notifications from this monitor automatically insert its triggering tags into the title.
+    type: bool
+    default: true
+    version_added: 1.3.0
+  priority:
+    description:
+      - Integer from 1 (high) to 5 (low) indicating alert severity.
+    type: int
+    version_added: 4.6.0
+'''
+
+EXAMPLES = '''
+- name: Create a metric monitor
+  community.general.datadog_monitor:
+    type: "metric alert"
+    name: "Test monitor"
+    state: "present"
+    query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
+    notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +- name: Deletes a monitor + community.general.datadog_monitor: + name: "Test monitor" + state: "absent" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +- name: Mutes a monitor + community.general.datadog_monitor: + name: "Test monitor" + state: "mute" + silenced: '{"*":None}' + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +- name: Unmutes a monitor + community.general.datadog_monitor: + name: "Test monitor" + state: "unmute" + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" + +- name: Use datadoghq.eu platform instead of datadoghq.com + community.general.datadog_monitor: + name: "Test monitor" + state: "absent" + api_host: https://api.datadoghq.eu + api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" + app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" +''' +import traceback + +# Import Datadog +DATADOG_IMP_ERR = None +try: + from datadog import initialize, api + HAS_DATADOG = True +except Exception: + DATADOG_IMP_ERR = traceback.format_exc() + HAS_DATADOG = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, no_log=True), + api_host=dict(), + app_key=dict(required=True, no_log=True), + state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), + type=dict(choices=['metric alert', 'service check', 'event alert', 'event-v2 alert', 'process alert', + 'log alert', 'query alert', 'trace-analytics alert', + 'rum alert', 'composite']), + name=dict(required=True), + query=dict(), + notification_message=dict(no_log=True), + silenced=dict(type='dict'), + notify_no_data=dict(default=False, type='bool'), + no_data_timeframe=dict(), + timeout_h=dict(), + renotify_interval=dict(), + escalation_message=dict(), + notify_audit=dict(default=False, type='bool'), + thresholds=dict(type='dict', default=None), + tags=dict(type='list', elements='str', default=None), + locked=dict(default=False, type='bool'), + require_full_window=dict(type='bool'), + new_host_delay=dict(), + evaluation_delay=dict(), + id=dict(), + include_tags=dict(required=False, default=True, type='bool'), + priority=dict(type='int'), + ) + ) + + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) + + options = { + 'api_key': module.params['api_key'], + 'api_host': module.params['api_host'], + 'app_key': module.params['app_key'] + } + + initialize(**options) + + # Check if api_key and app_key is correct or not + # if not, then fail here. 
+    response = api.Monitor.get_all()
+    if isinstance(response, dict):
+        msg = response.get('errors', None)
+        if msg:
+            module.fail_json(msg="Failed to connect to Datadog server using the given app_key and api_key: {0}".format(msg[0]))
+
+    if module.params['state'] == 'present':
+        install_monitor(module)
+    elif module.params['state'] == 'absent':
+        delete_monitor(module)
+    elif module.params['state'] == 'mute':
+        mute_monitor(module)
+    elif module.params['state'] == 'unmute':
+        unmute_monitor(module)
+
+
+def _fix_template_vars(message):
+    if message:
+        return message.replace('[[', '{{').replace(']]', '}}')
+    return message
+
+
+def _get_monitor(module):
+    if module.params['id'] is not None:
+        monitor = api.Monitor.get(module.params['id'])
+        if 'errors' in monitor:
+            module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
+        return monitor
+    else:
+        monitors = api.Monitor.get_all()
+        for monitor in monitors:
+            if monitor['name'] == _fix_template_vars(module.params['name']):
+                return monitor
+        return {}
+
+
+def _post_monitor(module, options):
+    try:
+        kwargs = dict(type=module.params['type'], query=module.params['query'],
+                      name=_fix_template_vars(module.params['name']),
+                      message=_fix_template_vars(module.params['notification_message']),
+                      escalation_message=_fix_template_vars(module.params['escalation_message']),
+                      priority=module.params['priority'],
+                      options=options)
+        if module.params['tags'] is not None:
+            kwargs['tags'] = module.params['tags']
+        msg = api.Monitor.create(**kwargs)
+        if 'errors' in msg:
+            module.fail_json(msg=str(msg['errors']))
+        else:
+            module.exit_json(changed=True, msg=msg)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def _equal_dicts(a, b, ignore_keys):
+    ka = set(a).difference(ignore_keys)
+    kb = set(b).difference(ignore_keys)
+    return ka == kb and all(a[k] == b[k] for k in ka)
+
+
+def _update_monitor(module, monitor, options):
+    try:
+        kwargs = dict(id=monitor['id'], query=module.params['query'],
+                      name=_fix_template_vars(module.params['name']),
+                      message=_fix_template_vars(module.params['notification_message']),
+                      escalation_message=_fix_template_vars(module.params['escalation_message']),
+                      priority=module.params['priority'],
+                      options=options)
+        if module.params['tags'] is not None:
+            kwargs['tags'] = module.params['tags']
+        msg = api.Monitor.update(**kwargs)
+
+        if 'errors' in msg:
+            module.fail_json(msg=str(msg['errors']))
+        elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
+            module.exit_json(changed=False, msg=msg)
+        else:
+            module.exit_json(changed=True, msg=msg)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def install_monitor(module):
+    options = {
+        "silenced": module.params['silenced'],
+        "notify_no_data": module.boolean(module.params['notify_no_data']),
+        "no_data_timeframe": module.params['no_data_timeframe'],
+        "timeout_h": module.params['timeout_h'],
+        "renotify_interval": module.params['renotify_interval'],
+        "escalation_message": module.params['escalation_message'],
+        "notify_audit": module.boolean(module.params['notify_audit']),
+        "locked": module.boolean(module.params['locked']),
+        "require_full_window": module.params['require_full_window'],
+        "new_host_delay": module.params['new_host_delay'],
+        "evaluation_delay": module.params['evaluation_delay'],
+        "include_tags": module.params['include_tags'],
+    }
+
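+    # Thresholds only apply to service checks and metric-style alerts, and
+    # Datadog expects them keyed by status, for example:
+    #     {'ok': 1, 'critical': 1, 'warning': 1}
+    # Service checks fall back to exactly that default below; metric-style
+    # alerts only send thresholds when the user supplied them explicitly.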
+    if module.params['type'] == "service check":
+        options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
+    if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None:
+        options["thresholds"] = module.params['thresholds']
+
+    monitor = _get_monitor(module)
+    if not monitor:
+        _post_monitor(module, options)
+    else:
+        _update_monitor(module, monitor, options)
+
+
+def delete_monitor(module):
+    monitor = _get_monitor(module)
+    if not monitor:
+        module.exit_json(changed=False)
+    try:
+        msg = api.Monitor.delete(monitor['id'])
+        module.exit_json(changed=True, msg=msg)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def mute_monitor(module):
+    monitor = _get_monitor(module)
+    if not monitor:
+        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+    elif monitor['options']['silenced']:
+        module.fail_json(msg="Monitor is already muted. Datadog does not allow modifying muted alerts, consider unmuting it first.")
+    elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+        module.exit_json(changed=False)
+    try:
+        if module.params['silenced'] is None or module.params['silenced'] == "":
+            msg = api.Monitor.mute(id=monitor['id'])
+        else:
+            msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
+        module.exit_json(changed=True, msg=msg)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def unmute_monitor(module):
+    monitor = _get_monitor(module)
+    if not monitor:
+        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+    elif not monitor['options']['silenced']:
+        module.exit_json(changed=False)
+    try:
+        msg = api.Monitor.unmute(monitor['id'])
+        module.exit_json(changed=True, msg=msg)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/dconf.py b/ansible_collections/community/general/plugins/modules/dconf.py
new file mode 100644
index 000000000..8c325486c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dconf.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Branko Majic
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: dconf
+author:
+  - "Branko Majic (@azaghal)"
+short_description: Modify and read dconf database
+description:
+  - This module allows modifications and reading of the C(dconf) database. The
+    module is implemented as a wrapper around the C(dconf) tool. Please see the
+    dconf(1) man page for more details.
+  - Since C(dconf) requires a running D-Bus session to change values, the module
+    will try to detect an existing session and reuse it, or run the tool via
+    C(dbus-run-session).
+requirements:
+  - Optionally the C(gi.repository) Python library (usually included in the OS
+    on hosts which have C(dconf)); this will become a non-optional requirement
+    in a future major release of community.general.
+notes:
+  - This module depends on the C(psutil) Python library (version 4.0.0 and
+    upwards) and the C(dconf), C(dbus-send), and C(dbus-run-session) binaries.
+    Depending on the distribution you are using, you may need to install
+    additional packages to have these available.
+  - This module uses the C(gi.repository) Python library when available for
+    accurate comparison of values in C(dconf) to values specified in Ansible
+    code. C(gi.repository) is likely to be present on most systems which have
+    C(dconf) but may not be present everywhere. When it is missing, a simple
+    string comparison between values is used, and there may be false positives,
+    that is, Ansible may think that a value is being changed when it is not.
+    This fallback will be removed in a future version of this module, at which
+    point the module will stop working on hosts without C(gi.repository).
+  - Detection of an existing, running D-Bus session, required to change settings
+    via C(dconf), is not 100% reliable due to implementation details of the
+    D-Bus daemon itself. This might lead to running applications not picking up
+    changes on the fly if options are changed via Ansible and
+    C(dbus-run-session).
+  - Keep in mind that the C(dconf) CLI tool, which this module wraps around,
+    utilises an unusual syntax for the values (GVariant). For example, if you
+    wanted to provide a string value, the correct syntax would be
+    I(value="'myvalue'") - with single quotes as part of the Ansible parameter
+    value.
+  - When using loops in combination with a value like
+    "[('xkb', 'us'), ('xkb', 'se')]", you need to be aware of possible
+    type conversions. Applying a filter C({{ item.value | string }})
+    to the parameter variable can avoid potential conversion problems.
+  - The easiest way to figure out the exact syntax/value you need to provide
+    for a key is by making the configuration change in the application affected
+    by the key, and then having a look at the value set, via the commands
+    C(dconf dump /path/to/dir/) or C(dconf read /path/to/key).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  key:
+    type: str
+    required: true
+    description:
+      - A dconf key to modify or read from the dconf database.
+  value:
+    type: raw
+    required: false
+    description:
+      - Value to set for the specified dconf key. Value should be specified in
+        GVariant format. Due to the complexity of this format, it is best to
+        have a look at existing values in the dconf database.
+      - Required for I(state=present).
+      - Although the type is specified as "raw", it should typically be
+        specified as a string. However, boolean values in particular are
+        handled properly even when specified as booleans rather than strings
+        (in fact, handling booleans properly is why the type of this parameter
+        is "raw").
+  state:
+    type: str
+    required: false
+    default: present
+    choices: [ 'read', 'present', 'absent' ]
+    description:
+      - The action to take upon the key/value.
+''' + +RETURN = r""" +value: + description: value associated with the requested key + returned: success, state was "read" + type: str + sample: "'Default'" +""" + +EXAMPLES = r""" +- name: Configure available keyboard layouts in Gnome + community.general.dconf: + key: "/org/gnome/desktop/input-sources/sources" + value: "[('xkb', 'us'), ('xkb', 'se')]" + state: present + +- name: Read currently available keyboard layouts in Gnome + community.general.dconf: + key: "/org/gnome/desktop/input-sources/sources" + state: read + register: keyboard_layouts + +- name: Reset the available keyboard layouts in Gnome + community.general.dconf: + key: "/org/gnome/desktop/input-sources/sources" + state: absent + +- name: Configure available keyboard layouts in Cinnamon + community.general.dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + value: "['us', 'se']" + state: present + +- name: Read currently available keyboard layouts in Cinnamon + community.general.dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + state: read + register: keyboard_layouts + +- name: Reset the available keyboard layouts in Cinnamon + community.general.dconf: + key: "/org/gnome/libgnomekbd/keyboard/layouts" + state: absent + +- name: Disable desktop effects in Cinnamon + community.general.dconf: + key: "/org/cinnamon/desktop-effects" + value: "false" + state: present +""" + + +import os +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.respawn import ( + has_respawned, + probe_interpreters_for_module, + respawn_module, +) +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils import deps + +glib_module_name = 'gi.repository.GLib' + +try: + from gi.repository.GLib import Variant, GError +except ImportError: + Variant = None + GError = AttributeError + +with deps.declare("psutil"): + import psutil + + +class DBusWrapper(object): + """ + Helper class that can be used for running a command with a working D-Bus + session. + + If possible, command will be run against an existing D-Bus session, + otherwise the session will be spawned via dbus-run-session. + + Example usage: + + dbus_wrapper = DBusWrapper(ansible_module) + dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"]) + """ + + def __init__(self, module): + """ + Initialises an instance of the class. + + :param module: Ansible module instance used to signal failures and run commands. + :type module: AnsibleModule + """ + + # Store passed-in arguments and set-up some defaults. + self.module = module + + # Try to extract existing D-Bus session address. + self.dbus_session_bus_address = self._get_existing_dbus_session() + + # If no existing D-Bus session was detected, check if dbus-run-session + # is available. + if self.dbus_session_bus_address is None: + self.dbus_run_session_cmd = self.module.get_bin_path('dbus-run-session', required=True) + + def _get_existing_dbus_session(self): + """ + Detects and returns an existing D-Bus session bus address. + + :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None. + """ + + # We'll be checking the processes of current user only. + uid = os.getuid() + + # Go through all the pids for this user, try to extract the D-Bus + # session bus address from environment, and ensure it is possible to + # connect to it. 
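+        # A candidate address is verified by emitting a no-op signal with
+        # dbus-send; a zero exit code means the bus at that address is
+        # reachable and accepts connections from this user.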
+ self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid) + + for pid in psutil.pids(): + try: + process = psutil.Process(pid) + process_real_uid, dummy, dummy = process.uids() + if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ(): + dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS'] + self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate) + dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True) + command = [dbus_send_cmd, '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test'] + rc, dummy, dummy = self.module.run_command(command) + + if rc == 0: + self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate) + + return dbus_session_bus_address_candidate + + # This can happen with things like SSH sessions etc. + except psutil.AccessDenied: + pass + # Process has disappeared while inspecting it + except psutil.NoSuchProcess: + pass + + self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session") + + return None + + def run_command(self, command): + """ + Runs the specified command within a functional D-Bus session. Command is + effectively passed-on to AnsibleModule.run_command() method, with + modification for using dbus-run-session if necessary. + + :param command: Command to run, including parameters. Each element of the list should be a string. + :type module: list + + :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command. + """ + + if self.dbus_session_bus_address is None: + self.module.debug("Using dbus-run-session wrapper for running commands.") + command = [self.dbus_run_session_cmd] + command + rc, out, err = self.module.run_command(command) + + if self.dbus_session_bus_address is None and rc == 127: + self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err) + else: + extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address} + rc, out, err = self.module.run_command(command, environ_update=extra_environment) + + return rc, out, err + + +class DconfPreference(object): + + def __init__(self, module, check_mode=False): + """ + Initialises instance of the class. + + :param module: Ansible module instance used to signal failures and run commands. + :type module: AnsibleModule + + :param check_mode: Specify whether to only check if a change should be made or if to actually make a change. + :type check_mode: bool + """ + + self.module = module + self.check_mode = check_mode + # Check if dconf binary exists + self.dconf_bin = self.module.get_bin_path('dconf', required=True) + + @staticmethod + def variants_are_equal(canonical_value, user_value): + """Compare two string GVariant representations for equality. + + Assumes `canonical_value` is "canonical" in the sense that the type of + the variant is specified explicitly if it cannot be inferred; this is + true for textual representations of variants generated by the `dconf` + command. The type of `canonical_value` is used to parse `user_value`, + so the latter does not need to be explicitly typed. + + Returns True if the two values are equal. + """ + if canonical_value is None: + # It's unset in dconf database, so anything the user is trying to + # set is a change. 
+ return False + try: + variant1 = Variant.parse(None, canonical_value) + variant2 = Variant.parse(variant1.get_type(), user_value) + return variant1 == variant2 + except GError: + return canonical_value == user_value + + def read(self, key): + """ + Retrieves current value associated with the dconf key. + + If an error occurs, a call will be made to AnsibleModule.fail_json. + + :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None. + """ + command = [self.dconf_bin, "read", key] + + rc, out, err = self.module.run_command(command) + + if rc != 0: + self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err, + out=out, + err=err) + + if out == '': + value = None + else: + value = out.rstrip('\n') + + return value + + def write(self, key, value): + """ + Writes the value for specified key. + + If an error occurs, a call will be made to AnsibleModule.fail_json. + + :param key: dconf key for which the value should be set. Should be a full path. + :type key: str + + :param value: Value to set for the specified dconf key. Should be specified in GVariant format. + :type value: str + + :returns: bool -- True if a change was made, False if no change was required. + """ + # If no change is needed (or won't be done due to check_mode), notify + # caller straight away. + if self.variants_are_equal(self.read(key), value): + return False + elif self.check_mode: + return True + + # Set-up command to run. Since DBus is needed for write operation, wrap + # dconf command dbus-launch. + command = [self.dconf_bin, "write", key, value] + + # Run the command and fetch standard return code, stdout, and stderr. + dbus_wrapper = DBusWrapper(self.module) + rc, out, err = dbus_wrapper.run_command(command) + + if rc != 0: + self.module.fail_json(msg='dconf failed while writing key %s, value %s with error: %s' % (key, value, err), + out=out, + err=err) + + # Value was changed. + return True + + def reset(self, key): + """ + Returns value for the specified key (removes it from user configuration). + + If an error occurs, a call will be made to AnsibleModule.fail_json. + + :param key: dconf key to reset. Should be a full path. + :type key: str + + :returns: bool -- True if a change was made, False if no change was required. + """ + + # Read the current value first. + current_value = self.read(key) + + # No change was needed, key is not set at all, or just notify user if we + # are in check mode. + if current_value is None: + return False + elif self.check_mode: + return True + + # Set-up command to run. Since DBus is needed for reset operation, wrap + # dconf command dbus-launch. + command = [self.dconf_bin, "reset", key] + + # Run the command and fetch standard return code, stdout, and stderr. + dbus_wrapper = DBusWrapper(self.module) + rc, out, err = dbus_wrapper.run_command(command) + + if rc != 0: + self.module.fail_json(msg='dconf failed while reseting the value with error: %s' % err, + out=out, + err=err) + + # Value was changed. + return True + + +def main(): + # Setup the Ansible module + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent', 'read']), + key=dict(required=True, type='str', no_log=False), + # Converted to str below after special handling of bool. 
+ value=dict(required=False, default=None, type='raw'), + ), + supports_check_mode=True, + required_if=[ + ('state', 'present', ['value']), + ], + ) + + if Variant is None: + # This interpreter can't see the GLib module. To try to fix that, we'll + # look in common locations for system-owned interpreters that can see + # it; if we find one, we'll respawn under it. Otherwise we'll proceed + # with degraded performance, without the ability to parse GVariants. + # Later (in a different PR) we'll actually deprecate this degraded + # performance level and fail with an error if the library can't be + # found. + + if has_respawned(): + # This shouldn't be possible; short-circuit early if it happens. + module.fail_json( + msg="%s must be installed and visible from %s." % + (glib_module_name, sys.executable)) + + interpreters = ['/usr/bin/python3', '/usr/bin/python2', + '/usr/bin/python'] + + interpreter = probe_interpreters_for_module( + interpreters, glib_module_name) + + if interpreter: + # Found the Python bindings; respawn this module under the + # interpreter where we found them. + respawn_module(interpreter) + # This is the end of the line for this process, it will exit here + # once the respawned module has completed. + + # Try to be forgiving about the user specifying a boolean as the value, or + # more accurately about the fact that YAML and Ansible are quite insistent + # about converting strings that look like booleans into booleans. Convert + # the boolean into a string of the type dconf will understand. Any type for + # the value other than boolean is just converted into a string directly. + if module.params['value'] is not None: + if isinstance(module.params['value'], bool): + module.params['value'] = 'true' if module.params['value'] else 'false' + else: + module.params['value'] = to_native( + module.params['value'], errors='surrogate_or_strict') + + if Variant is None: + module.warn( + 'WARNING: The gi.repository Python library is not available; ' + 'using string comparison to check value equality. This fallback ' + 'will be deprecated in a future version of community.general.') + + deps.validate(module) + + # Create wrapper instance. + dconf = DconfPreference(module, module.check_mode) + + # Process based on different states. + if module.params['state'] == 'read': + value = dconf.read(module.params['key']) + module.exit_json(changed=False, value=value) + elif module.params['state'] == 'present': + changed = dconf.write(module.params['key'], module.params['value']) + module.exit_json(changed=changed) + elif module.params['state'] == 'absent': + changed = dconf.reset(module.params['key']) + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/deploy_helper.py b/ansible_collections/community/general/plugins/modules/deploy_helper.py new file mode 100644 index 000000000..f0246cae6 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/deploy_helper.py @@ -0,0 +1,535 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Jasper N. 
Brouwer +# Copyright (c) 2014, Ramon de la Fuente +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: deploy_helper +author: "Ramon de la Fuente (@ramondelafuente)" +short_description: Manages some of the steps common in deploying projects +description: + - The Deploy Helper manages some of the steps common in deploying software. + It creates a folder structure, manages a symlink for the current release + and cleans up old releases. + - "Running it with the I(state=query) or I(state=present) will return the C(deploy_helper) fact. + C(project_path), whatever you set in the I(path) parameter, + C(current_path), the path to the symlink that points to the active release, + C(releases_path), the path to the folder to keep releases in, + C(shared_path), the path to the folder to keep shared resources in, + C(unfinished_filename), the file to check for to recognize unfinished builds, + C(previous_release), the release the 'current' symlink is pointing to, + C(previous_release_path), the full path to the 'current' symlink target, + C(new_release), either the 'release' parameter or a generated timestamp, + C(new_release_path), the path to the new release folder (not created by the module)." + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + path: + type: path + required: true + aliases: ['dest'] + description: + - The root path of the project. + Returned in the C(deploy_helper.project_path) fact. + + state: + type: str + description: + - The state of the project. + C(query) will only gather facts, + C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders, + C(finalize) will remove the unfinished_filename file, create a symlink to the newly + deployed release and optionally clean old releases, + C(clean) will remove failed & old releases, + C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with I(state=absent)). + choices: [ present, finalize, absent, clean, query ] + default: present + + release: + type: str + description: + - The release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359'). + This parameter is optional during I(state=present), but needs to be set explicitly for I(state=finalize). + You can use the generated fact I(release={{ deploy_helper.new_release }}). + + releases_path: + type: str + description: + - The name of the folder that will hold the releases. This can be relative to I(path) or absolute. + Returned in the C(deploy_helper.releases_path) fact. + default: releases + + shared_path: + type: path + description: + - The name of the folder that will hold the shared resources. This can be relative to I(path) or absolute. + If this is set to an empty string, no shared folder will be created. + Returned in the C(deploy_helper.shared_path) fact. + default: shared + + current_path: + type: path + description: + - The name of the symlink that is created when the deploy is finalized. Used in I(finalize) and I(clean). + Returned in the C(deploy_helper.current_path) fact. + default: current + + unfinished_filename: + type: str + description: + - The name of the file that indicates a deploy has not finished. 
All folders in the I(releases_path) that + contain this file will be deleted on I(state=finalize) with I(clean=True), or I(state=clean). This file is + automatically deleted from the I(new_release_path) during I(state=finalize). + default: DEPLOY_UNFINISHED + + clean: + description: + - Whether to run the clean procedure in case of I(state=finalize). + type: bool + default: true + + keep_releases: + type: int + description: + - The number of old releases to keep when cleaning. Used in I(finalize) and I(clean). Any unfinished builds + will be deleted first, so only correct releases will count. The current version will not count. + default: 5 + +notes: + - Facts are only returned for I(state=query) and I(state=present). If you use both, you should pass any overridden + parameters to both calls, otherwise the second call will overwrite the facts of the first one. + - When using I(state=clean), the releases are ordered by I(creation date). You should be able to switch to a + new naming strategy without problems. + - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent + unless you pass your own release name with I(release). Due to the nature of deploying software, this should not + be much of a problem. +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +''' + +EXAMPLES = ''' + +# General explanation, starting with an example folder structure for a project: + +# root: +# releases: +# - 20140415234508 +# - 20140415235146 +# - 20140416082818 +# +# shared: +# - sessions +# - uploads +# +# current: releases/20140416082818 + + +# The 'releases' folder holds all the available releases. A release is a complete build of the application being +# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem. +# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like +# git tags or commit hashes. +# +# During a deploy, a new folder should be created in the releases folder and any build steps required should be +# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink +# with a link to this build. +# +# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server +# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release +# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps. +# +# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress. +# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new +# release is reduced to the time it takes to switch the link. +# +# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release +# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated +# procedure to remove it during cleanup. 
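+#
+# Note: when finalizing, the module replaces the 'current' symlink by first
+# creating a temporary link and then renaming it over the old one, so the
+# switch itself is effectively a single atomic step.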
+ + +# Typical usage +- name: Initialize the deploy root and gather facts + community.general.deploy_helper: + path: /path/to/root +- name: Clone the project to the new release folder + ansible.builtin.git: + repo: ansible.builtin.git://foosball.example.org/path/to/repo.git + dest: '{{ deploy_helper.new_release_path }}' + version: v1.1.1 +- name: Add an unfinished file, to allow cleanup on successful finalize + ansible.builtin.file: + path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}' + state: touch +- name: Perform some build steps, like running your dependency manager for example + composer: + command: install + working_dir: '{{ deploy_helper.new_release_path }}' +- name: Create some folders in the shared folder + ansible.builtin.file: + path: '{{ deploy_helper.shared_path }}/{{ item }}' + state: directory + with_items: + - sessions + - uploads +- name: Add symlinks from the new release to the shared folder + ansible.builtin.file: + path: '{{ deploy_helper.new_release_path }}/{{ item.path }}' + src: '{{ deploy_helper.shared_path }}/{{ item.src }}' + state: link + with_items: + - path: app/sessions + src: sessions + - path: web/uploads + src: uploads +- name: Finalize the deploy, removing the unfinished file and switching the symlink + community.general.deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Retrieving facts before running a deploy +- name: Run 'state=query' to gather facts without changing anything + community.general.deploy_helper: + path: /path/to/root + state: query +# Remember to set the 'release' parameter when you actually call 'state=present' later +- name: Initialize the deploy root + community.general.deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: present + +# all paths can be absolute or relative (to the 'path' parameter) +- community.general.deploy_helper: + path: /path/to/root + releases_path: /var/www/project/releases + shared_path: /var/www/shared + current_path: /var/www/active + +# Using your own naming strategy for releases (a version tag in this case): +- community.general.deploy_helper: + path: /path/to/root + release: v1.1.1 + state: present +- community.general.deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Using a different unfinished_filename: +- community.general.deploy_helper: + path: /path/to/root + unfinished_filename: README.md + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Postponing the cleanup of older builds: +- community.general.deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + clean: false +- community.general.deploy_helper: + path: /path/to/root + state: clean +# Or running the cleanup ahead of the new deploy +- community.general.deploy_helper: + path: /path/to/root + state: clean +- community.general.deploy_helper: + path: /path/to/root + state: present + +# Keeping more old releases: +- community.general.deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + keep_releases: 10 +# Or, if you use 'clean=false' on finalize: +- community.general.deploy_helper: + path: /path/to/root + state: clean + keep_releases: 10 + +# Removing the entire project root folder +- community.general.deploy_helper: + path: /path/to/root + state: absent + +# Debugging the facts returned by the module +- community.general.deploy_helper: + path: /path/to/root +- 
ansible.builtin.debug:
+    var: deploy_helper
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class DeployHelper(object):
+
+    def __init__(self, module):
+        self.module = module
+        self.file_args = module.load_file_common_arguments(module.params)
+
+        self.clean = module.params['clean']
+        self.current_path = module.params['current_path']
+        self.keep_releases = module.params['keep_releases']
+        self.path = module.params['path']
+        self.release = module.params['release']
+        self.releases_path = module.params['releases_path']
+        self.shared_path = module.params['shared_path']
+        self.state = module.params['state']
+        self.unfinished_filename = module.params['unfinished_filename']
+
+    def gather_facts(self):
+        current_path = os.path.join(self.path, self.current_path)
+        releases_path = os.path.join(self.path, self.releases_path)
+        if self.shared_path:
+            shared_path = os.path.join(self.path, self.shared_path)
+        else:
+            shared_path = None
+
+        previous_release, previous_release_path = self._get_last_release(current_path)
+
+        if not self.release and (self.state == 'query' or self.state == 'present'):
+            self.release = time.strftime("%Y%m%d%H%M%S")
+
+        if self.release:
+            new_release_path = os.path.join(releases_path, self.release)
+        else:
+            new_release_path = None
+
+        return {
+            'project_path': self.path,
+            'current_path': current_path,
+            'releases_path': releases_path,
+            'shared_path': shared_path,
+            'previous_release': previous_release,
+            'previous_release_path': previous_release_path,
+            'new_release': self.release,
+            'new_release_path': new_release_path,
+            'unfinished_filename': self.unfinished_filename
+        }
+
+    def delete_path(self, path):
+        if not os.path.lexists(path):
+            return False
+
+        if not os.path.isdir(path):
+            self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        if not self.module.check_mode:
+            try:
+                shutil.rmtree(path, ignore_errors=False)
+            except Exception as e:
+                self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+        return True
+
+    def create_path(self, path):
+        changed = False
+
+        if not os.path.lexists(path):
+            changed = True
+            if not self.module.check_mode:
+                os.makedirs(path)
+
+        elif not os.path.isdir(path):
+            self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+        return changed
+
+    def check_link(self, path):
+        if os.path.lexists(path):
+            if not os.path.islink(path):
+                self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+    def create_link(self, source, link_name):
+        if os.path.islink(link_name):
+            norm_link = os.path.normpath(os.path.realpath(link_name))
+            norm_source = os.path.normpath(os.path.realpath(source))
+            if norm_link == norm_source:
+                changed = False
+            else:
+                changed = True
+                if not self.module.check_mode:
+                    if not os.path.lexists(source):
+                        self.module.fail_json(msg="the symlink target %s doesn't exist" % source)
+                    tmp_link_name = link_name + '.'
+ self.unfinished_filename + if os.path.islink(tmp_link_name): + os.unlink(tmp_link_name) + os.symlink(source, tmp_link_name) + os.rename(tmp_link_name, link_name) + else: + changed = True + if not self.module.check_mode: + os.symlink(source, link_name) + + return changed + + def remove_unfinished_file(self, new_release_path): + changed = False + unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename) + if os.path.lexists(unfinished_file_path): + changed = True + if not self.module.check_mode: + os.remove(unfinished_file_path) + + return changed + + def remove_unfinished_builds(self, releases_path): + changes = 0 + + for release in os.listdir(releases_path): + if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)): + if self.module.check_mode: + changes += 1 + else: + changes += self.delete_path(os.path.join(releases_path, release)) + + return changes + + def remove_unfinished_link(self, path): + changed = False + + if not self.release: + return changed + + tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename) + if not self.module.check_mode and os.path.exists(tmp_link_name): + changed = True + os.remove(tmp_link_name) + + return changed + + def cleanup(self, releases_path, reserve_version): + changes = 0 + + if os.path.lexists(releases_path): + releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))] + try: + releases.remove(reserve_version) + except ValueError: + pass + + if not self.module.check_mode: + releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True) + for release in releases[self.keep_releases:]: + changes += self.delete_path(os.path.join(releases_path, release)) + elif len(releases) > self.keep_releases: + changes += (len(releases) - self.keep_releases) + + return changes + + def _get_file_args(self, path): + file_args = self.file_args.copy() + file_args['path'] = path + return file_args + + def _get_last_release(self, current_path): + previous_release = None + previous_release_path = None + + if os.path.lexists(current_path): + previous_release_path = os.path.realpath(current_path) + previous_release = os.path.basename(previous_release_path) + + return previous_release, previous_release_path + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + path=dict(aliases=['dest'], required=True, type='path'), + release=dict(type='str'), + releases_path=dict(type='str', default='releases'), + shared_path=dict(type='path', default='shared'), + current_path=dict(type='path', default='current'), + keep_releases=dict(type='int', default=5), + clean=dict(type='bool', default=True), + unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'), + state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + ), + required_if=[ + ('state', 'finalize', ['release']), + ], + add_file_common_args=True, + supports_check_mode=True + ) + + deploy_helper = DeployHelper(module) + facts = deploy_helper.gather_facts() + + result = { + 'state': deploy_helper.state + } + + changes = 0 + + if deploy_helper.state == 'query': + result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'present': + deploy_helper.check_link(facts['current_path']) + changes += deploy_helper.create_path(facts['project_path']) + changes += deploy_helper.create_path(facts['releases_path']) + if deploy_helper.shared_path: + changes += deploy_helper.create_path(facts['shared_path']) + + 
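+        # Like 'query', 'present' also returns the gathered facts, so later
+        # tasks can reuse values such as deploy_helper.new_release_path.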
result['ansible_facts'] = {'deploy_helper': facts} + + elif deploy_helper.state == 'finalize': + if deploy_helper.keep_releases <= 0: + module.fail_json(msg="'keep_releases' should be at least 1") + + changes += deploy_helper.remove_unfinished_file(facts['new_release_path']) + changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path']) + if deploy_helper.clean: + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'clean': + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'absent': + # destroy the facts + result['ansible_facts'] = {'deploy_helper': []} + changes += deploy_helper.delete_path(facts['project_path']) + + if changes > 0: + result['changed'] = True + else: + result['changed'] = False + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/dimensiondata_network.py b/ansible_collections/community/general/plugins/modules/dimensiondata_network.py new file mode 100644 index 000000000..8c1469063 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/dimensiondata_network.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Dimension Data +# Authors: +# - Aimon Bustardo +# - Bert Diwa +# - Adam Friedman +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: dimensiondata_network +short_description: Create, update, and delete MCP 1.0 & 2.0 networks +extends_documentation_fragment: + - community.general.dimensiondata + - community.general.dimensiondata_wait + - community.general.attributes + +description: + - Create, update, and delete MCP 1.0 & 2.0 networks +author: 'Aimon Bustardo (@aimonb)' +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - The name of the network domain to create. + required: true + type: str + description: + description: + - Additional description of the network domain. + required: false + type: str + service_plan: + description: + - The service plan, either "ESSENTIALS" or "ADVANCED". + - MCP 2.0 Only. + choices: [ESSENTIALS, ADVANCED] + default: ESSENTIALS + type: str + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present + type: str +''' + +EXAMPLES = ''' +- name: Create an MCP 1.0 network + community.general.dimensiondata_network: + region: na + location: NA5 + name: mynet + +- name: Create an MCP 2.0 network + community.general.dimensiondata_network: + region: na + mcp_user: my_user + mcp_password: my_password + location: NA9 + name: mynet + service_plan: ADVANCED + +- name: Delete a network + community.general.dimensiondata_network: + region: na + location: NA1 + name: mynet + state: absent +''' + +RETURN = ''' +network: + description: Dictionary describing the network. + returned: On success when I(state=present). 
+ type: complex + contains: + id: + description: Network ID. + type: str + sample: "8c787000-a000-4050-a215-280893411a7d" + name: + description: Network name. + type: str + sample: "My network" + description: + description: Network description. + type: str + sample: "My network description" + location: + description: Datacenter location. + type: str + sample: NA3 + status: + description: Network status. (MCP 2.0 only) + type: str + sample: NORMAL + private_net: + description: Private network subnet. (MCP 1.0 only) + type: str + sample: "10.2.3.0" + multicast: + description: Multicast enabled? (MCP 1.0 only) + type: bool + sample: false +''' +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule +from ansible.module_utils.common.text.converters import to_native + +if HAS_LIBCLOUD: + from libcloud.compute.base import NodeLocation + from libcloud.common.dimensiondata import DimensionDataAPIException + + +class DimensionDataNetworkModule(DimensionDataModule): + """ + The dimensiondata_network module for Ansible. + """ + + def __init__(self): + """ + Create a new Dimension Data network module. + """ + + super(DimensionDataNetworkModule, self).__init__( + module=AnsibleModule( + argument_spec=DimensionDataModule.argument_spec_with_wait( + name=dict(type='str', required=True), + description=dict(type='str', required=False), + service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']), + state=dict(default='present', choices=['present', 'absent']) + ), + required_together=DimensionDataModule.required_together() + ) + ) + + self.name = self.module.params['name'] + self.description = self.module.params['description'] + self.service_plan = self.module.params['service_plan'] + self.state = self.module.params['state'] + + def state_present(self): + network = self._get_network() + + if network: + self.module.exit_json( + changed=False, + msg='Network already exists', + network=self._network_to_dict(network) + ) + + network = self._create_network() + + self.module.exit_json( + changed=True, + msg='Created network "%s" in datacenter "%s".' 
% (self.name, self.location), + network=self._network_to_dict(network) + ) + + def state_absent(self): + network = self._get_network() + + if not network: + self.module.exit_json( + changed=False, + msg='Network "%s" does not exist' % self.name, + network=None + ) + + self._delete_network(network) + + def _get_network(self): + if self.mcp_version == '1.0': + networks = self.driver.list_networks(location=self.location) + else: + networks = self.driver.ex_list_network_domains(location=self.location) + + matched_network = [network for network in networks if network.name == self.name] + if matched_network: + return matched_network[0] + + return None + + def _network_to_dict(self, network): + network_dict = dict( + id=network.id, + name=network.name, + description=network.description + ) + + if isinstance(network.location, NodeLocation): + network_dict['location'] = network.location.id + else: + network_dict['location'] = network.location + + if self.mcp_version == '1.0': + network_dict['private_net'] = network.private_net + network_dict['multicast'] = network.multicast + network_dict['status'] = None + else: + network_dict['private_net'] = None + network_dict['multicast'] = None + network_dict['status'] = network.status + + return network_dict + + def _create_network(self): + + # Make sure service_plan argument is defined + if self.mcp_version == '2.0' and 'service_plan' not in self.module.params: + self.module.fail_json( + msg='service_plan required when creating network and location is MCP 2.0' + ) + + # Create network + try: + if self.mcp_version == '1.0': + network = self.driver.ex_create_network( + self.location, + self.name, + description=self.description + ) + else: + network = self.driver.ex_create_network_domain( + self.location, + self.name, + self.module.params['service_plan'], + description=self.description + ) + except DimensionDataAPIException as e: + + self.module.fail_json( + msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc() + ) + + if self.module.params['wait'] is True: + network = self._wait_for_network_state(network.id, 'NORMAL') + + return network + + def _delete_network(self, network): + try: + if self.mcp_version == '1.0': + deleted = self.driver.ex_delete_network(network) + else: + deleted = self.driver.ex_delete_network_domain(network) + + if deleted: + self.module.exit_json( + changed=True, + msg="Deleted network with id %s" % network.id + ) + + self.module.fail_json( + msg="Unexpected failure deleting network with id %s" % network.id + ) + + except DimensionDataAPIException as e: + self.module.fail_json( + msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc() + ) + + def _wait_for_network_state(self, net_id, state_to_wait_for): + try: + return self.driver.connection.wait_for_state( + state_to_wait_for, + self.driver.ex_get_network_domain, + self.module.params['wait_poll_interval'], + self.module.params['wait_time'], + net_id + ) + except DimensionDataAPIException as e: + self.module.fail_json( + msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)), + exception=traceback.format_exc() + ) + + +def main(): + module = DimensionDataNetworkModule() + if module.state == 'present': + module.state_present() + elif module.state == 'absent': + module.state_absent() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
b/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py new file mode 100644 index 000000000..7d83ddc69 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py @@ -0,0 +1,564 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016 Dimension Data +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +# Authors: +# - Adam Friedman + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: dimensiondata_vlan +short_description: Manage a VLAN in a Cloud Control network domain +extends_documentation_fragment: + - community.general.dimensiondata + - community.general.dimensiondata_wait + - community.general.attributes + +description: + - Manage VLANs in Cloud Control network domains. +author: 'Adam Friedman (@tintoy)' +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - The name of the target VLAN. + type: str + required: true + description: + description: + - A description of the VLAN. + type: str + default: '' + network_domain: + description: + - The ID or name of the target network domain. + required: true + type: str + private_ipv4_base_address: + description: + - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0). + type: str + default: '' + private_ipv4_prefix_size: + description: + - The size of the IPv4 address space, e.g. 24. + - Required if C(private_ipv4_base_address) is specified. + type: int + default: 0 + state: + description: + - The desired state for the target VLAN. + - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist). + choices: [present, absent, readonly] + default: present + type: str + allow_expand: + description: + - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses. + - If C(False), the module will fail under these conditions. + - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible). + type: bool + default: false +''' + +EXAMPLES = ''' +- name: Add or update VLAN + community.general.dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan1 + description: A test VLAN + private_ipv4_base_address: 192.168.23.0 + private_ipv4_prefix_size: 24 + state: present + wait: true + +- name: Read / get VLAN details + community.general.dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan1 + state: readonly + wait: true + +- name: Delete a VLAN + community.general.dimensiondata_vlan: + region: na + location: NA5 + network_domain: test_network + name: my_vlan_1 + state: absent + wait: true +''' + +RETURN = ''' +vlan: + description: Dictionary describing the VLAN. + returned: On success when I(state) is 'present' + type: complex + contains: + id: + description: VLAN ID. + type: str + sample: "aaaaa000-a000-4050-a215-2808934ccccc" + name: + description: VLAN name. + type: str + sample: "My VLAN" + description: + description: VLAN description. + type: str + sample: "My VLAN description" + location: + description: Datacenter location. + type: str + sample: NA3 + private_ipv4_base_address: + description: The base address for the VLAN's private IPV4 network.
+ type: str + sample: 192.168.23.0 + private_ipv4_prefix_size: + description: The prefix size for the VLAN's private IPV4 network. + type: int + sample: 24 + private_ipv4_gateway_address: + description: The gateway address for the VLAN's private IPV4 network. + type: str + sample: 192.168.23.1 + private_ipv6_base_address: + description: The base address for the VLAN's IPV6 network. + type: str + sample: 2402:9900:111:1195:0:0:0:0 + private_ipv6_prefix_size: + description: The prefix size for the VLAN's IPV6 network. + type: int + sample: 64 + private_ipv6_gateway_address: + description: The gateway address for the VLAN's IPV6 network. + type: str + sample: 2402:9900:111:1195:0:0:0:1 + status: + description: VLAN status. + type: str + sample: NORMAL +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError + +try: + from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException + + HAS_LIBCLOUD = True + +except ImportError: + DimensionDataVlan = None + + HAS_LIBCLOUD = False + + +class DimensionDataVlanModule(DimensionDataModule): + """ + The dimensiondata_vlan module for Ansible. + """ + + def __init__(self): + """ + Create a new Dimension Data VLAN module. + """ + + super(DimensionDataVlanModule, self).__init__( + module=AnsibleModule( + argument_spec=DimensionDataModule.argument_spec_with_wait( + name=dict(required=True, type='str'), + description=dict(default='', type='str'), + network_domain=dict(required=True, type='str'), + private_ipv4_base_address=dict(default='', type='str'), + private_ipv4_prefix_size=dict(default=0, type='int'), + allow_expand=dict(required=False, default=False, type='bool'), + state=dict(default='present', choices=['present', 'absent', 'readonly']) + ), + required_together=DimensionDataModule.required_together() + ) + ) + + self.name = self.module.params['name'] + self.description = self.module.params['description'] + self.network_domain_selector = self.module.params['network_domain'] + self.private_ipv4_base_address = self.module.params['private_ipv4_base_address'] + self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size'] + self.state = self.module.params['state'] + self.allow_expand = self.module.params['allow_expand'] + + if self.wait and self.state != 'present': + self.module.fail_json( + msg='The wait parameter is only supported when state is "present".' + ) + + def state_present(self): + """ + Ensure that the target VLAN is present. 
+ """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if not vlan: + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format( + self.name, self.network_domain_selector + ), + changed=True + ) + + vlan = self._create_vlan(network_domain) + self.module.exit_json( + msg='Created VLAN "{0}" in network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + else: + diff = VlanDiff(vlan, self.module.params) + if not diff.has_changes(): + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=False + ) + + return + + try: + diff.ensure_legal_change() + except InvalidVlanChangeError as invalid_vlan_change: + self.module.fail_json( + msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format( + self.name, self.network_domain_selector, invalid_vlan_change + ) + ) + + if diff.needs_expand() and not self.allow_expand: + self.module.fail_json( + msg='The configured private IPv4 network size ({0}-bit prefix) for '.format( + self.private_ipv4_prefix_size + ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format( + vlan.private_ipv4_range_size + ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.' + ) + + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + if diff.needs_edit(): + vlan.name = self.name + vlan.description = self.description + + self.driver.ex_update_vlan(vlan) + + if diff.needs_expand(): + vlan.private_ipv4_range_size = self.private_ipv4_prefix_size + self.driver.ex_expand_vlan(vlan) + + self.module.exit_json( + msg='Updated VLAN "{0}" in network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + def state_readonly(self): + """ + Read the target VLAN's state. + """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if vlan: + self.module.exit_json( + vlan=vlan_to_dict(vlan), + changed=False + ) + else: + self.module.fail_json( + msg='VLAN "{0}" does not exist in network domain "{1}".'.format( + self.name, self.network_domain_selector + ) + ) + + def state_absent(self): + """ + Ensure that the target VLAN is not present. + """ + + network_domain = self._get_network_domain() + + vlan = self._get_vlan(network_domain) + if not vlan: + self.module.exit_json( + msg='VLAN "{0}" is absent from network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + changed=False + ) + + return + + if self.module.check_mode: + self.module.exit_json( + msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format( + self.name, self.network_domain_selector + ), + vlan=vlan_to_dict(vlan), + changed=True + ) + + self._delete_vlan(vlan) + + self.module.exit_json( + msg='Deleted VLAN "{0}" from network domain "{1}".'.format( + self.name, self.network_domain_selector + ), + changed=True + ) + + def _get_vlan(self, network_domain): + """ + Retrieve the target VLAN details from CloudControl. + + :param network_domain: The target network domain. + :return: The VLAN, or None if the target VLAN was not found. 
+ :rtype: DimensionDataVlan + """ + + vlans = self.driver.ex_list_vlans( + location=self.location, + network_domain=network_domain + ) + matching_vlans = [vlan for vlan in vlans if vlan.name == self.name] + if matching_vlans: + return matching_vlans[0] + + return None + + def _create_vlan(self, network_domain): + vlan = self.driver.ex_create_vlan( + network_domain, + self.name, + self.private_ipv4_base_address, + self.description, + self.private_ipv4_prefix_size + ) + + if self.wait: + vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL') + + return vlan + + def _delete_vlan(self, vlan): + try: + self.driver.ex_delete_vlan(vlan) + + # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present"). + if self.wait: + self._wait_for_vlan_state(vlan, 'NOT_FOUND') + + except DimensionDataAPIException as api_exception: + self.module.fail_json( + msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format( + vlan.id, api_exception.msg + ) + ) + + def _wait_for_vlan_state(self, vlan, state_to_wait_for): + network_domain = self._get_network_domain() + + wait_poll_interval = self.module.params['wait_poll_interval'] + wait_time = self.module.params['wait_time'] + + # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try. + + try: + return self.driver.connection.wait_for_state( + state_to_wait_for, + self.driver.ex_get_vlan, + wait_poll_interval, + wait_time, + vlan + ) + + except DimensionDataAPIException as api_exception: + if api_exception.code != 'RESOURCE_NOT_FOUND': + raise + + return DimensionDataVlan( + id=vlan.id, + status='NOT_FOUND', + name='', + description='', + private_ipv4_range_address='', + private_ipv4_range_size=0, + ipv4_gateway='', + ipv6_range_address='', + ipv6_range_size=0, + ipv6_gateway='', + location=self.location, + network_domain=network_domain + ) + + def _get_network_domain(self): + """ + Retrieve the target network domain from the Cloud Control API. + + :return: The network domain. + """ + + try: + return self.get_network_domain( + self.network_domain_selector, self.location + ) + except UnknownNetworkError: + self.module.fail_json( + msg='Cannot find network domain "{0}" in datacenter "{1}".'.format( + self.network_domain_selector, self.location + ) + ) + + return None + + +class InvalidVlanChangeError(Exception): + """ + Error raised when an illegal change to VLAN state is attempted. + """ + + pass + + +class VlanDiff(object): + """ + Represents differences between VLAN information (from CloudControl) and module parameters. + """ + + def __init__(self, vlan, module_params): + """ + + :param vlan: The VLAN information from CloudControl. + :type vlan: DimensionDataVlan + :param module_params: The module parameters. + :type module_params: dict + """ + + self.vlan = vlan + self.module_params = module_params + + self.name_changed = module_params['name'] != vlan.name + self.description_changed = module_params['description'] != vlan.description + self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address + self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size + + # Is configured prefix size greater than or less than the actual prefix size? 
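+ # Illustrative example (added note, not from the original module): with + # an existing 24-bit prefix, a requested prefix size of 16 gives a + # difference of -8 (an expansion), while a requested 26 gives +2, which + # would shrink the network and is rejected by ensure_legal_change().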
+ private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size + self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0 + self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0 + + def has_changes(self): + """ + Does the VlanDiff represent any changes between the VLAN and module configuration? + + :return: True, if there are changes; otherwise, False. + """ + + return self.needs_edit() or self.needs_expand() + + def ensure_legal_change(self): + """ + Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state. + + - private_ipv4_base_address cannot be changed + - private_ipv4_prefix_size must be less than or equal to the VLAN's existing private_ipv4_range_size + + :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state. + """ + + # Cannot change base address for private IPv4 network. + if self.private_ipv4_base_address_changed: + raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.') + + # Cannot shrink private IPv4 network (by increasing prefix size). + if self.private_ipv4_prefix_size_increased: + raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).') + + def needs_edit(self): + """ + Is an Edit operation required to resolve the differences between the VLAN information and the module parameters? + + :return: True, if an Edit operation is required; otherwise, False. + """ + + return self.name_changed or self.description_changed + + def needs_expand(self): + """ + Is an Expand operation required to resolve the differences between the VLAN information and the module parameters? + + The VLAN's network is expanded by reducing the size of its network prefix. + + :return: True, if an Expand operation is required; otherwise, False.
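+ + Illustrative example: a VLAN currently using a 26-bit prefix with + module parameters requesting a 24-bit prefix needs an Expand, since + a smaller prefix size means a larger IPv4 range.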
+ """ + + return self.private_ipv4_prefix_size_decreased + + +def vlan_to_dict(vlan): + return { + 'id': vlan.id, + 'name': vlan.name, + 'description': vlan.description, + 'location': vlan.location.id, + 'private_ipv4_base_address': vlan.private_ipv4_range_address, + 'private_ipv4_prefix_size': vlan.private_ipv4_range_size, + 'private_ipv4_gateway_address': vlan.ipv4_gateway, + 'ipv6_base_address': vlan.ipv6_range_address, + 'ipv6_prefix_size': vlan.ipv6_range_size, + 'ipv6_gateway_address': vlan.ipv6_gateway, + 'status': vlan.status + } + + +def main(): + module = DimensionDataVlanModule() + + if module.state == 'present': + module.state_present() + elif module.state == 'readonly': + module.state_readonly() + elif module.state == 'absent': + module.state_absent() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/discord.py b/ansible_collections/community/general/plugins/modules/discord.py new file mode 100644 index 000000000..8b5391d44 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/discord.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Christian Wollinger +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: discord +short_description: Send Discord messages +version_added: 3.1.0 +description: + - Sends a message to a Discord channel using the Discord webhook API. +author: Christian Wollinger (@cwollinger) +seealso: + - name: API documentation + description: Documentation for Discord API + link: https://discord.com/developers/docs/resources/webhook#execute-webhook +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + webhook_id: + description: + - The webhook ID. + - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." + required: true + type: str + webhook_token: + description: + - The webhook token. + - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." + required: true + type: str + content: + description: + - Content of the message to the Discord channel. + - At least one of I(content) and I(embeds) must be specified. + type: str + username: + description: + - Overrides the default username of the webhook. + type: str + avatar_url: + description: + - Overrides the default avatar of the webhook. + type: str + tts: + description: + - Set this to C(true) if this is a TTS (Text to Speech) message. + type: bool + default: false + embeds: + description: + - Send messages as Embeds to the Discord channel. + - Embeds can have a colored border, embedded images, text fields and more. + - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)" + - At least one of I(content) and I(embeds) must be specified. 
+ type: list + elements: dict +''' + +EXAMPLES = """ +- name: Send a message to the Discord channel + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + content: "This is a message from ansible" + +- name: Send a message to the Discord channel with specific username and avatar + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + content: "This is a message from ansible" + username: Ansible + avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + +- name: Send an embedded message to the Discord channel + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + embeds: + - title: "Embedded message" + description: "This is an embedded message" + footer: + text: "Author: Ansible" + image: + url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + +- name: Send two embedded messages + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + embeds: + - title: "First message" + description: "This is my first embedded message" + footer: + text: "Author: Ansible" + image: + url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + - title: "Second message" + description: "This is my second message" + footer: + text: "Author: Ansible" + icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + fields: + - name: "Field 1" + value: "Value of my first field" + - name: "Field 2" + value: "Value of my second field" + timestamp: "{{ ansible_date_time.iso8601 }}" +""" + +RETURN = """ +http_code: + description: + - Response Code returned by Discord API. + returned: always + type: int + sample: 204 +""" + +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.basic import AnsibleModule + + +def discord_check_mode(module): + + webhook_id = module.params['webhook_id'] + webhook_token = module.params['webhook_token'] + + headers = { + 'content-type': 'application/json' + } + + url = "https://discord.com/api/webhooks/%s/%s" % ( + webhook_id, webhook_token) + + response, info = fetch_url(module, url, method='GET', headers=headers) + return response, info + + +def discord_text_msg(module): + + webhook_id = module.params['webhook_id'] + webhook_token = module.params['webhook_token'] + content = module.params['content'] + user = module.params['username'] + avatar_url = module.params['avatar_url'] + tts = module.params['tts'] + embeds = module.params['embeds'] + + headers = { + 'content-type': 'application/json' + } + + url = "https://discord.com/api/webhooks/%s/%s" % ( + webhook_id, webhook_token) + + payload = { + 'content': content, + 'username': user, + 'avatar_url': avatar_url, + 'tts': tts, + 'embeds': embeds, + } + + payload = module.jsonify(payload) + + response, info = fetch_url(module, url, data=payload, headers=headers, method='POST') + return response, info + + +def main(): + module = AnsibleModule( + argument_spec=dict( + webhook_id=dict(type='str', required=True), + webhook_token=dict(type='str', required=True, no_log=True), + content=dict(type='str'), + username=dict(type='str'), + avatar_url=dict(type='str'), + tts=dict(type='bool', default=False), + embeds=dict(type='list', elements='dict'), + ), + required_one_of=[['content', 'embeds']], + supports_check_mode=True + ) + + result = dict( + changed=False, + http_code='', + ) + + if module.check_mode: + response, info = discord_check_mode(module) + if info['status'] != 200: + try: + module.fail_json(http_code=info['status'],
msg=info['msg'], response=module.from_json(info['body']), info=info) + except Exception: + module.fail_json(http_code=info['status'], msg=info['msg'], info=info) + else: + module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read())) + else: + response, info = discord_text_msg(module) + if info['status'] != 204: + try: + module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) + except Exception: + module.fail_json(http_code=info['status'], msg=info['msg'], info=info) + else: + module.exit_json(msg=info['msg'], changed=True, http_code=info['status']) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/django_manage.py b/ansible_collections/community/general/plugins/modules/django_manage.py new file mode 100644 index 000000000..537cf0fa7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/django_manage.py @@ -0,0 +1,418 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Alexei Znamensky +# Copyright (c) 2013, Scott Anderson +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: django_manage +short_description: Manages a Django application +description: + - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the + I(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + command: + description: + - The name of the Django management command to run. The commands listed below are built in this module and have some basic parameter validation. + - > + C(cleanup) - clean up old data from the database (deprecated in Django 1.5). This parameter will be + removed in community.general 9.0.0. Use C(clearsessions) instead. + - C(collectstatic) - Collects the static files into C(STATIC_ROOT). + - C(createcachetable) - Creates the cache tables for use with the database cache backend. + - C(flush) - Removes all data from the database. + - C(loaddata) - Searches for and loads the contents of the named I(fixtures) into the database. + - C(migrate) - Synchronizes the database state with models and migrations. + - > + C(syncdb) - Synchronizes the database state with models and migrations (deprecated in Django 1.7). + This parameter will be removed in community.general 9.0.0. Use C(migrate) instead. + - C(test) - Runs tests for all installed apps. + - > + C(validate) - Validates all installed models (deprecated in Django 1.7). This parameter will be + removed in community.general 9.0.0. Use C(check) instead. + - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may + prompt for user input should be run with the C(--noinput) flag. + type: str + required: true + project_path: + description: + - The path to the root of the Django application where C(manage.py) lives. + type: path + required: true + aliases: [app_path, chdir] + settings: + description: + - The Python path to the application's settings module, such as C(myapp.settings). 
+ type: path + required: false + pythonpath: + description: + - A directory to add to the Python path. Typically used to include the settings module if it is located + external to the application directory. + - This would be equivalent to adding I(pythonpath)'s value to the C(PYTHONPATH) environment variable. + type: path + required: false + aliases: [python_path] + virtualenv: + description: + - An optional path to a C(virtualenv) installation to use while running the manage application. + type: path + aliases: [virtual_env] + apps: + description: + - A list of space-delimited apps to target. Used by the C(test) command. + type: str + required: false + cache_table: + description: + - The name of the table used for database-backed caching. Used by the C(createcachetable) command. + type: str + required: false + clear: + description: + - Clear the existing files before trying to copy or link the original file. + - Used only with the C(collectstatic) command. The C(--noinput) argument will be added automatically. + required: false + default: false + type: bool + database: + description: + - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb), + and C(migrate) commands. + type: str + required: false + failfast: + description: + - Fail the command immediately if a test fails. Used by the C(test) command. + required: false + default: false + type: bool + aliases: [fail_fast] + fixtures: + description: + - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command. + type: str + required: false + skip: + description: + - Will skip over out-of-order missing migrations, you can only use this parameter with C(migrate) command. + required: false + type: bool + merge: + description: + - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this + parameter with C(migrate) command. + required: false + type: bool + link: + description: + - Will create links to the files instead of copying them, you can only use this parameter with + C(collectstatic) command. + required: false + type: bool + testrunner: + description: + - Controls the test runner class that is used to execute tests. + - This parameter is passed as-is to C(manage.py). + type: str + required: false + aliases: [test_runner] + ack_venv_creation_deprecation: + description: + - >- + When a I(virtualenv) is set but the virtual environment does not exist, the current behavior is + to create a new virtual environment. That behavior is deprecated and if that case happens it will + generate a deprecation warning. Set this flag to C(true) to suppress the deprecation warning. + - Please note that you will receive no further warning about this being removed until the module + will start failing in such cases from community.general 9.0.0 on. + type: bool + version_added: 5.8.0 + +notes: + - > + B(ATTENTION - DEPRECATION): Support for Django releases older than 4.1 will be removed in + community.general version 9.0.0 (estimated to be released in May 2024). + Please notice that Django 4.1 requires Python 3.8 or greater. + - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter + is specified. This requirement is deprecated and will be removed in community.general version 9.0.0. + - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already + exist at the given location. 
This behavior is deprecated and will be removed in community.general version 9.0.0. + - The parameter I(virtualenv) will remain in use, but it will require the specified virtualenv to exist. + The recommended way to create one in Ansible is by using M(ansible.builtin.pip). + - This module assumes English error messages for the C(createcachetable) command to detect table existence, + unfortunately. + - To be able to use the C(migrate) command with Django versions < 1.7, you must have C(south) installed and added + as an app in your settings. + - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings. + - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang, + i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter. +seealso: + - name: django-admin and manage.py Reference + description: Reference for C(django-admin) or C(manage.py) commands. + link: https://docs.djangoproject.com/en/4.1/ref/django-admin/ + - name: Django Download page + description: The page showing how to get Django and the timeline of supported releases. + link: https://www.djangoproject.com/download/ + - name: What Python version can I use with Django? + description: From the Django FAQ, the response to Python requirements for the framework. + link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django +requirements: [ "virtualenv", "django" ] +author: + - Alexei Znamensky (@russoz) + - Scott Anderson (@tastychutney) +''' + +EXAMPLES = """ +- name: Run cleanup on the application installed in django_dir + community.general.django_manage: + command: cleanup + project_path: "{{ django_dir }}" + +- name: Load the initial_data fixture into the application + community.general.django_manage: + command: loaddata + project_path: "{{ django_dir }}" + fixtures: "{{ initial_data }}" + +- name: Run syncdb on the application + community.general.django_manage: + command: syncdb + project_path: "{{ django_dir }}" + settings: "{{ settings_app_name }}" + pythonpath: "{{ settings_dir }}" + virtualenv: "{{ virtualenv_dir }}" + +- name: Run the SmokeTest test case from the main app. Useful for testing deploys + community.general.django_manage: + command: test + project_path: "{{ django_dir }}" + apps: main.SmokeTest + +- name: Create an initial superuser + community.general.django_manage: + command: "createsuperuser --noinput --username=admin --email=admin@example.com" + project_path: "{{ django_dir }}" +""" + +import os +import sys +import shlex + +from ansible.module_utils.basic import AnsibleModule + + +def _fail(module, cmd, out, err, **kwargs): + msg = '' + if out: + msg += "stdout: %s" % (out, ) + if err: + msg += "\nstderr: %s" % (err, ) + module.fail_json(cmd=cmd, msg=msg, **kwargs) + + +def _ensure_virtualenv(module): + + venv_param = module.params['virtualenv'] + if venv_param is None: + return + + vbin = os.path.join(venv_param, 'bin') + activate = os.path.join(vbin, 'activate') + + if not os.path.exists(activate): + # In version 9.0.0, if the venv is not found, it should fail_json() here. + if not module.params['ack_venv_creation_deprecation']: + module.deprecate( + 'The behavior of "creating the virtual environment when missing" is being ' + 'deprecated and will be removed in community.general version 9.0.0.
' + 'Set the module parameter `ack_venv_creation_deprecation: true` to ' + 'prevent this message from showing up when creating a virtualenv.', + version='9.0.0', + collection_name='community.general', + ) + + virtualenv = module.get_bin_path('virtualenv', True) + vcmd = [virtualenv, venv_param] + rc, out_venv, err_venv = module.run_command(vcmd) + if rc != 0: + _fail(module, vcmd, out_venv, err_venv) + + os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"]) + os.environ["VIRTUAL_ENV"] = venv_param + + +def createcachetable_check_changed(output): + return "already exists" not in output + + +def flush_filter_output(line): + return "Installed" in line and "Installed 0 object" not in line + + +def loaddata_filter_output(line): + return "Installed" in line and "Installed 0 object" not in line + + +def syncdb_filter_output(line): + return ("Creating table " in line) \ + or ("Installed" in line and "Installed 0 object" not in line) + + +def migrate_filter_output(line): + return ("Migrating forwards " in line) \ + or ("Installed" in line and "Installed 0 object" not in line) \ + or ("Applying" in line) + + +def collectstatic_filter_output(line): + return line and "0 static files" not in line + + +def main(): + command_allowed_param_map = dict( + cleanup=(), + createcachetable=('cache_table', 'database', ), + flush=('database', ), + loaddata=('database', 'fixtures', ), + syncdb=('database', ), + test=('failfast', 'testrunner', 'apps', ), + validate=(), + migrate=('apps', 'skip', 'merge', 'database',), + collectstatic=('clear', 'link', ), + ) + + command_required_param_map = dict( + loaddata=('fixtures', ), + ) + + # forces --noinput on every command that needs it + noinput_commands = ( + 'flush', + 'syncdb', + 'migrate', + 'test', + 'collectstatic', + ) + + # These params are allowed for certain commands only + specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'testrunner') + + # These params are automatically added to the command if present + general_params = ('settings', 'pythonpath', 'database',) + specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link') + end_of_command_params = ('apps', 'cache_table', 'fixtures') + + module = AnsibleModule( + argument_spec=dict( + command=dict(required=True, type='str'), + project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']), + settings=dict(type='path'), + pythonpath=dict(type='path', aliases=['python_path']), + virtualenv=dict(type='path', aliases=['virtual_env']), + + apps=dict(), + cache_table=dict(type='str'), + clear=dict(default=False, type='bool'), + database=dict(type='str'), + failfast=dict(default=False, type='bool', aliases=['fail_fast']), + fixtures=dict(type='str'), + testrunner=dict(type='str', aliases=['test_runner']), + skip=dict(type='bool'), + merge=dict(type='bool'), + link=dict(type='bool'), + ack_venv_creation_deprecation=dict(type='bool'), + ), + ) + + command_split = shlex.split(module.params['command']) + command_bin = command_split[0] + project_path = module.params['project_path'] + virtualenv = module.params['virtualenv'] + + try: + _deprecation = dict( + cleanup="clearsessions", + syncdb="migrate", + validate="check", + ) + module.deprecate( + 'The command {0} has been deprecated as it is no longer supported in recent Django versions.' 
+ ' Please use the command {1} instead, which provides similar capability.'.format(command_bin, _deprecation[command_bin]), + version='9.0.0', + collection_name='community.general' + ) + except KeyError: + pass + + for param in specific_params: + value = module.params[param] + if value and param not in command_allowed_param_map[command_bin]: + module.fail_json(msg='%s param is incompatible with command=%s' % (param, command_bin)) + + for param in command_required_param_map.get(command_bin, ()): + if not module.params[param]: + module.fail_json(msg='%s param is required for command=%s' % (param, command_bin)) + + _ensure_virtualenv(module) + + run_cmd_args = ["./manage.py"] + command_split + + if command_bin in noinput_commands and '--noinput' not in command_split: + run_cmd_args.append("--noinput") + + for param in general_params: + if module.params[param]: + run_cmd_args.append('--%s=%s' % (param, module.params[param])) + + for param in specific_boolean_params: + if module.params[param]: + run_cmd_args.append('--%s' % param) + + # these params always get tacked on the end of the command + for param in end_of_command_params: + if module.params[param]: + if param in ('fixtures', 'apps'): + run_cmd_args.extend(shlex.split(module.params[param])) + else: + run_cmd_args.append(module.params[param]) + + rc, out, err = module.run_command(run_cmd_args, cwd=project_path) + if rc != 0: + if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err: + out = 'already exists.' + else: + if "Unknown command:" in err: + _fail(module, run_cmd_args, err, "Unknown django command: %s" % command_bin) + _fail(module, run_cmd_args, out, err, path=os.environ["PATH"], syspath=sys.path) + + changed = False + + lines = out.split('\n') + filt = globals().get(command_bin + "_filter_output", None) + if filt: + filtered_output = list(filter(filt, lines)) + if len(filtered_output): + changed = True + check_changed = globals().get("{0}_check_changed".format(command_bin), None) + if check_changed: + changed = check_changed(out) + + module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path, + virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath']) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/dnf_versionlock.py b/ansible_collections/community/general/plugins/modules/dnf_versionlock.py new file mode 100644 index 000000000..fac3ad78d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/dnf_versionlock.py @@ -0,0 +1,355 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Roberto Moreda +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: dnf_versionlock +version_added: '4.0.0' +short_description: Locks package versions in C(dnf) based systems +description: +- Locks package versions using the C(versionlock) plugin in C(dnf) based + systems. This plugin takes a set of name and versions for packages and + excludes all other versions of those packages. This allows you, for example, + to protect packages from being updated to newer versions. The state of the + plugin that reflects locking of packages is the C(locklist).
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - The logic of the C(versionlock) plugin for corner cases could be + confusing, so please take into account that this module will do its best to + give a C(check_mode) prediction on what is going to happen. In case of + doubt, check the documentation of the plugin. + - Sometimes the module could predict changes in C(check_mode) that will not + happen because C(versionlock) concludes that there is already an entry in + C(locklist) that matches. + diff_mode: + support: none +options: + name: + description: + - Package name spec to add to, exclude from, or delete from the C(locklist) + using the format expected by the C(dnf repoquery) command. + - This parameter is mutually exclusive with I(state=clean). + type: list + required: false + elements: str + default: [] + raw: + description: + - Do not resolve package name specs to NEVRAs to find the specific version + to lock to. Instead, the package name specs are used as they are. This + enables locking to not-yet-available versions of the package. + type: bool + default: false + state: + description: + - Whether to add (C(present) or C(excluded)) to or remove (C(absent) or + C(clean)) from the C(locklist). + - C(present) will add a package name spec to the C(locklist). If there is an + installed package that matches, then only that version will be added. + Otherwise, all available package versions will be added. + - C(excluded) will add a package name spec as excluded to the + C(locklist). It means that packages represented by the package name + spec will be excluded from transaction operations. All available + package versions will be added. + - C(absent) will delete entries in the C(locklist) that match the + package name spec. + - C(clean) will delete all entries in the C(locklist). This option is + mutually exclusive with C(name). + choices: [ 'absent', 'clean', 'excluded', 'present' ] + type: str + default: present +notes: + - In an ideal world, the C(versionlock) plugin would have a dry-run option to + know for sure what is going to happen. So far we have to work with a best + guess as close as possible to the behaviour inferred from its code. + - For most cases where you want to lock and unlock specific versions of a + package, this works fairly well. +requirements: + - dnf + - dnf-plugin-versionlock +author: + - Roberto Moreda (@moreda) +''' + +EXAMPLES = r''' +- name: Prevent installed nginx from being updated + community.general.dnf_versionlock: + name: nginx + state: present + +- name: Prevent multiple packages from being updated + community.general.dnf_versionlock: + name: + - nginx + - haproxy + state: present + +- name: Remove lock from nginx to be updated again + community.general.dnf_versionlock: + name: nginx + state: absent + +- name: Exclude bind 32:9.11 from installs or updates + community.general.dnf_versionlock: + name: bind-32:9.11* + state: excluded + +- name: Keep bash package in major version 4 + community.general.dnf_versionlock: + name: bash-0:4.* + raw: true + state: present + +- name: Delete all entries in the locklist of versionlock + community.general.dnf_versionlock: + state: clean +''' + +RETURN = r''' +locklist_pre: + description: Locklist before module execution. + returned: success + type: list + elements: str + sample: [ 'bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*' ] +locklist_post: + description: Locklist after module execution.
+ returned: success and (not check mode or state is clean) + type: list + elements: str + sample: [ 'bash-0:4.4.20-1.el8_4.*' ] +specs_toadd: + description: Package name specs meant to be added by versionlock. + returned: success + type: list + elements: str + sample: [ 'bash' ] +specs_todelete: + description: Package name specs meant to be deleted by versionlock. + returned: success + type: list + elements: str + sample: [ 'bind' ] +''' + +from ansible.module_utils.basic import AnsibleModule +import fnmatch +import os +import re + +DNF_BIN = "/usr/bin/dnf" +VERSIONLOCK_CONF = "/etc/dnf/plugins/versionlock.conf" +# NEVRA regex. +NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-" + r"(?P<release>.+)\.(?P<arch>.+)$") + + +def do_versionlock(module, command, patterns=None, raw=False): + patterns = [] if not patterns else patterns + raw_parameter = ["--raw"] if raw else [] + # Call dnf versionlock using just one full NEVR package-name-spec each + # time because multiple package-name-spec and globs are not well supported. + # + # This is a workaround for two alleged bugs in the dnf versionlock plugin: + # * Multiple package-name-spec arguments don't lock correctly + # (https://bugzilla.redhat.com/show_bug.cgi?id=2013324). + # * Locking a version of a not installed package disallows locking other + # versions later (https://bugzilla.redhat.com/show_bug.cgi?id=2013332) + # + # NOTE: This is suboptimal in terms of performance if there are more than a + # few package-name-spec patterns to lock, because there is a command + # execution per each. This will improve by changing the strategy once the + # mentioned alleged bugs in the dnf versionlock plugin are fixed. + if patterns: + outs = [] + for p in patterns: + rc, out, err = module.run_command( + [DNF_BIN, "-q", "versionlock", command] + raw_parameter + [p], + check_rc=True) + outs.append(out) + out = "\n".join(outs) + else: + rc, out, err = module.run_command( + [DNF_BIN, "-q", "versionlock", command], check_rc=True) + return out + + +# This is equivalent to the _match function of the versionlock plugin. +def match(entry, pattern): + entry = entry.lstrip('!') + if entry == pattern: + return True + m = NEVRA_RE.match(entry) + if not m: + return False + for name in ( + '%s' % m["name"], + '%s.%s' % (m["name"], m["arch"]), + '%s-%s' % (m["name"], m["version"]), + '%s-%s-%s' % (m["name"], m["version"], m["release"]), + '%s-%s:%s' % (m["name"], m["epoch"], m["version"]), + '%s-%s-%s.%s' % (m["name"], m["version"], m["release"], m["arch"]), + '%s-%s:%s-%s' % (m["name"], m["epoch"], m["version"], m["release"]), + '%s:%s-%s-%s.%s' % (m["epoch"], m["name"], m["version"], m["release"], + m["arch"]), + '%s-%s:%s-%s.%s' % (m["name"], m["epoch"], m["version"], m["release"], + m["arch"]) + ): + if fnmatch.fnmatch(name, pattern): + return True + return False + + +def get_packages(module, patterns, only_installed=False): + packages_available_map_name_evrs = {} + rc, out, err = module.run_command( + [DNF_BIN, "-q", "repoquery"] + + (["--installed"] if only_installed else []) + + patterns, + check_rc=True) + + for p in out.split(): + # Extract the NEVRA pattern.
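+ # Illustrative example: a repoquery line such as + # "bash-0:4.4.20-1.el8_4.x86_64" parses to name="bash", epoch="0", + # version="4.4.20", release="1.el8_4", arch="x86_64", giving the + # evr "0:4.4.20-1.el8_4".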
+ m = NEVRA_RE.match(p) + if not m: + module.fail_json( + msg="failed to parse nevra for %s" % p, + rc=rc, out=out, err=err) + + evr = "%s:%s-%s" % (m["epoch"], + m["version"], + m["release"]) + + packages_available_map_name_evrs.setdefault(m["name"], set()) + packages_available_map_name_evrs[m["name"]].add(evr) + return packages_available_map_name_evrs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type="list", elements="str", default=[]), + raw=dict(type="bool", default=False), + state=dict(type="str", default="present", + choices=["present", "absent", "excluded", "clean"]), + ), + supports_check_mode=True, + ) + + patterns = module.params["name"] + raw = module.params["raw"] + state = module.params["state"] + changed = False + msg = "" + + # Check module pre-requisites. + if not os.path.exists(DNF_BIN): + module.fail_json(msg="%s was not found" % DNF_BIN) + if not os.path.exists(VERSIONLOCK_CONF): + module.fail_json(msg="plugin versionlock is required") + + # Check incompatible options. + if state == "clean" and patterns: + module.fail_json(msg="clean state is incompatible with a name list") + if state != "clean" and not patterns: + module.fail_json(msg="name list is required for %s state" % state) + + locklist_pre = do_versionlock(module, "list").split() + + specs_toadd = [] + specs_todelete = [] + + if state in ["present", "excluded"]: + + if raw: + # Add raw patterns as specs to add. + for p in patterns: + if ((p if state == "present" else "!" + p) + not in locklist_pre): + specs_toadd.append(p) + else: + # Get available packages that match the patterns. + packages_map_name_evrs = get_packages( + module, + patterns) + + # Get installed packages that match the patterns. + packages_installed_map_name_evrs = get_packages( + module, + patterns, + only_installed=True) + + # Obtain the list of package specs that require an entry in the + # locklist. This list is composed of: + # a) the non-installed packages list with all available + # versions + # b) the installed packages list + packages_map_name_evrs.update(packages_installed_map_name_evrs) + for name in packages_map_name_evrs: + for evr in packages_map_name_evrs[name]: + locklist_entry = "%s-%s.*" % (name, evr) + + if (locklist_entry if state == "present" + else "!%s" % locklist_entry) not in locklist_pre: + specs_toadd.append(locklist_entry) + + if specs_toadd and not module.check_mode: + cmd = "add" if state == "present" else "exclude" + msg = do_versionlock(module, cmd, patterns=specs_toadd, raw=raw) + + elif state == "absent": + + if raw: + # Add raw patterns as specs to delete. + for p in patterns: + if p in locklist_pre: + specs_todelete.append(p) + + else: + # Get patterns that match some line in the locklist.
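+ # Illustrative example: the pattern "bash" matches the locklist + # entry "bash-0:4.4.20-1.el8_4.*" through the name-only candidate + # built by match() above.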
+ for p in patterns: + for e in locklist_pre: + if match(e, p): + specs_todelete.append(p) + + if specs_todelete and not module.check_mode: + msg = do_versionlock( + module, "delete", patterns=specs_todelete, raw=raw) + + elif state == "clean": + specs_todelete = locklist_pre + + if specs_todelete and not module.check_mode: + msg = do_versionlock(module, "clear") + + if specs_toadd or specs_todelete: + changed = True + + response = { + "changed": changed, + "msg": msg, + "locklist_pre": locklist_pre, + "specs_toadd": specs_toadd, + "specs_todelete": specs_todelete + } + if not module.check_mode: + response["locklist_post"] = do_versionlock(module, "list").split() + else: + if state == "clean": + response["locklist_post"] = [] + + module.exit_json(**response) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/dnsimple.py b/ansible_collections/community/general/plugins/modules/dnsimple.py new file mode 100644 index 000000000..df41f73a6 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/dnsimple.py @@ -0,0 +1,434 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: dnsimple +short_description: Interface with dnsimple.com (a DNS hosting service) +description: + - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + account_email: + description: + - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for. + - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)." + - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0" + type: str + account_api_token: + description: + - Account API token. See I(account_email) for more information. + type: str + domain: + description: + - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. + - If omitted, a list of domains will be returned. + - If domain is present but the domain doesn't exist, it will be created. + type: str + record: + description: + - Record to add, if blank a record for the domain will be created, supports the wildcard (*). + type: str + record_ids: + description: + - List of records to ensure they either exist or do not exist. + type: list + elements: str + type: + description: + - The type of DNS record to create. + choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ] + type: str + ttl: + description: + - The TTL to give the new record in seconds. + default: 3600 + type: int + value: + description: + - Record value. + - Must be specified when trying to ensure a record exists. + type: str + priority: + description: + - Record priority. + type: int + state: + description: + - whether the record should exist or not. 
+    choices: [ 'present', 'absent' ]
+    default: present
+    type: str
+  solo:
+    description:
+      - Whether the record should be the only one for that record type and record name.
+      - Only use when C(state) is set to C(present) on a record.
+    type: 'bool'
+    default: false
+  sandbox:
+    description:
+      - Use the DNSimple sandbox environment.
+      - Requires a dedicated account in the dnsimple sandbox environment.
+      - Check U(https://developer.dnsimple.com/sandbox/) for more information.
+    type: 'bool'
+    default: false
+    version_added: 3.5.0
+requirements:
+  - "dnsimple >= 2.0.0"
+author: "Alex Coomans (@drcapulet)"
+'''
+
+EXAMPLES = '''
+- name: Authenticate using email and API token and fetch all domains
+  community.general.dnsimple:
+    account_email: test@example.com
+    account_api_token: dummyapitoken
+  delegate_to: localhost
+
+- name: Delete a domain
+  community.general.dnsimple:
+    domain: my.com
+    state: absent
+  delegate_to: localhost
+
+- name: Create a test.my.com A record to point to 127.0.0.1
+  community.general.dnsimple:
+    domain: my.com
+    record: test
+    type: A
+    value: 127.0.0.1
+  delegate_to: localhost
+  register: record
+
+- name: Delete record using record_ids
+  community.general.dnsimple:
+    domain: my.com
+    record_ids: '{{ record["id"] }}'
+    state: absent
+  delegate_to: localhost
+
+- name: Create a my.com CNAME record to example.com
+  community.general.dnsimple:
+    domain: my.com
+    record: ''
+    type: CNAME
+    value: example.com
+    state: present
+  delegate_to: localhost
+
+- name: Change TTL value for a record
+  community.general.dnsimple:
+    domain: my.com
+    record: ''
+    type: CNAME
+    value: example.com
+    ttl: 600
+    state: present
+  delegate_to: localhost
+
+- name: Delete the record
+  community.general.dnsimple:
+    domain: my.com
+    record: ''
+    type: CNAME
+    value: example.com
+    state: absent
+  delegate_to: localhost
+'''
+
+RETURN = r"""# """
+
+import traceback
+import re
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+class DNSimpleV2():
+    """class which uses dnsimple-python >= 2"""
+
+    def __init__(self, account_email, account_api_token, sandbox, module):
+        """init"""
+        self.module = module
+        self.account_email = account_email
+        self.account_api_token = account_api_token
+        self.sandbox = sandbox
+        self.pagination_per_page = 30
+        self.dnsimple_client()
+        self.dnsimple_account()
+
+    def dnsimple_client(self):
+        """creates a dnsimple client object"""
+        if self.account_email and self.account_api_token:
+            client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token, user_agent="ansible/community.general")
+        else:
+            msg = "Option account_email or account_api_token not provided. " \
+                  "DNSimple authentication with a .dnsimple config file is not " \
+                  "supported with dnsimple-python>=2.0.0"
+            raise DNSimpleException(msg)
+        client.identity.whoami()
+        self.client = client
+
+    def dnsimple_account(self):
+        """select a dnsimple account. If a user token is used for authentication,
+        this user must only have access to a single account"""
+        account = self.client.identity.whoami().data.account
+        # user supplied a user token instead of account api token
+        if not account:
+            accounts = Accounts(self.client).list_accounts().data
+            if len(accounts) != 1:
+                msg = "The provided dnsimple token is a user token with multiple accounts. " \
+                      "Use an account token or a user token with access to a single account. " \
\ + "See https://support.dnsimple.com/articles/api-access-token/" + raise DNSimpleException(msg) + account = accounts[0] + self.account = account + + def get_all_domains(self): + """returns a list of all domains""" + domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id) + return [d.__dict__ for d in domain_list] + + def get_domain(self, domain): + """returns a single domain by name or id""" + try: + dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__ + except DNSimpleException as e: + exception_string = str(e.message) + if re.match(r"^Domain .+ not found$", exception_string): + dr = None + else: + raise + return dr + + def create_domain(self, domain): + """create a single domain""" + return self.client.domains.create_domain(self.account.id, domain).data.__dict__ + + def delete_domain(self, domain): + """delete a single domain""" + self.client.domains.delete_domain(self.account.id, domain) + + def get_records(self, zone, dnsimple_filter=None): + """return dns ressource records which match a specified filter""" + records_list = self._get_paginated_result(self.client.zones.list_records, + account_id=self.account.id, + zone=zone, filter=dnsimple_filter) + return [d.__dict__ for d in records_list] + + def delete_record(self, domain, rid): + """delete a single dns ressource record""" + self.client.zones.delete_record(self.account.id, domain, rid) + + def update_record(self, domain, rid, ttl=None, priority=None): + """update a single dns ressource record""" + zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority) + result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__ + return result + + def create_record(self, domain, name, record_type, content, ttl=None, priority=None): + """create a single dns ressource record""" + zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority) + return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__ + + def _get_paginated_result(self, operation, **options): + """return all results of a paginated api response""" + records_pagination = operation(per_page=self.pagination_per_page, **options).pagination + result_list = [] + for page in range(1, records_pagination.total_pages + 1): + page_data = operation(per_page=self.pagination_per_page, page=page, **options).data + result_list.extend(page_data) + return result_list + + +DNSIMPLE_IMP_ERR = [] +HAS_DNSIMPLE = False +try: + # try to import dnsimple >= 2.0.0 + from dnsimple import Client, DNSimpleException + from dnsimple.service import Accounts + from dnsimple.version import version as dnsimple_version + from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput + HAS_DNSIMPLE = True +except ImportError: + DNSIMPLE_IMP_ERR.append(traceback.format_exc()) + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback + + +def main(): + module = AnsibleModule( + argument_spec=dict( + account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])), + account_api_token=dict(type='str', + no_log=True, + fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])), + domain=dict(type='str'), + record=dict(type='str'), + record_ids=dict(type='list', elements='str'), + type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', + 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', + 'PTR', 'AAAA', 'SSHFP', 'HINFO', + 'POOL', 'CAA']), + ttl=dict(type='int', default=3600), + value=dict(type='str'), + 
priority=dict(type='int'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + solo=dict(type='bool', default=False), + sandbox=dict(type='bool', default=False), + ), + required_together=[ + ['record', 'value'] + ], + supports_check_mode=True, + ) + + if not HAS_DNSIMPLE: + module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0]) + + account_email = module.params.get('account_email') + account_api_token = module.params.get('account_api_token') + domain = module.params.get('domain') + record = module.params.get('record') + record_ids = module.params.get('record_ids') + record_type = module.params.get('type') + ttl = module.params.get('ttl') + value = module.params.get('value') + priority = module.params.get('priority') + state = module.params.get('state') + is_solo = module.params.get('solo') + sandbox = module.params.get('sandbox') + + DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0] + + try: + if DNSIMPLE_MAJOR_VERSION < 2: + module.fail_json( + msg='Support for python-dnsimple < 2 has been removed in community.general 5.0.0. Update python-dnsimple to version >= 2.0.0.') + ds = DNSimpleV2(account_email, account_api_token, sandbox, module) + # Let's figure out what operation we want to do + # No domain, return a list + if not domain: + all_domains = ds.get_all_domains() + module.exit_json(changed=False, result=all_domains) + + # Domain & No record + if record is None and not record_ids: + if domain.isdigit(): + typed_domain = int(domain) + else: + typed_domain = str(domain) + dr = ds.get_domain(typed_domain) + # domain does not exist + if state == 'present': + if dr: + module.exit_json(changed=False, result=dr) + else: + if module.check_mode: + module.exit_json(changed=True) + else: + response = ds.create_domain(domain) + module.exit_json(changed=True, result=response) + # state is absent + else: + if dr: + if not module.check_mode: + ds.delete_domain(domain) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + # need the not none check since record could be an empty string + if record is not None: + if not record_type: + module.fail_json(msg="Missing the record type") + if not value: + module.fail_json(msg="Missing the record value") + + records_list = ds.get_records(domain, dnsimple_filter={'name': record}) + rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None) + if state == 'present': + changed = False + if is_solo: + # delete any records that have the same name and record type + same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type] + if rr: + same_type = [rid for rid in same_type if rid != rr['id']] + if same_type: + if not module.check_mode: + for rid in same_type: + ds.delete_record(domain, rid) + changed = True + if rr: + # check if we need to update + if rr['ttl'] != ttl or rr['priority'] != priority: + if module.check_mode: + module.exit_json(changed=True) + else: + response = ds.update_record(domain, rr['id'], ttl, priority) + module.exit_json(changed=True, result=response) + else: + module.exit_json(changed=changed, result=rr) + else: + # create it + if module.check_mode: + module.exit_json(changed=True) + else: + response = ds.create_record(domain, record, record_type, value, ttl, priority) + module.exit_json(changed=True, result=response) + # state is absent + else: + if rr: + if not module.check_mode: + ds.delete_record(domain, rr['id']) + module.exit_json(changed=True) 
+ else: + module.exit_json(changed=False) + + # Make sure these record_ids either all exist or none + if record_ids: + current_records = ds.get_records(domain, dnsimple_filter=None) + current_record_ids = [str(d['id']) for d in current_records] + wanted_record_ids = [str(r) for r in record_ids] + if state == 'present': + difference = list(set(wanted_record_ids) - set(current_record_ids)) + if difference: + module.fail_json(msg="Missing the following records: %s" % difference) + else: + module.exit_json(changed=False) + # state is absent + else: + difference = list(set(wanted_record_ids) & set(current_record_ids)) + if difference: + if not module.check_mode: + for rid in difference: + ds.delete_record(domain, rid) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + except DNSimpleException as e: + if DNSIMPLE_MAJOR_VERSION > 1: + module.fail_json(msg="DNSimple exception: %s" % e.message) + else: + module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message'])) + module.fail_json(msg="Unknown what you wanted me to do") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/dnsimple_info.py b/ansible_collections/community/general/plugins/modules/dnsimple_info.py new file mode 100644 index 000000000..52fd53303 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/dnsimple_info.py @@ -0,0 +1,329 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright Edward Hilgendorf, +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: dnsimple_info + +short_description: Pull basic info from DNSimple API + +version_added: "4.2.0" + +description: Retrieve existing records and domains from DNSimple API. + +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + +options: + name: + description: + - The domain name to retrieve info from. + - Will return all associated records for this domain if specified. + - If not specified, will return all domains associated with the account ID. + type: str + + account_id: + description: The account ID to query. + required: true + type: str + + api_key: + description: The API key to use. + required: true + type: str + + record: + description: + - The record to find. + - If specified, only this record will be returned instead of all records. + required: false + type: str + + sandbox: + description: Whether or not to use sandbox environment. + required: false + default: false + type: bool + +author: + - Edward Hilgendorf (@edhilgendorf) +''' + +EXAMPLES = r''' +- name: Get all domains from an account + community.general.dnsimple_info: + account_id: "1234" + api_key: "1234" + +- name: Get all records from a domain + community.general.dnsimple_info: + name: "example.com" + account_id: "1234" + api_key: "1234" + +- name: Get all info from a matching record + community.general.dnsimple_info: + name: "example.com" + record: "subdomain" + account_id: "1234" + api_key: "1234" +''' + +RETURN = r''' +dnsimple_domain_info: + description: Returns a list of dictionaries of all domains associated with the supplied account ID. 
+ type: list + elements: dict + returned: success when I(name) is not specified + sample: + - account_id: 1234 + created_at: '2021-10-16T21:25:42Z' + id: 123456 + last_transferred_at: + name: example.com + reverse: false + secondary: false + updated_at: '2021-11-10T20:22:50Z' + contains: + account_id: + description: The account ID. + type: int + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + last_transferred_at: + description: Date the domain was transferred, or empty if not. + type: str + name: + description: Name of the record. + type: str + reverse: + description: Whether or not it is a reverse zone record. + type: bool + updated_at: + description: When the domain entry was updated. + type: str + +dnsimple_records_info: + description: Returns a list of dictionaries with all records for the domain supplied. + type: list + elements: dict + returned: success when I(name) is specified, but I(record) is not + sample: + - content: ns1.dnsimple.com admin.dnsimple.com + created_at: '2021-10-16T19:07:34Z' + id: 12345 + name: 'catheadbiscuit' + parent_id: null + priority: null + regions: + - global + system_record: true + ttl: 3600 + type: SOA + updated_at: '2021-11-15T23:55:51Z' + zone_id: example.com + contains: + content: + description: Content of the returned record. + type: str + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + name: + description: Name of the record. + type: str + parent_id: + description: Parent record or null. + type: int + priority: + description: Priority setting of the record. + type: str + regions: + description: List of regions where the record is available. + type: list + system_record: + description: Whether or not it is a system record. + type: bool + ttl: + description: Record TTL. + type: int + type: + description: Record type. + type: str + updated_at: + description: When the domain entry was updated. + type: str + zone_id: + description: ID of the zone that the record is associated with. + type: str +dnsimple_record_info: + description: Returns a list of dictionaries that match the record supplied. + returned: success when I(name) and I(record) are specified + type: list + elements: dict + sample: + - content: 1.2.3.4 + created_at: '2021-11-15T23:55:51Z' + id: 123456 + name: catheadbiscuit + parent_id: null + priority: null + regions: + - global + system_record: false + ttl: 3600 + type: A + updated_at: '2021-11-15T23:55:51Z' + zone_id: example.com + contains: + content: + description: Content of the returned record. + type: str + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + name: + description: Name of the record. + type: str + parent_id: + description: Parent record or null. + type: int + priority: + description: Priority setting of the record. + type: str + regions: + description: List of regions where the record is available. + type: list + system_record: + description: Whether or not it is a system record. + type: bool + ttl: + description: Record TTL. + type: int + type: + description: Record type. + type: str + updated_at: + description: When the domain entry was updated. + type: str + zone_id: + description: ID of the zone that the record is associated with. 
+ type: str +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("requests"): + from requests import Request, Session + + +def build_url(account, key, is_sandbox): + headers = {'Accept': 'application/json', + 'Authorization': 'Bearer ' + key} + url = 'https://api{sandbox}.dnsimple.com/'.format( + sandbox=".sandbox" if is_sandbox else "") + 'v2/' + account + req = Request(url=url, headers=headers) + prepped_request = req.prepare() + return prepped_request + + +def iterate_data(module, request_object): + base_url = request_object.url + response = Session().send(request_object) + if 'pagination' in response.json(): + data = response.json()["data"] + pages = response.json()["pagination"]["total_pages"] + if int(pages) > 1: + for page in range(1, pages): + page = page + 1 + request_object.url = base_url + '&page=' + str(page) + new_results = Session().send(request_object) + data = data + new_results.json()["data"] + return data + else: + module.fail_json('API Call failed, check ID, key and sandbox values') + + +def record_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def domain_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?per_page=100', 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def account_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/?per_page=100', 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def main(): + # define available arguments/parameters a user can pass to the module + fields = { + "account_id": {"required": True, "type": "str"}, + "api_key": {"required": True, "type": "str", "no_log": True}, + "name": {"required": False, "type": "str"}, + "record": {"required": False, "type": "str"}, + "sandbox": {"required": False, "type": "bool", "default": False} + } + + result = { + 'changed': False + } + + module = AnsibleModule( + argument_spec=fields, + supports_check_mode=True + ) + + params = module.params + req = build_url(params['account_id'], + params['api_key'], + params['sandbox']) + + deps.validate(module) + + # At minimum we need account and key + if params['account_id'] and params['api_key']: + # If we have a record return info on that record + if params['name'] and params['record']: + result['dnsimple_record_info'] = record_info(module, req) + module.exit_json(**result) + + # If we have the account only and domain, return records for the domain + elif params['name']: + result['dnsimple_records_info'] = domain_info(module, req) + module.exit_json(**result) + + # If we have the account only, return domains + else: + result['dnsimple_domain_info'] = account_info(module, req) + module.exit_json(**result) + else: + module.fail_json(msg="Need at least account_id and api_key") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py b/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py new file mode 100644 index 000000000..44587ca39 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py @@ -0,0 +1,724 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsmadeeasy
+short_description: Interface with dnsmadeeasy.com (a DNS hosting service)
+description:
+  - >
+    Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
+    monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  account_key:
+    description:
+      - Account API Key.
+    required: true
+    type: str
+
+  account_secret:
+    description:
+      - Account Secret Key.
+    required: true
+    type: str
+
+  domain:
+    description:
+      - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
+        resolution.
+    required: true
+    type: str
+
+  sandbox:
+    description:
+      - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
+    type: bool
+    default: false
+
+  record_name:
+    description:
+      - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless
+        of the state argument.
+    type: str
+
+  record_type:
+    description:
+      - Record type.
+    choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
+    type: str
+
+  record_value:
+    description:
+      - >
+        Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
+        SRV: <priority> <weight> <port> <target name>, TXT: <text value>
+      - >
+        If record_value is not specified, no changes will be made and the record will be returned in 'result'
+        (in other words, this module can be used to fetch a record's current id, type, and ttl)
+    type: str
+
+  record_ttl:
+    description:
+      - Record's "Time to live". Number of seconds the record remains cached in DNS servers.
+    default: 1800
+    type: int
+
+  state:
+    description:
+      - Whether the record should exist or not.
+    required: true
+    choices: [ 'present', 'absent' ]
+    type: str
+
+  validate_certs:
+    description:
+      - If C(false), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    type: bool
+    default: true
+
+  monitor:
+    description:
+      - If C(true), add or change the monitor. This is applicable only for A records.
+    type: bool
+    default: false
+
+  systemDescription:
+    description:
+      - Description used by the monitor.
+    default: ''
+    type: str
+
+  maxEmails:
+    description:
+      - Number of emails sent to the contact list by the monitor.
+    default: 1
+    type: int
+
+  protocol:
+    description:
+      - Protocol used by the monitor.
+    default: 'HTTP'
+    choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
+    type: str
+
+  port:
+    description:
+      - Port used by the monitor.
+    default: 80
+    type: int
+
+  sensitivity:
+    description:
+      - Number of checks the monitor performs before a failover occurs where Low = 8, Medium = 5, and High = 3.
+    default: 'Medium'
+    choices: ['Low', 'Medium', 'High']
+    type: str
+
+  contactList:
+    description:
+      - Name or id of the contact list that the monitor will notify.
+      - The default C('') means the Account Owner.
+    type: str
+
+  httpFqdn:
+    description:
+      - The fully qualified domain name used by the monitor.
+    type: str
+
+  httpFile:
+    description:
+      - The file at the FQDN that the monitor queries for HTTP or HTTPS.
+    type: str
+
+  httpQueryString:
+    description:
+      - The string in the httpFile that the monitor queries for HTTP or HTTPS.
+    type: str
+
+  failover:
+    description:
+      - If C(true), add or change the failover. This is applicable only for A records.
+    type: bool
+    default: false
+
+  autoFailover:
+    description:
+      - If true, fallback to the primary IP address is manual after a failover.
+      - If false, fallback to the primary IP address is automatic after a failover.
+    type: bool
+    default: false
+
+  ip1:
+    description:
+      - Primary IP address for the failover.
+      - Required if adding or changing the monitor or failover.
+    type: str
+
+  ip2:
+    description:
+      - Secondary IP address for the failover.
+      - Required if adding or changing the failover.
+    type: str
+
+  ip3:
+    description:
+      - Tertiary IP address for the failover.
+    type: str
+
+  ip4:
+    description:
+      - Quaternary IP address for the failover.
+    type: str
+
+  ip5:
+    description:
+      - Quinary IP address for the failover.
+    type: str
+
+notes:
+  - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
+    seconds of actual time by using NTP.
+  - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
+    These values can be registered and used in your playbooks.
+  - Only A records can have a monitor or failover.
+  - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
+  - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
+  - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
+ +requirements: [ hashlib, hmac ] +author: "Brice Burgess (@briceburg)" +''' + +EXAMPLES = ''' +- name: Fetch my.com domain records + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + register: response + +- name: Create a record + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + +- name: Update the previously created record + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_value: 192.0.2.23 + +- name: Fetch a specific record + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + register: response + +- name: Delete a record + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + record_type: A + state: absent + record_name: test + +- name: Add a failover + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: true + ip1: 127.0.0.2 + ip2: 127.0.0.3 + +- name: Add a failover + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: true + ip1: 127.0.0.2 + ip2: 127.0.0.3 + ip3: 127.0.0.4 + ip4: 127.0.0.5 + ip5: 127.0.0.6 + +- name: Add a monitor + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: true + ip1: 127.0.0.2 + protocol: HTTP # default + port: 80 # default + maxEmails: 1 + systemDescription: Monitor Test A record + contactList: my contact list + +- name: Add a monitor with http options + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: true + ip1: 127.0.0.2 + protocol: HTTP # default + port: 80 # default + maxEmails: 1 + systemDescription: Monitor Test A record + contactList: 1174 # contact list id + httpFqdn: http://my.com + httpFile: example + httpQueryString: some string + +- name: Add a monitor and a failover + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: true + ip1: 127.0.0.2 + ip2: 127.0.0.3 + monitor: true + protocol: HTTPS + port: 443 + maxEmails: 1 + systemDescription: monitoring my.com status + contactList: emergencycontacts + +- name: Remove a failover + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + failover: false + +- name: Remove a monitor + community.general.dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 + monitor: false +''' + +# ============================================ +# DNSMadeEasy module specific support methods. 
+# + +import json +import hashlib +import hmac +import locale +from time import strftime, gmtime + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.six import string_types + + +class DME2(object): + + def __init__(self, apikey, secret, domain, sandbox, module): + self.module = module + + self.api = apikey + self.secret = secret + + if sandbox: + self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/' + self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl) + else: + self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' + + self.domain = str(domain) + self.domain_map = None # ["domain_name"] => ID + self.record_map = None # ["record_name"] => ID + self.records = None # ["record_ID"] => + self.all_records = None + self.contactList_map = None # ["contactList_name"] => ID + + # Lookup the domain ID if passed as a domain name vs. ID + if not self.domain.isdigit(): + self.domain = self.getDomainByName(self.domain)['id'] + + self.record_url = 'dns/managed/' + str(self.domain) + '/records' + self.monitor_url = 'monitor' + self.contactList_url = 'contactList' + + def _headers(self): + currTime = self._get_date() + hashstring = self._create_hash(currTime) + headers = {'x-dnsme-apiKey': self.api, + 'x-dnsme-hmac': hashstring, + 'x-dnsme-requestDate': currTime, + 'content-type': 'application/json'} + return headers + + def _get_date(self): + locale.setlocale(locale.LC_TIME, 'C') + return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) + + def _create_hash(self, rightnow): + return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() + + def query(self, resource, method, data=None): + url = self.baseurl + resource + if data and not isinstance(data, string_types): + data = urlencode(data) + + response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) + if info['status'] not in (200, 201, 204): + self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) + + try: + return json.load(response) + except Exception: + return {} + + def getDomain(self, domain_id): + if not self.domain_map: + self._instMap('domain') + + return self.domains.get(domain_id, False) + + def getDomainByName(self, domain_name): + if not self.domain_map: + self._instMap('domain') + + return self.getDomain(self.domain_map.get(domain_name, 0)) + + def getDomains(self): + return self.query('dns/managed', 'GET')['data'] + + def getRecord(self, record_id): + if not self.record_map: + self._instMap('record') + + return self.records.get(record_id, False) + + # Try to find a single record matching this one. + # How we do this depends on the type of record. For instance, there + # can be several MX records for a single record_name while there can + # only be a single CNAME for a particular record_name. Note also that + # there can be several records with different types for a single name. 
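+    # Illustrative examples of the matching rules implemented below
+    # (hypothetical values, not part of the upstream code): an MX
+    # record_value such as "10 mail.example.com." is compared on its target
+    # "mail.example.com." only, and a TXT record_value such as hello is
+    # compared against the quoted API value '"hello"'.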
+    def getMatchingRecord(self, record_name, record_type, record_value):
+        # Get all the records if not already cached
+        if not self.all_records:
+            self.all_records = self.getRecords()
+
+        if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
+            for result in self.all_records:
+                if result['name'] == record_name and result['type'] == record_type:
+                    return result
+            return False
+        elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
+            for result in self.all_records:
+                if record_type == "MX":
+                    value = record_value.split(" ")[1]
+                # Note that TXT records are surrounded by quotes in the API response.
+                elif record_type == "TXT":
+                    value = '"{0}"'.format(record_value)
+                elif record_type == "SRV":
+                    value = record_value.split(" ")[3]
+                else:
+                    value = record_value
+                if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
+                    return result
+            return False
+        else:
+            raise Exception('record_type not yet supported')
+
+    def getRecords(self):
+        return self.query(self.record_url, 'GET')['data']
+
+    def _instMap(self, type):
+        # @TODO cache this call so it's executed only once per ansible execution
+        map = {}
+        results = {}
+
+        # iterate over e.g. self.getDomains() || self.getRecords()
+        for result in getattr(self, 'get' + type.title() + 's')():
+
+            map[result['name']] = result['id']
+            results[result['id']] = result
+
+        # e.g. self.domain_map || self.record_map
+        setattr(self, type + '_map', map)
+        setattr(self, type + 's', results)  # e.g. self.domains || self.records
+
+    def prepareRecord(self, data):
+        return json.dumps(data, separators=(',', ':'))
+
+    def createRecord(self, data):
+        # @TODO update the cache w/ resultant record + id when implemented
+        return self.query(self.record_url, 'POST', data)
+
+    def updateRecord(self, record_id, data):
+        # @TODO update the cache w/ resultant record + id when implemented
+        return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
+
+    def deleteRecord(self, record_id):
+        # @TODO remove record from the cache when implemented
+        return self.query(self.record_url + '/' + str(record_id), 'DELETE')
+
+    def getMonitor(self, record_id):
+        return self.query(self.monitor_url + '/' + str(record_id), 'GET')
+
+    def updateMonitor(self, record_id, data):
+        return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
+
+    def prepareMonitor(self, data):
+        return json.dumps(data, separators=(',', ':'))
+
+    def getContactList(self, contact_list_id):
+        if not self.contactList_map:
+            self._instMap('contactList')
+
+        return self.contactLists.get(contact_list_id, False)
+
+    def getContactlists(self):
+        return self.query(self.contactList_url, 'GET')['data']
+
+    def getContactListByName(self, name):
+        if not self.contactList_map:
+            self._instMap('contactList')
+
+        return self.getContactList(self.contactList_map.get(name, 0))
+
+# ===========================================
+# Module execution.
+# + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + account_key=dict(required=True, no_log=True), + account_secret=dict(required=True, no_log=True), + domain=dict(required=True), + sandbox=dict(default=False, type='bool'), + state=dict(required=True, choices=['present', 'absent']), + record_name=dict(required=False), + record_type=dict(required=False, choices=[ + 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), + record_value=dict(required=False), + record_ttl=dict(required=False, default=1800, type='int'), + monitor=dict(default=False, type='bool'), + systemDescription=dict(default=''), + maxEmails=dict(default=1, type='int'), + protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), + port=dict(default=80, type='int'), + sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), + contactList=dict(default=None), + httpFqdn=dict(required=False), + httpFile=dict(required=False), + httpQueryString=dict(required=False), + failover=dict(default=False, type='bool'), + autoFailover=dict(default=False, type='bool'), + ip1=dict(required=False), + ip2=dict(required=False), + ip3=dict(required=False), + ip4=dict(required=False), + ip5=dict(required=False), + validate_certs=dict(default=True, type='bool'), + ), + required_together=[ + ['record_value', 'record_ttl', 'record_type'] + ], + required_if=[ + ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']], + ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']] + ] + ) + + protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6) + sensitivities = dict(Low=8, Medium=5, High=3) + + DME = DME2(module.params["account_key"], module.params[ + "account_secret"], module.params["domain"], module.params["sandbox"], module) + state = module.params["state"] + record_name = module.params["record_name"] + record_type = module.params["record_type"] + record_value = module.params["record_value"] + + # Follow Keyword Controlled Behavior + if record_name is None: + domain_records = DME.getRecords() + if not domain_records: + module.fail_json( + msg="The requested domain name is not accessible with this api_key; try using its ID if known.") + module.exit_json(changed=False, result=domain_records) + + # Fetch existing record + Build new one + current_record = DME.getMatchingRecord(record_name, record_type, record_value) + new_record = {'name': record_name} + for i in ["record_value", "record_type", "record_ttl"]: + if not module.params[i] is None: + new_record[i[len("record_"):]] = module.params[i] + # Special handling for mx record + if new_record["type"] == "MX": + new_record["mxLevel"] = new_record["value"].split(" ")[0] + new_record["value"] = new_record["value"].split(" ")[1] + + # Special handling for SRV records + if new_record["type"] == "SRV": + new_record["priority"] = new_record["value"].split(" ")[0] + new_record["weight"] = new_record["value"].split(" ")[1] + new_record["port"] = new_record["value"].split(" ")[2] + new_record["value"] = new_record["value"].split(" ")[3] + + # Fetch existing monitor if the A record indicates it should exist and build the new monitor + current_monitor = dict() + new_monitor = dict() + if current_record and current_record['type'] == 'A' and current_record.get('monitor'): + current_monitor = DME.getMonitor(current_record['id']) + + # Build the new monitor + for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails', + 'contactList', 'httpFqdn', 'httpFile', 
'httpQueryString', + 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']: + if module.params[i] is not None: + if i == 'protocol': + # The API requires protocol to be a numeric in the range 1-6 + new_monitor['protocolId'] = protocols[module.params[i]] + elif i == 'sensitivity': + # The API requires sensitivity to be a numeric of 8, 5, or 3 + new_monitor[i] = sensitivities[module.params[i]] + elif i == 'contactList': + # The module accepts either the name or the id of the contact list + contact_list_id = module.params[i] + if not contact_list_id.isdigit() and contact_list_id != '': + contact_list = DME.getContactListByName(contact_list_id) + if not contact_list: + module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id)) + contact_list_id = contact_list.get('id', '') + new_monitor['contactListId'] = contact_list_id + else: + # The module option names match the API field names + new_monitor[i] = module.params[i] + + # Compare new record against existing one + record_changed = False + if current_record: + for i in new_record: + # Remove leading and trailing quote character from values because TXT records + # are surrounded by quotes. + if str(current_record[i]).strip('"') != str(new_record[i]): + record_changed = True + new_record['id'] = str(current_record['id']) + + monitor_changed = False + if current_monitor: + for i in new_monitor: + if str(current_monitor.get(i)) != str(new_monitor[i]): + monitor_changed = True + + # Follow Keyword Controlled Behavior + if state == 'present': + # return the record if no value is specified + if "value" not in new_record: + if not current_record: + module.fail_json( + msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) + module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) + + # create record and monitor as the record does not exist + if not current_record: + record = DME.createRecord(DME.prepareRecord(new_record)) + if new_monitor.get('monitor') and record_type == "A": + monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor)) + module.exit_json(changed=True, result=dict(record=record, monitor=monitor)) + else: + module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor)) + + # update the record + updated = False + if record_changed: + DME.updateRecord(current_record['id'], DME.prepareRecord(new_record)) + updated = True + if monitor_changed: + DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor)) + updated = True + if updated: + module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor)) + + # return the record (no changes) + module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) + + elif state == 'absent': + changed = False + # delete the record (and the monitor/failover) if it exists + if current_record: + DME.deleteRecord(current_record['id']) + module.exit_json(changed=True) + + # record does not exist, return w/o change. 
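+        # For example, a second run with state=absent on an already-deleted
+        # record takes this path and reports changed=false, keeping the task
+        # idempotent.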
+        module.exit_json(changed=changed)
+
+    else:
+        module.fail_json(
+            msg="'%s' is an unknown value for the state argument" % state)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/dpkg_divert.py b/ansible_collections/community/general/plugins/modules/dpkg_divert.py
new file mode 100644
index 000000000..4a1651f51
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dpkg_divert.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2020, Yann Amar
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dpkg_divert
+short_description: Override a Debian package's version of a file
+version_added: '0.2.0'
+author:
+  - quidame (@quidame)
+description:
+  - A diversion is for C(dpkg) the knowledge that only a given package
+    (or the local administrator) is allowed to install a file at a given
+    location. Other packages shipping their own version of this file will
+    be forced to I(divert) it, i.e. to install it at another location. It
+    allows one to keep changes in a file provided by a Debian package by
+    preventing its overwrite at package upgrade.
+  - This module manages diversions of Debian package files using the
+    C(dpkg-divert) commandline tool. It can either create or remove a
+    diversion for a given file, but also update an existing diversion
+    to modify its I(holder) and/or its I(divert) location.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+options:
+  path:
+    description:
+      - The original and absolute path of the file to be diverted or
+        undiverted. This path is unique, i.e. it is not possible to get
+        two diversions for the same I(path).
+    required: true
+    type: path
+  state:
+    description:
+      - When I(state=absent), remove the diversion of the specified
+        I(path); when I(state=present), create the diversion if it does
+        not exist, or update its package I(holder) or I(divert) location,
+        if it already exists.
+    type: str
+    default: present
+    choices: [absent, present]
+  holder:
+    description:
+      - The name of the package whose copy of file is not diverted, also
+        known as the diversion holder or the package the diversion belongs
+        to.
+      - The actual package does not have to be installed or even to exist
+        for its name to be valid. If not specified, the diversion is held
+        by 'LOCAL', that is reserved by/for dpkg for local diversions.
+      - This parameter is ignored when I(state=absent).
+    type: str
+  divert:
+    description:
+      - The location where the versions of file will be diverted.
+      - Default is to add suffix C(.distrib) to the file path.
+      - This parameter is ignored when I(state=absent).
+    type: path
+  rename:
+    description:
+      - Actually move the file aside (when I(state=present)) or back (when
+        I(state=absent)), but only when changing the state of the diversion.
+        This parameter has no effect when attempting to add a diversion that
+        already exists or when removing a nonexistent one.
+      - Unless I(force=true), renaming fails if the destination file already
+        exists (this lock being a dpkg-divert feature, and bypassing it being
+        a module feature).
+ type: bool + default: false + force: + description: + - When I(rename=true) and I(force=true), renaming is performed even if + the target of the renaming exists, i.e. the existing contents of the + file at this location will be lost. + - This parameter is ignored when I(rename=false). + type: bool + default: false +requirements: + - dpkg-divert >= 1.15.0 (Debian family) +''' + +EXAMPLES = r''' +- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place + community.general.dpkg_divert: + path: /usr/bin/busybox + +- name: Divert /usr/bin/busybox by package 'branding' + community.general.dpkg_divert: + path: /usr/bin/busybox + holder: branding + +- name: Divert and rename busybox to busybox.dpkg-divert + community.general.dpkg_divert: + path: /usr/bin/busybox + divert: /usr/bin/busybox.dpkg-divert + rename: true + +- name: Remove the busybox diversion and move the diverted file back + community.general.dpkg_divert: + path: /usr/bin/busybox + state: absent + rename: true + force: true +''' + +RETURN = r''' +commands: + description: The dpkg-divert commands ran internally by the module. + type: list + returned: on_success + elements: str + sample: "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc" +messages: + description: The dpkg-divert relevant messages (stdout or stderr). + type: list + returned: on_success + elements: str + sample: "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'" +diversion: + description: The status of the diversion after task execution. + type: dict + returned: always + contains: + divert: + description: The location of the diverted file. + type: str + holder: + description: The package holding the diversion. + type: str + path: + description: The path of the file to divert/undivert. + type: str + state: + description: The state of the diversion. 
+ type: str + sample: + { + "divert": "/etc/foobarrc.distrib", + "holder": "LOCAL", + "path": "/etc/foobarrc", + "state": "present" + } +''' + + +import re +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +def diversion_state(module, command, path): + diversion = dict(path=path, state='absent', divert=None, holder=None) + rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True) + if out: + diversion['state'] = 'present' + diversion['holder'] = out.rstrip() + rc, out, err = module.run_command([command, '--truename', path], check_rc=True) + diversion['divert'] = out.rstrip() + return diversion + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(required=True, type='path'), + state=dict(required=False, type='str', default='present', choices=['absent', 'present']), + holder=dict(required=False, type='str'), + divert=dict(required=False, type='path'), + rename=dict(required=False, type='bool', default=False), + force=dict(required=False, type='bool', default=False), + ), + supports_check_mode=True, + ) + + path = module.params['path'] + state = module.params['state'] + holder = module.params['holder'] + divert = module.params['divert'] + rename = module.params['rename'] + force = module.params['force'] + + diversion_wanted = dict(path=path, state=state) + changed = False + + DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True) + MAINCOMMAND = [DPKG_DIVERT] + + # Option --listpackage is needed and comes with 1.15.0 + rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True) + [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)] + if LooseVersion(current_version) < LooseVersion("1.15.0"): + module.fail_json(msg="Unsupported dpkg version (<1.15.0).") + no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1")) + + b_path = to_bytes(path, errors='surrogate_or_strict') + path_exists = os.path.exists(b_path) + # Used for things not doable with a single dpkg-divert command (as forced + # renaming of files, and diversion's 'holder' or 'divert' updates). + target_exists = False + truename_exists = False + + diversion_before = diversion_state(module, DPKG_DIVERT, path) + if diversion_before['state'] == 'present': + b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict') + truename_exists = os.path.exists(b_divert) + + # Append options as requested in the task parameters, but ignore some of + # them when removing the diversion. + if rename: + MAINCOMMAND.append('--rename') + elif no_rename_is_supported: + MAINCOMMAND.append('--no-rename') + + if state == 'present': + if holder and holder != 'LOCAL': + MAINCOMMAND.extend(['--package', holder]) + diversion_wanted['holder'] = holder + else: + MAINCOMMAND.append('--local') + diversion_wanted['holder'] = 'LOCAL' + + if divert: + MAINCOMMAND.extend(['--divert', divert]) + target = divert + else: + target = '%s.distrib' % path + + MAINCOMMAND.extend(['--add', path]) + diversion_wanted['divert'] = target + b_target = to_bytes(target, errors='surrogate_or_strict') + target_exists = os.path.exists(b_target) + + else: + MAINCOMMAND.extend(['--remove', path]) + diversion_wanted['divert'] = None + diversion_wanted['holder'] = None + + # Start to populate the returned objects. 
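+    # At this point diversion_before is a dict shaped like the RETURN
+    # sample above, for example (illustrative values):
+    #   {'path': '/etc/foobarrc', 'state': 'present',
+    #    'divert': '/etc/foobarrc.distrib', 'holder': 'LOCAL'}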
+ diversion = diversion_before.copy() + maincommand = ' '.join(MAINCOMMAND) + commands = [maincommand] + + if module.check_mode or diversion_wanted == diversion_before: + MAINCOMMAND.insert(1, '--test') + diversion_after = diversion_wanted + + # Just try and see + rc, stdout, stderr = module.run_command(MAINCOMMAND) + + if rc == 0: + messages = [stdout.rstrip()] + + # else... cases of failure with dpkg-divert are: + # - The diversion does not belong to the same package (or LOCAL) + # - The divert filename is not the same (e.g. path.distrib != path.divert) + # - The renaming is forbidden by dpkg-divert (i.e. both the file and the + # diverted file exist) + + elif state != diversion_before['state']: + # There should be no case with 'divert' and 'holder' when creating the + # diversion from none, and they're ignored when removing the diversion. + # So this is all about renaming... + if rename and path_exists and ( + (state == 'absent' and truename_exists) or + (state == 'present' and target_exists)): + if not force: + msg = "Set 'force' param to True to force renaming of files." + module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, + stderr=stderr, stdout=stdout, diversion=diversion) + else: + msg = "Unexpected error while changing state of the diversion." + module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, + stderr=stderr, stdout=stdout, diversion=diversion) + + to_remove = path + if state == 'present': + to_remove = target + + if not module.check_mode: + try: + b_remove = to_bytes(to_remove, errors='surrogate_or_strict') + os.unlink(b_remove) + except OSError as e: + msg = 'Failed to remove %s: %s' % (to_remove, to_native(e)) + module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, + stderr=stderr, stdout=stdout, diversion=diversion) + rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) + + messages = [stdout.rstrip()] + + # The situation is that we want to modify the settings (holder or divert) + # of an existing diversion. dpkg-divert does not handle this, and we have + # to remove the existing diversion first, and then set a new one. + else: + RMDIVERSION = [DPKG_DIVERT, '--remove', path] + if no_rename_is_supported: + RMDIVERSION.insert(1, '--no-rename') + rmdiversion = ' '.join(RMDIVERSION) + + if module.check_mode: + RMDIVERSION.insert(1, '--test') + + if rename: + MAINCOMMAND.remove('--rename') + if no_rename_is_supported: + MAINCOMMAND.insert(1, '--no-rename') + maincommand = ' '.join(MAINCOMMAND) + + commands = [rmdiversion, maincommand] + rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True) + + if module.check_mode: + messages = [rmdout.rstrip(), 'Running in check mode'] + else: + rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True) + messages = [rmdout.rstrip(), stdout.rstrip()] + + # Avoid if possible to orphan files (i.e. to dereference them in diversion + # database but let them in place), but do not make renaming issues fatal. + # BTW, this module is not about state of files involved in the diversion. 
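+    # Example (hypothetical paths): if the divert location changed from
+    # /etc/foobarrc.distrib to /etc/foobarrc.divert and only the old file
+    # exists on disk, it is renamed below so the filesystem keeps matching
+    # the diversion database.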
+ old = diversion_before['divert'] + new = diversion_wanted['divert'] + if new != old: + b_old = to_bytes(old, errors='surrogate_or_strict') + b_new = to_bytes(new, errors='surrogate_or_strict') + if os.path.exists(b_old) and not os.path.exists(b_new): + try: + os.rename(b_old, b_new) + except OSError as e: + pass + + if not module.check_mode: + diversion_after = diversion_state(module, DPKG_DIVERT, path) + + diversion = diversion_after.copy() + diff = dict() + if module._diff: + diff['before'] = diversion_before + diff['after'] = diversion_after + + if diversion_after != diversion_before: + changed = True + + if diversion_after == diversion_wanted: + module.exit_json(changed=changed, diversion=diversion, + commands=commands, messages=messages, diff=diff) + else: + msg = "Unexpected error: see stdout and stderr for details." + module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg, + stderr=stderr, stdout=stdout, diversion=diversion) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/easy_install.py b/ansible_collections/community/general/plugins/modules/easy_install.py new file mode 100644 index 000000000..564493180 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/easy_install.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2012, Matt Wright +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: easy_install +short_description: Installs Python libraries +description: + - Installs Python libraries, optionally in a I(virtualenv) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - A Python library name + required: true + virtualenv: + type: str + description: + - an optional I(virtualenv) directory path to install into. If the + I(virtualenv) does not exist, it is created automatically + virtualenv_site_packages: + description: + - Whether the virtual environment will inherit packages from the + global site-packages directory. Note that if this setting is + changed on an already existing virtual environment it will not + have any effect, the environment must be deleted and newly + created. + type: bool + default: false + virtualenv_command: + type: str + description: + - The command to create the virtual environment with. For example + C(pyvenv), C(virtualenv), C(virtualenv2). + default: virtualenv + executable: + type: str + description: + - The explicit executable or a pathname to the executable to be used to + run easy_install for a specific version of Python installed in the + system. For example C(easy_install-3.3), if there are both Python 2.7 + and 3.3 installations in the system and you want to run easy_install + for the Python 3.3 installation. + default: easy_install + state: + type: str + description: + - The desired state of the library. C(latest) ensures that the latest version is installed. + choices: [present, latest] + default: present +notes: + - Please note that the C(easy_install) module can only install Python + libraries. Thus this module is not able to remove libraries. 
It is + generally recommended to use the M(ansible.builtin.pip) module which you can first install + using M(community.general.easy_install). + - Also note that I(virtualenv) must be installed on the remote host if the + C(virtualenv) parameter is specified. +requirements: [ "virtualenv" ] +author: "Matt Wright (@mattupstate)" +''' + +EXAMPLES = ''' +- name: Install or update pip + community.general.easy_install: + name: pip + state: latest + +- name: Install Bottle into the specified virtualenv + community.general.easy_install: + name: bottle + virtualenv: /webapps/myapp/venv +''' + +import os +import os.path +import tempfile +from ansible.module_utils.basic import AnsibleModule + + +def install_package(module, name, easy_install, executable_arguments): + cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) + rc, out, err = module.run_command(cmd) + return rc, out, err + + +def _is_package_installed(module, name, easy_install, executable_arguments): + # Copy and add to the arguments + executable_arguments = executable_arguments[:] + executable_arguments.append('--dry-run') + rc, out, err = install_package(module, name, easy_install, executable_arguments) + if rc: + module.fail_json(msg=err) + return 'Downloading' not in out + + +def _get_easy_install(module, env=None, executable=None): + candidate_easy_inst_basenames = ['easy_install'] + easy_install = None + if executable is not None: + if os.path.isabs(executable): + easy_install = executable + else: + candidate_easy_inst_basenames.insert(0, executable) + if easy_install is None: + if env is None: + opt_dirs = [] + else: + # Try easy_install with the virtualenv directory first. + opt_dirs = ['%s/bin' % env] + for basename in candidate_easy_inst_basenames: + easy_install = module.get_bin_path(basename, False, opt_dirs) + if easy_install is not None: + break + # easy_install should have been found by now. The final call to + # get_bin_path will trigger fail_json. 
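+    # For illustration, with hypothetical values executable='easy_install-3.3'
+    # and env='/webapps/myapp/venv', the candidates are tried in this order:
+    #     /webapps/myapp/venv/bin/easy_install-3.3
+    #     /webapps/myapp/venv/bin/easy_install
+    # and then the same basenames on the system PATH.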
+ if easy_install is None: + basename = candidate_easy_inst_basenames[0] + easy_install = module.get_bin_path(basename, True, opt_dirs) + return easy_install + + +def main(): + arg_spec = dict( + name=dict(required=True), + state=dict(required=False, + default='present', + choices=['present', 'latest'], + type='str'), + virtualenv=dict(default=None, required=False), + virtualenv_site_packages=dict(default=False, type='bool'), + virtualenv_command=dict(default='virtualenv', required=False), + executable=dict(default='easy_install', required=False), + ) + + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params['name'] + env = module.params['virtualenv'] + executable = module.params['executable'] + site_packages = module.params['virtualenv_site_packages'] + virtualenv_command = module.params['virtualenv_command'] + executable_arguments = [] + if module.params['state'] == 'latest': + executable_arguments.append('--upgrade') + + rc = 0 + err = '' + out = '' + + if env: + virtualenv = module.get_bin_path(virtualenv_command, True) + + if not os.path.exists(os.path.join(env, 'bin', 'activate')): + if module.check_mode: + module.exit_json(changed=True) + command = '%s %s' % (virtualenv, env) + if site_packages: + command += ' --system-site-packages' + cwd = tempfile.gettempdir() + rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) + + rc += rc_venv + out += out_venv + err += err_venv + + easy_install = _get_easy_install(module, env, executable) + + cmd = None + changed = False + installed = _is_package_installed(module, name, easy_install, executable_arguments) + + if not installed: + if module.check_mode: + module.exit_json(changed=True) + rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments) + + rc += rc_easy_inst + out += out_easy_inst + err += err_easy_inst + + changed = True + + if rc != 0: + module.fail_json(msg=err, cmd=cmd) + + module.exit_json(changed=changed, binary=easy_install, + name=name, virtualenv=env) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ejabberd_user.py b/ansible_collections/community/general/plugins/modules/ejabberd_user.py new file mode 100644 index 000000000..397207ae6 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ejabberd_user.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013, Peter Sprygada +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ejabberd_user +author: "Peter Sprygada (@privateip)" +short_description: Manages users for ejabberd servers +requirements: + - ejabberd with mod_admin_extra +description: + - This module provides user management for ejabberd servers +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + username: + type: str + description: + - the name of the user to manage + required: true + host: + type: str + description: + - the ejabberd host associated with this username + required: true + password: + type: str + description: + - the password to assign to the username + required: false + logging: + description: + - enables or disables the local syslog facility for this 
module
+        required: false
+        default: false
+        type: bool
+    state:
+        type: str
+        description:
+            - describe the desired state of the user to be managed
+        required: false
+        default: 'present'
+        choices: [ 'present', 'absent' ]
+notes:
+    - The I(password) parameter is only required when I(state) is C(present).
+    - Passwords must be stored in clear text for this release.
+    - The ejabberd configuration file must include mod_admin_extra as a module.
+'''
+EXAMPLES = '''
+# Example playbook entries using the ejabberd_user module to manage user state.
+
+- name: Create a user if it does not exist
+  community.general.ejabberd_user:
+    username: test
+    host: server
+    password: password
+
+- name: Delete a user if it exists
+  community.general.ejabberd_user:
+    username: test
+    host: server
+    state: absent
+'''
+
+import syslog
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class EjabberdUser(object):
+    """ This object represents a user resource for an ejabberd server. The
+    object manages user creation and deletion using ejabberdctl. The following
+    ejabberdctl subcommands are currently used:
+        * register
+        * unregister
+        * change_password
+        * check_account
+        * check_password
+    """
+
+    def __init__(self, module):
+        self.module = module
+        self.logging = module.params.get('logging')
+        self.state = module.params.get('state')
+        self.host = module.params.get('host')
+        self.user = module.params.get('username')
+        self.pwd = module.params.get('password')
+
+    @property
+    def changed(self):
+        """ This property checks whether the stored password differs from the
+        supplied credentials. It returns True if the password does not match
+        (an update is needed) and False if it matches.
+        """
+        rc, out, err = self.run_command('check_password', [self.user, self.host, self.pwd])
+        return rc != 0
+
+    @property
+    def exists(self):
+        """ This property checks whether the supplied username exists for the
+        specified host. It returns True if the user exists, otherwise False.
+        """
+        rc, out, err = self.run_command('check_account', [self.user, self.host])
+        return rc == 0
+
+    def log(self, entry):
+        """ This method will log information to the local syslog facility """
+        if self.logging:
+            syslog.openlog('ansible-%s' % self.module._name)
+            syslog.syslog(syslog.LOG_NOTICE, entry)
+
+    def run_command(self, cmd, options):
+        """ This method runs the given ejabberdctl subcommand and returns the
+        (rc, stdout, stderr) tuple from the Ansible run_command helper.
+        """
+        cmd = [self.module.get_bin_path('ejabberdctl', required=True), cmd] + options
+        self.log('command: %s' % " ".join(cmd))
+        return self.module.run_command(cmd)
+
+    def update(self):
+        """ The update method will update the credentials for the user provided
+        """
+        return self.run_command('change_password', [self.user, self.host, self.pwd])
+
+    def create(self):
+        """ The create method will create a new user on the host with the
+        password provided
+        """
+        return self.run_command('register', [self.user, self.host, self.pwd])
+
+    def delete(self):
+        """ The delete method will delete the user from the host
+        """
+        return self.run_command('unregister', [self.user, self.host])
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            host=dict(required=True, type='str'),
+            username=dict(required=True, type='str'),
+            password=dict(type='str', no_log=True),
+            state=dict(default='present', choices=['present', 'absent']),
+            logging=dict(default=False, type='bool')  # deprecate in favour of c.g.syslogger?
+ ), + required_if=[ + ('state', 'present', ['password']), + ], + supports_check_mode=True, + ) + + obj = EjabberdUser(module) + + rc = None + result = dict(changed=False) + + if obj.state == 'absent': + if obj.exists: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.delete() + if rc != 0: + module.fail_json(msg=err, rc=rc) + + elif obj.state == 'present': + if not obj.exists: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.create() + elif obj.changed: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.update() + if rc is not None and rc != 0: + module.fail_json(msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py b/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py new file mode 100644 index 000000000..cd4bb45de --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2015, Mathew Davies +# Copyright (c) 2017, Sam Doran +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: elasticsearch_plugin +short_description: Manage Elasticsearch plugins +description: + - Manages Elasticsearch plugins. +author: + - Mathew Davies (@ThePixelDeveloper) + - Sam Doran (@samdoran) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the plugin to install. + required: true + type: str + state: + description: + - Desired state of a plugin. + choices: ["present", "absent"] + default: present + type: str + src: + description: + - Optionally set the source location to retrieve the plugin from. This can be a file:// + URL to install from a local file, or a remote URL. If this is not set, the plugin + location is just based on the name. + - The name parameter must match the descriptor in the plugin ZIP specified. + - Is only used if the state would change, which is solely checked based on the name + parameter. If, for example, the plugin is already installed, changing this has no + effect. + - For ES 1.x use url. + required: false + type: str + url: + description: + - Set exact URL to download the plugin from (Only works for ES 1.x). + - For ES 2.x and higher, use src. + required: false + type: str + timeout: + description: + - "Timeout setting: 30s, 1m, 1h..." + - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. + default: 1m + type: str + force: + description: + - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails." + default: false + type: bool + plugin_bin: + description: + - Location of the plugin binary. If this file is not found, the default plugin binaries will be used. + - The default changed in Ansible 2.4 to None. 
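+            - If not set, the module looks for the binary in
+              C(/usr/share/elasticsearch/bin/elasticsearch-plugin) and
+              C(/usr/share/elasticsearch/bin/plugin), then on the system path.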
+        type: path
+    plugin_dir:
+        description:
+            - Your configured plugin directory specified in Elasticsearch.
+        default: /usr/share/elasticsearch/plugins/
+        type: path
+    proxy_host:
+        description:
+            - Proxy host to use during plugin installation.
+        type: str
+    proxy_port:
+        description:
+            - Proxy port to use during plugin installation.
+        type: str
+    version:
+        description:
+            - Version of the plugin to be installed.
+              If the plugin exists with an earlier version, it will NOT be updated.
+            - The version is only applied when the old C(plugin) command
+              (Elasticsearch 1.x) is used.
+        type: str
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
+  community.general.elasticsearch_plugin:
+    name: mobz/elasticsearch-head
+    state: present
+
+- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x
+  community.general.elasticsearch_plugin:
+    name: mobz/elasticsearch-head
+    version: 2.0.0
+
+- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x
+  community.general.elasticsearch_plugin:
+    name: mobz/elasticsearch-head
+    state: absent
+
+- name: Install a specific plugin in Elasticsearch >= 5.0
+  community.general.elasticsearch_plugin:
+    name: analysis-icu
+    state: present
+
+- name: Install the ingest-geoip plugin with a forced installation
+  community.general.elasticsearch_plugin:
+    name: ingest-geoip
+    state: present
+    force: true
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+    present="install",
+    absent="remove"
+)
+
+PLUGIN_BIN_PATHS = tuple([
+    '/usr/share/elasticsearch/bin/elasticsearch-plugin',
+    '/usr/share/elasticsearch/bin/plugin'
+])
+
+
+def parse_plugin_repo(string):
+    elements = string.split("/")
+
+    # We first consider the simplest form: pluginname
+    repo = elements[0]
+
+    # We consider the form: username/pluginname
+    if len(elements) > 1:
+        repo = elements[1]
+
+    # remove elasticsearch- prefix
+    # remove es- prefix
+    for prefix in ("elasticsearch-", "es-"):
+        if repo.startswith(prefix):
+            return repo[len(prefix):]
+
+    return repo
+
+
+def is_plugin_present(plugin_name, plugin_dir):
+    return os.path.isdir(os.path.join(plugin_dir, plugin_name))
+
+
+def parse_error(string):
+    reason = "ERROR: "
+    try:
+        return string[string.index(reason) + len(reason):].strip()
+    except ValueError:
+        return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
+    cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
+    is_old_command = (os.path.basename(plugin_bin) == 'plugin')
+
+    # Timeout and version are only valid for plugin, not elasticsearch-plugin
+    if is_old_command:
+        if timeout:
+            cmd_args.append("--timeout %s" % timeout)
+
+        if version:
+            # The (versioned) name is appended at the end of this function;
+            # overwriting cmd_args[2] here would clobber the timeout argument.
+            plugin_name = plugin_name + '/' + version
+
+    if proxy_host and proxy_port:
+        cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+    # Legacy ES 1.x
+    if url:
+        cmd_args.append("--url %s" % url)
+
+    if force:
+        cmd_args.append("--batch")
+    if src:
+        cmd_args.append(src)
+    else:
+        cmd_args.append(plugin_name)
+
+    cmd = " ".join(cmd_args)
+
+    if module.check_mode:
+        rc, out, err = 0, "check mode", ""
+    else:
+        rc, out, err = module.run_command(cmd)
+
+    if rc != 0:
+        reason = parse_error(out)
+        module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+    return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+    cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
+
+    cmd = " ".join(cmd_args)
+
+    if module.check_mode:
+        rc, out, err
= 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err) + + return True, cmd, out, err + + +def get_plugin_bin(module, plugin_bin=None): + # Use the plugin_bin that was supplied first before trying other options + valid_plugin_bin = None + if plugin_bin and os.path.isfile(plugin_bin): + valid_plugin_bin = plugin_bin + + else: + # Add the plugin_bin passed into the module to the top of the list of paths to test, + # testing for that binary name first before falling back to the default paths. + bin_paths = list(PLUGIN_BIN_PATHS) + if plugin_bin and plugin_bin not in bin_paths: + bin_paths.insert(0, plugin_bin) + + # Get separate lists of dirs and binary names from the full paths to the + # plugin binaries. + plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths])) + plugin_bins = list(set([os.path.basename(x) for x in bin_paths])) + + # Check for the binary names in the default system paths as well as the path + # specified in the module arguments. + for bin_file in plugin_bins: + valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs) + if valid_plugin_bin: + break + + if not valid_plugin_bin: + module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin) + + return valid_plugin_bin + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), + src=dict(default=None), + url=dict(default=None), + timeout=dict(default="1m"), + force=dict(type='bool', default=False), + plugin_bin=dict(type="path"), + plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), + proxy_host=dict(default=None), + proxy_port=dict(default=None), + version=dict(default=None) + ), + mutually_exclusive=[("src", "url")], + supports_check_mode=True + ) + + name = module.params["name"] + state = module.params["state"] + url = module.params["url"] + src = module.params["src"] + timeout = module.params["timeout"] + force = module.params["force"] + plugin_bin = module.params["plugin_bin"] + plugin_dir = module.params["plugin_dir"] + proxy_host = module.params["proxy_host"] + proxy_port = module.params["proxy_port"] + version = module.params["version"] + + # Search provided path and system paths for valid binary + plugin_bin = get_plugin_bin(module, plugin_bin) + + repo = parse_plugin_repo(name) + present = is_plugin_present(repo, plugin_dir) + + # skip if the state is correct + if (present and state == "present") or (state == "absent" and not present): + module.exit_json(changed=False, name=name, state=state) + + if state == "present": + changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force) + + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py b/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py new file mode 100644 index 000000000..487b6feef --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- 
coding: utf-8 -*- +# +# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: emc_vnx_sg_member + +short_description: Manage storage group member on EMC VNX + + +description: + - "This module manages the members of an existing storage group." + +extends_documentation_fragment: + - community.general.emc.emc_vnx + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + name: + description: + - Name of the Storage group to manage. + required: true + type: str + lunid: + description: + - Lun id to be added. + required: true + type: int + state: + description: + - Indicates the desired lunid state. + - C(present) ensures specified lunid is present in the Storage Group. + - C(absent) ensures specified lunid is absent from Storage Group. + default: present + choices: [ "present", "absent"] + type: str + + +author: + - Luca 'remix_tj' Lorenzetto (@remixtj) +''' + +EXAMPLES = ''' +- name: Add lun to storage group + community.general.emc_vnx_sg_member: + name: sg01 + sp_address: sp1a.fqdn + sp_user: sysadmin + sp_password: sysadmin + lunid: 100 + state: present + +- name: Remove lun from storage group + community.general.emc_vnx_sg_member: + name: sg01 + sp_address: sp1a.fqdn + sp_user: sysadmin + sp_password: sysadmin + lunid: 100 + state: absent +''' + +RETURN = ''' +hluid: + description: LUNID that hosts attached to the storage group will see. + type: int + returned: success +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec + +LIB_IMP_ERR = None +try: + from storops import VNXSystem + from storops.exception import VNXCredentialError, VNXStorageGroupError, \ + VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError + HAS_LIB = True +except Exception: + LIB_IMP_ERR = traceback.format_exc() + HAS_LIB = False + + +def run_module(): + module_args = dict( + name=dict(type='str', required=True), + lunid=dict(type='int', required=True), + state=dict(default='present', choices=['present', 'absent']), + ) + + module_args.update(emc_vnx_argument_spec) + + result = dict( + changed=False, + hluid=None + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + if not HAS_LIB: + module.fail_json(msg=missing_required_lib('storops >= 0.5.10'), + exception=LIB_IMP_ERR) + + sp_user = module.params['sp_user'] + sp_address = module.params['sp_address'] + sp_password = module.params['sp_password'] + alu = module.params['lunid'] + + # if the user is working with this module in only check mode we do not + # want to make any changes to the environment, just return the current + # state with no modifications + if module.check_mode: + return result + + try: + vnx = VNXSystem(sp_address, sp_user, sp_password) + sg = vnx.get_sg(module.params['name']) + if sg.existed: + if module.params['state'] == 'present': + if not sg.has_alu(alu): + try: + result['hluid'] = sg.attach_alu(alu) + result['changed'] = True + except VNXAluAlreadyAttachedError: + result['hluid'] = sg.get_hlu(alu) + except 
(VNXAttachAluError, VNXStorageGroupError) as e:
+                        module.fail_json(msg='Error attaching {0}: '
+                                             '{1} '.format(alu, to_native(e)),
+                                         **result)
+                else:
+                    result['hluid'] = sg.get_hlu(alu)
+            if module.params['state'] == 'absent' and sg.has_alu(alu):
+                try:
+                    sg.detach_alu(alu)
+                    result['changed'] = True
+                except VNXDetachAluNotFoundError:
+                    # not being attached is OK when state is absent
+                    pass
+                except VNXStorageGroupError as e:
+                    module.fail_json(msg='Error detaching alu {0}: '
+                                         '{1} '.format(alu, to_native(e)),
+                                     **result)
+        else:
+            module.fail_json(msg='No such storage group named '
+                                 '{0}'.format(module.params['name']),
+                             **result)
+    except VNXCredentialError as e:
+        module.fail_json(msg='{0}'.format(to_native(e)), **result)
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/etcd3.py b/ansible_collections/community/general/plugins/modules/etcd3.py
new file mode 100644
index 000000000..9cd027406
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/etcd3.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Jean-Philippe Evrard
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type


+DOCUMENTATION = '''
+---
+module: etcd3
+short_description: Set or delete key value pairs from an etcd3 cluster
+requirements:
+  - etcd3
+description:
+  - Sets or deletes values in an etcd3 cluster using its v3 API.
+  - Needs the Python C(etcd3) library to work.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  key:
+    type: str
+    description:
+      - The key where the information is stored in the cluster.
+    required: true
+  value:
+    type: str
+    description:
+      - The information stored under the key.
+    required: true
+  host:
+    type: str
+    description:
+      - The IP address of the cluster.
+    default: 'localhost'
+  port:
+    type: int
+    description:
+      - The port number used to connect to the cluster.
+    default: 2379
+  state:
+    type: str
+    description:
+      - The state of the value for the key.
+      - Can be C(present) or C(absent).
+    required: true
+    choices: [ present, absent ]
+  user:
+    type: str
+    description:
+      - The etcd user to authenticate with.
+  password:
+    type: str
+    description:
+      - The password to use for authentication.
+      - Required if I(user) is defined.
+  ca_cert:
+    type: path
+    description:
+      - The Certificate Authority to use to verify the etcd host.
+      - Required if I(client_cert) and I(client_key) are defined.
+  client_cert:
+    type: path
+    description:
+      - PEM formatted certificate chain file to be used for SSL client authentication.
+      - Required if I(client_key) is defined.
+  client_key:
+    type: path
+    description:
+      - PEM formatted file that contains your private key to be used for SSL client authentication.
+      - Required if I(client_cert) is defined.
+  timeout:
+    type: int
+    description:
+      - The socket level timeout in seconds.
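+notes:
+  - I(ca_cert) can be set on its own to only verify the server identity;
+    client authentication additionally requires I(client_cert) and
+    I(client_key).
+  - Internally, I(client_cert) and I(client_key) are passed to the Python
+    C(etcd3) client as C(cert_cert) and C(cert_key).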
+author: + - Jean-Philippe Evrard (@evrardjp) + - Victor Fauth (@vfauth) +''' + +EXAMPLES = """ +- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379" + community.general.etcd3: + key: "foo" + value: "baz3" + host: "localhost" + port: 2379 + state: "present" + +- name: Authenticate using user/password combination with a timeout of 10 seconds + community.general.etcd3: + key: "foo" + value: "baz3" + state: "present" + user: "someone" + password: "password123" + timeout: 10 + +- name: Authenticate using TLS certificates + community.general.etcd3: + key: "foo" + value: "baz3" + state: "present" + ca_cert: "/etc/ssl/certs/CA_CERT.pem" + client_cert: "/etc/ssl/certs/cert.crt" + client_key: "/etc/ssl/private/key.pem" +""" + +RETURN = ''' +key: + description: The key that was queried + returned: always + type: str +old_value: + description: The previous value in the cluster + returned: always + type: str +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +try: + import etcd3 + HAS_ETCD = True + ETCD_IMP_ERR = None +except ImportError: + ETCD_IMP_ERR = traceback.format_exc() + HAS_ETCD = False + + +def run_module(): + # define the available arguments/parameters that a user can pass to + # the module + module_args = dict( + key=dict(type='str', required=True, no_log=False), + value=dict(type='str', required=True), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=2379), + state=dict(type='str', required=True, choices=['present', 'absent']), + user=dict(type='str'), + password=dict(type='str', no_log=True), + ca_cert=dict(type='path'), + client_cert=dict(type='path'), + client_key=dict(type='path'), + timeout=dict(type='int'), + ) + + # seed the result dict in the object + # we primarily care about changed and state + # change is if this module effectively modified the target + # state will include any data that you want your module to pass back + # for consumption, for example, in a subsequent task + result = dict( + changed=False, + ) + + # the AnsibleModule object will be our abstraction working with Ansible + # this includes instantiation, a couple of common attr would be the + # args/params passed to the execution, as well as if the module + # supports check mode + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_together=[['client_cert', 'client_key'], ['user', 'password']], + ) + + # It is possible to set `ca_cert` to verify the server identity without + # setting `client_cert` or `client_key` to authenticate the client + # so required_together is enough + # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence + # of either `client_cert` or `client_key` is enough + if module.params['ca_cert'] is None and module.params['client_cert'] is not None: + module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.") + + result['key'] = module.params.get('key') + module.params['cert_cert'] = module.params.pop('client_cert') + module.params['cert_key'] = module.params.pop('client_key') + + if not HAS_ETCD: + module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR) + + allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', + 'timeout', 'user', 'password'] + # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is + # the minimum supported 
version + # client_params = {key: value for key, value in module.params.items() if key in allowed_keys} + client_params = dict() + for key, value in module.params.items(): + if key in allowed_keys: + client_params[key] = value + try: + etcd = etcd3.client(**client_params) + except Exception as exp: + module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)), + exception=traceback.format_exc()) + try: + cluster_value = etcd.get(module.params['key']) + except Exception as exp: + module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)), + exception=traceback.format_exc()) + + # Make the cluster_value[0] a string for string comparisons + result['old_value'] = to_native(cluster_value[0]) + + if module.params['state'] == 'absent': + if cluster_value[0] is not None: + if module.check_mode: + result['changed'] = True + else: + try: + etcd.delete(module.params['key']) + except Exception as exp: + module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)), + exception=traceback.format_exc()) + else: + result['changed'] = True + elif module.params['state'] == 'present': + if result['old_value'] != module.params['value']: + if module.check_mode: + result['changed'] = True + else: + try: + etcd.put(module.params['key'], module.params['value']) + except Exception as exp: + module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)), + exception=traceback.format_exc()) + else: + result['changed'] = True + else: + module.fail_json(msg="State not recognized") + + # manipulate or modify the state as needed (this is going to be the + # part where your module will do what it needs to do) + + # during the execution of the module, if there is an exception or a + # conditional state that effectively causes a failure, run + # AnsibleModule.fail_json() to pass in the message and the result + + # in the event of a successful module execution, you will want to + # simple AnsibleModule.exit_json(), passing the key/value results + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/facter.py b/ansible_collections/community/general/plugins/modules/facter.py new file mode 100644 index 000000000..e7cf52e20 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/facter.py @@ -0,0 +1,80 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2012, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: facter +short_description: Runs the discovery program I(facter) on the remote system +description: + - Runs the C(facter) discovery program + (U(https://github.com/puppetlabs/facter)) on the remote system, returning + JSON data that can be useful for inventory purposes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + arguments: + description: + - Specifies arguments for facter. 
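+      - For example C(-p) to load Puppet-provided facts, or a list of fact
+        names to limit the output to those facts (see the examples below).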
+ type: list + elements: str +requirements: + - facter + - ruby-json +author: + - Ansible Core Team + - Michael DeHaan +''' + +EXAMPLES = ''' +# Example command-line invocation +# ansible www.example.net -m facter + +- name: Execute facter no arguments + community.general.facter: + +- name: Execute facter with arguments + community.general.facter: + arguments: + - -p + - system_uptime + - timezone + - is_virtual +''' +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + arguments=dict(required=False, type='list', elements='str') + ) + ) + + facter_path = module.get_bin_path( + 'facter', + opt_dirs=['/opt/puppetlabs/bin']) + + cmd = [facter_path, "--json"] + if module.params['arguments']: + cmd += module.params['arguments'] + + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(**json.loads(out)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/filesize.py b/ansible_collections/community/general/plugins/modules/filesize.py new file mode 100644 index 000000000..b3eb90d61 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/filesize.py @@ -0,0 +1,492 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, quidame +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: filesize + +short_description: Create a file with a given size, or resize it if it exists + +description: + - This module is a simple wrapper around C(dd) to create, extend or truncate + a file, given its size. It can be used to manage swap files (that require + contiguous blocks) or alternatively, huge sparse files. + +author: + - quidame (@quidame) + +version_added: "3.0.0" + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + path: + description: + - Path of the regular file to create or resize. + type: path + required: true + size: + description: + - Requested size of the file. + - The value is a number (either C(int) or C(float)) optionally followed + by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or + C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB), + and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of + C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB); + C(G), C(g) or C(GiB) (= 1024MiB); and so on. + - If the multiplicative suffix is not provided, the value is treated as + an integer number of blocks of I(blocksize) bytes each (float values + are rounded to the closest integer). + - When the I(size) value is equal to the current file size, does nothing. + - When the I(size) value is bigger than the current file size, bytes from + I(source) (if I(sparse) is not C(false)) are appended to the file + without truncating it, in other words, without modifying the existing + bytes of the file. + - When the I(size) value is smaller than the current file size, it is + truncated to the requested value without modifying bytes before this + value. + - That means that a file of any arbitrary size can be grown to any other + arbitrary size, and then resized down to its initial size without + modifying its initial content. 
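+      - For example, C(2TB) means C(2*1000^4) bytes, C(2TiB) means
+        C(2*1024^4) bytes, and a unitless C(1024) means 1024 blocks of
+        I(blocksize) bytes each (C(524288) bytes with 512-byte blocks).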
+ type: raw + required: true + blocksize: + description: + - Size of blocks, in bytes if not followed by a multiplicative suffix. + - The numeric value (before the unit) C(MUST) be an integer (or a C(float) + if it equals an integer). + - If not set, the size of blocks is guessed from the OS and commonly + results in C(512) or C(4096) bytes, that is used internally by the + module or when I(size) has no unit. + type: raw + source: + description: + - Device or file that provides input data to provision the file. + - This parameter is ignored when I(sparse=true). + type: path + default: /dev/zero + force: + description: + - Whether or not to overwrite the file if it exists, in other words, to + truncate it from 0. When C(true), the module is not idempotent, that + means it always reports I(changed=true). + - I(force=true) and I(sparse=true) are mutually exclusive. + type: bool + default: false + sparse: + description: + - Whether or not the file to create should be a sparse file. + - This option is effective only on newly created files, or when growing a + file, only for the bytes to append. + - This option is not supported on OSes or filesystems not supporting sparse files. + - I(force=true) and I(sparse=true) are mutually exclusive. + type: bool + default: false + unsafe_writes: + description: + - This option is silently ignored. This module always modifies file + size in-place. + +requirements: + - dd (Data Duplicator) in PATH + +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes + +seealso: + - name: dd(1) manpage for Linux + description: Manual page of the GNU/Linux's dd implementation (from GNU coreutils). + link: https://man7.org/linux/man-pages/man1/dd.1.html + + - name: dd(1) manpage for IBM AIX + description: Manual page of the IBM AIX's dd implementation. + link: https://www.ibm.com/support/knowledgecenter/ssw_aix_72/d_commands/dd.html + + - name: dd(1) manpage for Mac OSX + description: Manual page of the Mac OSX's dd implementation. + link: https://www.unix.com/man-page/osx/1/dd/ + + - name: dd(1M) manpage for Solaris + description: Manual page of the Oracle Solaris's dd implementation. + link: https://docs.oracle.com/cd/E36784_01/html/E36871/dd-1m.html + + - name: dd(1) manpage for FreeBSD + description: Manual page of the FreeBSD's dd implementation. + link: https://www.freebsd.org/cgi/man.cgi?dd(1) + + - name: dd(1) manpage for OpenBSD + description: Manual page of the OpenBSD's dd implementation. + link: https://man.openbsd.org/dd + + - name: dd(1) manpage for NetBSD + description: Manual page of the NetBSD's dd implementation. + link: https://man.netbsd.org/dd.1 + + - name: busybox(1) manpage for Linux + description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation. 
+ link: https://www.unix.com/man-page/linux/1/busybox +''' + +EXAMPLES = r''' +- name: Create a file of 1G filled with null bytes + community.general.filesize: + path: /var/bigfile + size: 1G + +- name: Extend the file to 2G (2*1024^3) + community.general.filesize: + path: /var/bigfile + size: 2G + +- name: Reduce the file to 2GB (2*1000^3) + community.general.filesize: + path: /var/bigfile + size: 2GB + +- name: Fill a file with random bytes for backing a LUKS device + community.general.filesize: + path: ~/diskimage.luks + size: 512.0 MiB + source: /dev/urandom + +- name: Take a backup of MBR boot code into a file, overwriting it if it exists + community.general.filesize: + path: /media/sdb1/mbr.bin + size: 440B + source: /dev/sda + force: true + +- name: Create/resize a sparse file of/to 8TB + community.general.filesize: + path: /var/local/sparsefile + size: 8TB + sparse: true + +- name: Create a file with specific size and attributes, to be used as swap space + community.general.filesize: + path: /var/swapfile + size: 2G + blocksize: 512B + mode: u=rw,go= + owner: root + group: root +''' + +RETURN = r''' +cmd: + description: Command executed to create or resize the file. + type: str + returned: when changed or failed + sample: /usr/bin/dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024 + +filesize: + description: Dictionary of sizes related to the file. + type: dict + returned: always + contains: + blocks: + description: Number of blocks in the file. + type: int + sample: 500 + blocksize: + description: Size of the blocks in bytes. + type: int + sample: 1024 + bytes: + description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize). + type: int + sample: 512000 + iec: + description: Size of the file, in human-readable format, following IEC standard. + type: str + sample: 500.0 KiB + si: + description: Size of the file, in human-readable format, following SI standard. + type: str + sample: 512.0 kB + +size_diff: + description: Difference (positive or negative) between old size and new size, in bytes. + type: int + sample: -1234567890 + returned: always + +path: + description: Realpath of the file if it is a symlink, otherwise the same than module's param. + type: str + sample: /var/swap0 + returned: always +''' + + +import re +import os +import math + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +# These are the multiplicative suffixes understood (or returned) by dd and +# others (ls, df, lvresize, lsblk...). +SIZE_UNITS = dict( + B=1, + kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1, + MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2, + GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3, + TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4, + PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5, + EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6, + ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7, + YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8, +) + + +def bytes_to_human(size, iec=False): + """Return human-readable size (with SI or IEC suffix) from bytes. This is + only to populate the returned result of the module, not to handle the + file itself (we only rely on bytes for that). 
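+
+    For example, matching the RETURN samples of this module:
+        bytes_to_human(512000)            ->  '512.0 kB'
+        bytes_to_human(512000, iec=True)  ->  '500.0 KiB'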
+ """ + unit = 'B' + for (u, v) in SIZE_UNITS.items(): + if size < v: + continue + if iec: + if 'i' not in u or size / v >= 1024: + continue + else: + if v % 5 or size / v >= 1000: + continue + unit = u + + hsize = round(size / SIZE_UNITS[unit], 2) + if unit == 'B': + hsize = int(hsize) + + unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit) + if unit == 'KB': + unit = 'kB' + + return '%s %s' % (str(hsize), unit) + + +def smart_blocksize(size, unit, product, bsize): + """Ensure the total size can be written as blocks*blocksize, with blocks + and blocksize being integers. + """ + if not product % bsize: + return bsize + + # Basically, for a file of 8kB (=8000B), system's block size of 4096 bytes + # is not usable. The smallest integer number of kB to work with 512B blocks + # is 64, the nexts are 128, 192, 256, and so on. + + unit_size = SIZE_UNITS[unit] + + if size == int(size): + if unit_size > SIZE_UNITS['MiB']: + if unit_size % 5: + return SIZE_UNITS['MiB'] + return SIZE_UNITS['MB'] + return unit_size + + if unit == 'B': + raise AssertionError("byte is the smallest unit and requires an integer value") + + if 0 < product < bsize: + return product + + for bsz in (1024, 1000, 512, 256, 128, 100, 64, 32, 16, 10, 8, 4, 2): + if not product % bsz: + return bsz + return 1 + + +def split_size_unit(string, isint=False): + """Split a string between the size value (int or float) and the unit. + Support optional space(s) between the numeric value and the unit. + """ + unit = re.sub(r'(\d|\.)', r'', string).strip() + value = float(re.sub(r'%s' % unit, r'', string).strip()) + if isint and unit in ('B', ''): + if int(value) != value: + raise AssertionError("invalid blocksize value: bytes require an integer value") + + if not unit: + unit = None + product = int(round(value)) + else: + if unit not in SIZE_UNITS.keys(): + raise AssertionError("invalid size unit (%s): unit must be one of %s, or none." % + (unit, ', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get)))) + product = int(round(value * SIZE_UNITS[unit])) + return value, unit, product + + +def size_string(value): + """Convert a raw value to a string, but only if it is an integer, a float + or a string itself. + """ + if not isinstance(value, (int, float, str)): + raise AssertionError("invalid value type (%s): size must be integer, float or string" % type(value)) + return str(value) + + +def size_spec(args): + """Return a dictionary with size specifications, especially the size in + bytes (after rounding it to an integer number of blocks). 
+ """ + blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2] + if blocksize_in_bytes == 0: + raise AssertionError("block size cannot be equal to zero") + + size_value, size_unit, size_result = split_size_unit(args['size']) + if not size_unit: + blocks = int(math.ceil(size_value)) + else: + blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes) + blocks = int(math.ceil(size_result / blocksize_in_bytes)) + + args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes) + args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes, + iec=bytes_to_human(round_bytes, True), + si=bytes_to_human(round_bytes)) + return args['size_spec'] + + +def current_size(args): + """Return the size of the file at the given location if it exists, or None.""" + path = args['path'] + if os.path.exists(path): + if not os.path.isfile(path): + raise AssertionError("%s exists but is not a regular file" % path) + args['file_size'] = os.stat(path).st_size + else: + args['file_size'] = None + return args['file_size'] + + +def complete_dd_cmdline(args, dd_cmd): + """Compute dd options to grow or truncate a file.""" + if args['file_size'] == args['size_spec']['bytes'] and not args['force']: + # Nothing to do. + return list() + + bs = args['size_spec']['blocksize'] + + # For sparse files (create, truncate, grow): write count=0 block. + if args['sparse']: + seek = args['size_spec']['blocks'] + elif args['force'] or not os.path.exists(args['path']): # Create file + seek = 0 + elif args['size_diff'] < 0: # Truncate file + seek = args['size_spec']['blocks'] + elif args['size_diff'] % bs: # Grow file + seek = int(args['file_size'] / bs) + 1 + else: + seek = int(args['file_size'] / bs) + + count = args['size_spec']['blocks'] - seek + dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)] + + return dd_cmd + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True), + size=dict(type='raw', required=True), + blocksize=dict(type='raw'), + source=dict(type='path', default='/dev/zero'), + sparse=dict(type='bool', default=False), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + add_file_common_args=True, + ) + args = dict(**module.params) + diff = dict(before=dict(), after=dict()) + + if args['sparse'] and args['force']: + module.fail_json(msg='parameters values are mutually exclusive: force=true|sparse=true') + if not os.path.exists(os.path.dirname(args['path'])): + module.fail_json(msg='parent directory of the file must exist prior to run this module') + if not args['blocksize']: + args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize) + + try: + args['size'] = size_string(args['size']) + args['blocksize'] = size_string(args['blocksize']) + initial_filesize = current_size(args) + size_descriptors = size_spec(args) + except AssertionError as err: + module.fail_json(msg=to_native(err)) + + expected_filesize = size_descriptors['bytes'] + if initial_filesize: + args['size_diff'] = expected_filesize - initial_filesize + diff['after']['size'] = expected_filesize + diff['before']['size'] = initial_filesize + + result = dict( + changed=args['force'], + size_diff=args['size_diff'], + path=args['path'], + filesize=size_descriptors) + + dd_bin = module.get_bin_path('dd', True) + dd_cmd = [dd_bin, 'if=%s' % args['source'], 'of=%s' % args['path']] + + if expected_filesize != initial_filesize or args['force']: + result['cmd'] = ' 
'.join(complete_dd_cmdline(args, dd_cmd)) + if module.check_mode: + result['changed'] = True + else: + result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd) + + diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args) + if initial_filesize: + result['size_diff'] = result_filesize - initial_filesize + if not args['force']: + result['changed'] = result_filesize != initial_filesize + + if result['rc']: + msg = "dd error while creating file %s with size %s from source %s: see stderr for details" % ( + args['path'], args['size'], args['source']) + module.fail_json(msg=msg, **result) + if result_filesize != expected_filesize: + msg = "module error while creating file %s with size %s from source %s: file is %s bytes long" % ( + args['path'], args['size'], args['source'], result_filesize) + module.fail_json(msg=msg, **result) + + # dd follows symlinks, and so does this module, while file module doesn't. + # If we call it, this is to manage file's mode, owner and so on, not the + # symlink's ones. + file_params = dict(**module.params) + if os.path.islink(args['path']): + file_params['path'] = result['path'] = os.path.realpath(args['path']) + + if args['file_size'] is not None: + file_args = module.load_file_common_arguments(file_params) + result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) + result['diff'] = diff + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/filesystem.py b/ansible_collections/community/general/plugins/modules/filesystem.py new file mode 100644 index 000000000..0e6b815b4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/filesystem.py @@ -0,0 +1,606 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, quidame +# Copyright (c) 2013, Alexander Bulimov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +author: + - Alexander Bulimov (@abulimov) + - quidame (@quidame) +module: filesystem +short_description: Makes a filesystem +description: + - This module creates a filesystem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - If I(state=present), the filesystem is created if it doesn't already + exist, that is the default behaviour if I(state) is omitted. + - If I(state=absent), filesystem signatures on I(dev) are wiped if it + contains a filesystem (as known by C(blkid)). + - When I(state=absent), all other options but I(dev) are ignored, and the + module doesn't fail if the device I(dev) doesn't actually exist. + type: str + choices: [ present, absent ] + default: present + version_added: 1.3.0 + fstype: + choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] + description: + - Filesystem type to be created. This option is required with + I(state=present) (or if I(state) is omitted). + - ufs support has been added in community.general 3.4.0. + type: str + aliases: [type] + dev: + description: + - Target path to block device (Linux) or character device (FreeBSD) or + regular file (both). 
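+      - For example C(/dev/sdb1) for a Linux block device, or a disk image
+        such as C(/path/to/disk.img) (see the examples below).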
+ - When setting Linux-specific filesystem types on FreeBSD, this module + only works when applying to regular files, aka disk images. + - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support + a regular file as their target I(dev). + - Support for character devices on FreeBSD has been added in community.general 3.4.0. + type: path + required: true + aliases: [device] + force: + description: + - If C(true), allows to create new filesystem on devices that already has filesystem. + type: bool + default: false + resizefs: + description: + - If C(true), if the block device and filesystem size differ, grow the filesystem into the space. + - Supported for C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. + Attempts to resize other filesystem types will fail. + - XFS Will only grow if mounted. Currently, the module is based on commands + from C(util-linux) package to perform operations, so resizing of XFS is + not supported on FreeBSD systems. + - vFAT will likely fail if C(fatresize < 1.04). + type: bool + default: false + opts: + description: + - List of options to be passed to C(mkfs) command. + type: str +requirements: + - Uses specific tools related to the I(fstype) for creating or resizing a + filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on). + - Uses generic tools mostly related to the Operating System (Linux or + FreeBSD) or available on both, as C(blkid). + - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required. +notes: + - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) + is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also + unable to detect a filesystem), this filesystem is overwritten even if + I(force) is C(false). + - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide + a C(blkid) command that is compatible with this module. However, these + packages conflict with each other, and only the C(util-linux) package + provides the command required to not fail when I(state=absent). +seealso: + - module: community.general.filesize + - module: ansible.posix.mount +''' + +EXAMPLES = ''' +- name: Create a ext2 filesystem on /dev/sdb1 + community.general.filesystem: + fstype: ext2 + dev: /dev/sdb1 + +- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks + community.general.filesystem: + fstype: ext4 + dev: /dev/sdb1 + opts: -cc + +- name: Blank filesystem signature on /dev/sdb1 + community.general.filesystem: + dev: /dev/sdb1 + state: absent + +- name: Create a filesystem on top of a regular file + community.general.filesystem: + dev: /path/to/disk.img + fstype: vfat +''' + +import os +import platform +import re +import stat + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class Device(object): + def __init__(self, module, path): + self.module = module + self.path = path + + def size(self): + """ Return size in bytes of device. 
Returns int """ + statinfo = os.stat(self.path) + if stat.S_ISBLK(statinfo.st_mode): + blockdev_cmd = self.module.get_bin_path("blockdev", required=True) + dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) + devsize_in_bytes = int(out) + elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD': + diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True) + dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True) + devsize_in_bytes = int(out.split()[2]) + elif os.path.isfile(self.path): + devsize_in_bytes = os.path.getsize(self.path) + else: + self.module.fail_json(changed=False, msg="Target device not supported: %s" % self) + + return devsize_in_bytes + + def get_mountpoint(self): + """Return (first) mountpoint of device. Returns None when not mounted.""" + cmd_findmnt = self.module.get_bin_path("findmnt", required=True) + + # find mountpoint + rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output", + "TARGET", "--source", self.path], check_rc=False) + if rc != 0: + mountpoint = None + else: + mountpoint = mountpoint.split('\n')[0] + + return mountpoint + + def __str__(self): + return self.path + + +class Filesystem(object): + + MKFS = None + MKFS_FORCE_FLAGS = [] + INFO = None + GROW = None + GROW_MAX_SPACE_FLAGS = [] + GROW_MOUNTPOINT_ONLY = False + + LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} + + def __init__(self, module): + self.module = module + + @property + def fstype(self): + return type(self).__name__ + + def get_fs_size(self, dev): + """Return size in bytes of filesystem on device (integer). + Should query the info with a per-fstype command that can access the + device whenever it is mounted or not, and parse the command output. + Parser must ensure to return an integer, or raise a ValueError. + """ + raise NotImplementedError() + + def create(self, opts, dev): + if self.module.check_mode: + return + + mkfs = self.module.get_bin_path(self.MKFS, required=True) + cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)] + self.module.run_command(cmd, check_rc=True) + + def wipefs(self, dev): + if self.module.check_mode: + return + + # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above) + # that is ported to FreeBSD. The use of dd as a portable fallback is + # not doable here if it needs get_mountpoint() (to prevent corruption of + # a mounted filesystem), since 'findmnt' is not available on FreeBSD, + # even in util-linux port for this OS. + wipefs = self.module.get_bin_path('wipefs', required=True) + cmd = [wipefs, "--all", str(dev)] + self.module.run_command(cmd, check_rc=True) + + def grow_cmd(self, target): + """Build and return the resizefs commandline as list.""" + cmdline = [self.module.get_bin_path(self.GROW, required=True)] + cmdline += self.GROW_MAX_SPACE_FLAGS + [target] + return cmdline + + def grow(self, dev): + """Get dev and fs size and compare. 
+
+        Returns stdout of used command."""
+        devsize_in_bytes = dev.size()
+
+        try:
+            fssize_in_bytes = self.get_fs_size(dev)
+        except NotImplementedError:
+            self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype)
+        except ValueError as err:
+            self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err)))
+            self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev))
+
+        if not fssize_in_bytes < devsize_in_bytes:
+            self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+        elif self.module.check_mode:
+            self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev))
+
+        if self.GROW_MOUNTPOINT_ONLY:
+            mountpoint = dev.get_mountpoint()
+            if not mountpoint:
+                self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype))
+            grow_target = mountpoint
+        else:
+            grow_target = str(dev)
+
+        dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True)
+        return out
+
+
+class Ext(Filesystem):
+    MKFS_FORCE_FLAGS = ['-F']
+    INFO = 'tune2fs'
+    GROW = 'resize2fs'
+
+    def get_fs_size(self, dev):
+        """Get block count and block size and return their product."""
+        cmd = self.module.get_bin_path(self.INFO, required=True)
+        dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+
+        block_count = block_size = None
+        for line in out.splitlines():
+            if 'Block count:' in line:
+                block_count = int(line.split(':')[1].strip())
+            elif 'Block size:' in line:
+                block_size = int(line.split(':')[1].strip())
+            if None not in (block_size, block_count):
+                break
+        else:
+            raise ValueError(repr(out))
+
+        return block_size * block_count
+
+
+class Ext2(Ext):
+    MKFS = 'mkfs.ext2'
+
+
+class Ext3(Ext):
+    MKFS = 'mkfs.ext3'
+
+
+class Ext4(Ext):
+    MKFS = 'mkfs.ext4'
+
+
+class XFS(Filesystem):
+    MKFS = 'mkfs.xfs'
+    MKFS_FORCE_FLAGS = ['-f']
+    INFO = 'xfs_info'
+    GROW = 'xfs_growfs'
+    GROW_MOUNTPOINT_ONLY = True
+
+    def get_fs_size(self, dev):
+        """Get bsize and blocks and return their product."""
+        cmdline = [self.module.get_bin_path(self.INFO, required=True)]
+
+        # Depending on the version, xfs_info can get info from the device
+        # whether it is mounted or not, or only if unmounted, or only if
+        # mounted, or not at all. Every version so far can query info from
+        # the mountpoint, so try that first, and use the device as the last
+        # resort: it may or may not work.
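+        # Illustrative note, assuming a typical xfs_info output layout: a
+        # line such as
+        #   data     =              bsize=4096   blocks=262144, imaxpct=25
+        # is split on '=' below, so block_size is read as 4096 from the
+        # field after 'bsize' and block_count as 262144 from the field after
+        # 'blocks', giving 4096 * 262144 = 1073741824 bytes.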
+        mountpoint = dev.get_mountpoint()
+        if mountpoint:
+            cmdline += [mountpoint]
+        else:
+            cmdline += [str(dev)]
+        dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV)
+
+        block_size = block_count = None
+        for line in out.splitlines():
+            col = line.split('=')
+            if col[0].strip() == 'data':
+                if col[1].strip() == 'bsize':
+                    block_size = int(col[2].split()[0])
+                if col[2].split()[1] == 'blocks':
+                    block_count = int(col[3].split(',')[0])
+            if None not in (block_size, block_count):
+                break
+        else:
+            raise ValueError(repr(out))
+
+        return block_size * block_count
+
+
+class Reiserfs(Filesystem):
+    MKFS = 'mkfs.reiserfs'
+    MKFS_FORCE_FLAGS = ['-q']
+
+
+class Btrfs(Filesystem):
+    MKFS = 'mkfs.btrfs'
+    INFO = 'btrfs'
+    GROW = 'btrfs'
+    GROW_MAX_SPACE_FLAGS = ['filesystem', 'resize', 'max']
+    GROW_MOUNTPOINT_ONLY = True
+
+    def __init__(self, module):
+        super(Btrfs, self).__init__(module)
+        mkfs = self.module.get_bin_path(self.MKFS, required=True)
+        dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True)
+        match = re.search(r" v([0-9.]+)", stdout)
+        if not match:
+            # v0.20-rc1 prints its version to stderr
+            match = re.search(r" v([0-9.]+)", stderr)
+        if match:
+            # v0.20-rc1 does not have the force option; it was added in the
+            # following version, v3.12
+            if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
+                self.MKFS_FORCE_FLAGS = ['-f']
+        else:
+            # assume version is greater or equal to 3.12
+            self.MKFS_FORCE_FLAGS = ['-f']
+            self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
+
+    def get_fs_size(self, dev):
+        """Return size in bytes of filesystem on device (integer)."""
+        mountpoint = dev.get_mountpoint()
+        if not mountpoint:
+            self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype))
+
+        dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO),
+                                                        'filesystem', 'usage', '-b', mountpoint], check_rc=True)
+        for line in stdout.splitlines():
+            if "Device size" in line:
+                return int(line.split()[-1])
+        raise ValueError(repr(stdout))
+
+
+class Ocfs2(Filesystem):
+    MKFS = 'mkfs.ocfs2'
+    MKFS_FORCE_FLAGS = ['-Fx']
+
+
+class F2fs(Filesystem):
+    MKFS = 'mkfs.f2fs'
+    INFO = 'dump.f2fs'
+    GROW = 'resize.f2fs'
+
+    def __init__(self, module):
+        super(F2fs, self).__init__(module)
+        mkfs = self.module.get_bin_path(self.MKFS, required=True)
+        dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV)
+        # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
+        # mkfs.f2fs displays version since v1.2.0
+        match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
+        if match is not None:
+            # Since 1.9.0, mkfs.f2fs checks for an existing filesystem before
+            # formatting; before that version, the -f switch wasn't needed
+            if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
+                self.MKFS_FORCE_FLAGS = ['-f']
+
+    def get_fs_size(self, dev):
+        """Get sector size and total FS sectors and return their product."""
+        cmd = self.module.get_bin_path(self.INFO, required=True)
+        dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+        sector_size = sector_count = None
+        for line in out.splitlines():
+            if 'Info: sector size = ' in line:
+                # expected: 'Info: sector size = 512'
+                sector_size = int(line.split()[4])
+            elif 'Info: total FS sectors = ' in line:
+                # expected: 'Info: total FS sectors = 102400 (50 MB)'
+                sector_count = int(line.split()[5])
+            if None not in (sector_size, sector_count):
+
break + else: + raise ValueError(repr(out)) + + return sector_size * sector_count + + +class VFAT(Filesystem): + INFO = 'fatresize' + GROW = 'fatresize' + GROW_MAX_SPACE_FLAGS = ['-s', 'max'] + + def __init__(self, module): + super(VFAT, self).__init__(module) + if platform.system() == 'FreeBSD': + self.MKFS = 'newfs_msdos' + else: + self.MKFS = 'mkfs.vfat' + + def get_fs_size(self, dev): + """Get and return size of filesystem, in bytes.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + fssize = None + for line in out.splitlines()[1:]: + parts = line.split(':', 1) + if len(parts) < 2: + continue + param, value = parts + if param.strip() in ('Size', 'Cur size'): + fssize = int(value.strip()) + break + else: + raise ValueError(repr(out)) + + return fssize + + +class LVM(Filesystem): + MKFS = 'pvcreate' + MKFS_FORCE_FLAGS = ['-f'] + INFO = 'pvs' + GROW = 'pvresize' + + def get_fs_size(self, dev): + """Get and return PV size, in bytes.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) + pv_size = int(size) + return pv_size + + +class Swap(Filesystem): + MKFS = 'mkswap' + MKFS_FORCE_FLAGS = ['-f'] + + +class UFS(Filesystem): + MKFS = 'newfs' + INFO = 'dumpfs' + GROW = 'growfs' + GROW_MAX_SPACE_FLAGS = ['-y'] + + def get_fs_size(self, dev): + """Get providersize and fragment size and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) + + fragmentsize = providersize = None + for line in out.splitlines(): + if line.startswith('fsize'): + fragmentsize = int(line.split()[1]) + elif 'providersize' in line: + providersize = int(line.split()[-1]) + if None not in (fragmentsize, providersize): + break + else: + raise ValueError(repr(out)) + + return fragmentsize * providersize + + +FILESYSTEMS = { + 'ext2': Ext2, + 'ext3': Ext3, + 'ext4': Ext4, + 'ext4dev': Ext4, + 'f2fs': F2fs, + 'reiserfs': Reiserfs, + 'xfs': XFS, + 'btrfs': Btrfs, + 'vfat': VFAT, + 'ocfs2': Ocfs2, + 'LVM2_member': LVM, + 'swap': Swap, + 'ufs': UFS, +} + + +def main(): + friendly_names = { + 'lvm': 'LVM2_member', + } + + fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys()) + + # There is no "single command" to manipulate filesystems, so we map them all out and their options + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + fstype=dict(type='str', aliases=['type'], choices=list(fstypes)), + dev=dict(type='path', required=True, aliases=['device']), + opts=dict(type='str'), + force=dict(type='bool', default=False), + resizefs=dict(type='bool', default=False), + ), + required_if=[ + ('state', 'present', ['fstype']) + ], + supports_check_mode=True, + ) + + state = module.params['state'] + dev = module.params['dev'] + fstype = module.params['fstype'] + opts = module.params['opts'] + force = module.params['force'] + resizefs = module.params['resizefs'] + + mkfs_opts = [] + if opts is not None: + mkfs_opts = opts.split() + + changed = False + + if not os.path.exists(dev): + msg = "Device %s not found." 
% dev
+        if state == "present":
+            module.fail_json(msg=msg)
+        else:
+            module.exit_json(msg=msg)
+
+    dev = Device(module, dev)
+
+    # If blkid/fstyp is unable to identify an existing filesystem, the device
+    # is considered empty, and any existing filesystem will be overwritten
+    # even if force is not enabled.
+    cmd = module.get_bin_path('blkid', required=True)
+    rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)])
+    fs = raw_fs.strip()
+    if not fs and platform.system() == 'FreeBSD':
+        cmd = module.get_bin_path('fstyp', required=True)
+        rc, raw_fs, err = module.run_command([cmd, str(dev)])
+        fs = raw_fs.strip()
+
+    if state == "present":
+        if fstype in friendly_names:
+            fstype = friendly_names[fstype]
+
+        try:
+            klass = FILESYSTEMS[fstype]
+        except KeyError:
+            module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
+
+        filesystem = klass(module)
+
+        same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
+        if same_fs and not resizefs and not force:
+            module.exit_json(changed=False)
+        elif same_fs and resizefs:
+            if not filesystem.GROW:
+                module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+
+            out = filesystem.grow(dev)
+
+            module.exit_json(changed=True, msg=out)
+        elif fs and not force:
+            module.fail_json(msg="'%s' is already used as %s, use force=true to overwrite" % (dev, fs), rc=rc, err=err)
+
+        # create fs
+        filesystem.create(mkfs_opts, dev)
+        changed = True
+
+    elif fs:
+        # wipe fs signatures
+        filesystem = Filesystem(module)
+        filesystem.wipefs(dev)
+        changed = True
+
+    module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/flatpak.py b/ansible_collections/community/general/plugins/modules/flatpak.py
new file mode 100644
index 000000000..40a13736f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/flatpak.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 John Kwiatkoski (@JayKayy)
+# Copyright (c) 2018 Alexander Bethke (@oolongbrothers)
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak
+short_description: Manage flatpaks
+description:
+  - Allows users to add or remove flatpaks.
+  - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+author:
+  - John Kwiatkoski (@JayKayy)
+  - Alexander Bethke (@oolongbrothers)
+requirements:
+  - flatpak
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  executable:
+    description:
+      - The path to the C(flatpak) executable to use.
+      - By default, this module looks for the C(flatpak) executable on the path.
+    type: path
+    default: flatpak
+  method:
+    description:
+      - The installation method to use.
+      - Defines whether the I(flatpak) is supposed to be installed globally for the whole C(system)
+        or only for the current C(user).
+    type: str
+    choices: [ system, user ]
+    default: system
+  name:
+    description:
+      - The name of the flatpak to manage. To operate on several packages, this
+        can accept a list of packages.
+      - When used with I(state=present), I(name) can be specified as a URL to a
+        C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
+      - Both C(https://) and C(http://) URLs are supported.
+      - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
+        to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
+      - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
+        format.
+      - When supplying a URL with I(state=absent), the module will try to match the
+        installed flatpak based on the name of the flatpakref to remove it. However, there is no
+        guarantee that the names of the flatpakref file and the reverse DNS name of the installed
+        flatpak do match.
+    type: list
+    elements: str
+    required: true
+  no_dependencies:
+    description:
+      - Whether to omit installing runtime dependencies.
+      - This parameter is primarily implemented for integration testing this module.
+        There might, however, be some use cases where you would want to have this, like when you are
+        packaging your own flatpaks.
+    type: bool
+    default: false
+    version_added: 3.2.0
+  remote:
+    description:
+      - The flatpak remote (repository) to install the flatpak from.
+      - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before
+        you can use this.
+      - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+    type: str
+    default: flathub
+  state:
+    description:
+      - Indicates the desired package state.
+    choices: [ absent, present ]
+    type: str
+    default: present
+'''
+
+EXAMPLES = r'''
+- name: Install the spotify flatpak
+  community.general.flatpak:
+    name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
+    state: present
+
+- name: Install the gedit flatpak package without dependencies (not recommended)
+  community.general.flatpak:
+    name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
+    state: present
+    no_dependencies: true
+
+- name: Install the gedit package from flathub for current user
+  community.general.flatpak:
+    name: org.gnome.gedit
+    state: present
+    method: user
+
+- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
+  community.general.flatpak:
+    name: org.gnome.Calendar
+    state: present
+    remote: gnome
+
+- name: Install multiple packages
+  community.general.flatpak:
+    name:
+      - org.gimp.GIMP
+      - org.inkscape.Inkscape
+      - org.mozilla.firefox
+
+- name: Remove the gedit flatpak
+  community.general.flatpak:
+    name: org.gnome.gedit
+    state: absent
+
+- name: Remove multiple packages
+  community.general.flatpak:
+    name:
+      - org.gimp.GIMP
+      - org.inkscape.Inkscape
+      - org.mozilla.firefox
+    state: absent
+'''
+
+RETURN = r'''
+command:
+  description: The exact flatpak command that was executed
+  returned: When a flatpak command has been executed
+  type: str
+  sample: "/usr/bin/flatpak install --user --noninteractive flathub org.gnome.Calculator"
+msg:
+  description: Module error message
+  returned: failure
+  type: str
+  sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc: + description: Return code from flatpak binary + returned: When a flatpak command has been executed + type: int + sample: 0 +stderr: + description: Error output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE" +stdout: + description: Output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n" +''' + +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application" + + +def install_flat(module, binary, remote, names, method, no_dependencies): + """Add new flatpaks.""" + global result # pylint: disable=global-variable-not-assigned + uri_names = [] + id_names = [] + for name in names: + if name.startswith('http://') or name.startswith('https://'): + uri_names.append(name) + else: + id_names.append(name) + base_command = [binary, "install", "--{0}".format(method)] + flatpak_version = _flatpak_version(module, binary) + if LooseVersion(flatpak_version) < LooseVersion('1.1.3'): + base_command += ["-y"] + else: + base_command += ["--noninteractive"] + if no_dependencies: + base_command += ["--no-deps"] + if uri_names: + command = base_command + uri_names + _flatpak_command(module, module.check_mode, command) + if id_names: + command = base_command + [remote] + id_names + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def uninstall_flat(module, binary, names, method): + """Remove existing flatpaks.""" + global result # pylint: disable=global-variable-not-assigned + installed_flat_names = [ + _match_installed_flat_name(module, binary, name, method) + for name in names + ] + command = [binary, "uninstall"] + flatpak_version = _flatpak_version(module, binary) + if LooseVersion(flatpak_version) < LooseVersion('1.1.3'): + command += ["-y"] + else: + command += ["--noninteractive"] + command += ["--{0}".format(method)] + installed_flat_names + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def flatpak_exists(module, binary, names, method): + """Check if the flatpaks are installed.""" + command = [binary, "list", "--{0}".format(method)] + output = _flatpak_command(module, False, command) + installed = [] + not_installed = [] + for name in names: + parsed_name = _parse_flatpak_name(name).lower() + if parsed_name in output.lower(): + installed.append(name) + else: + not_installed.append(name) + return installed, not_installed + + +def _match_installed_flat_name(module, binary, name, method): + # This is a difficult function, since if the user supplies a flatpakref url, + # we have to rely on a naming convention: + # The flatpakref file name needs to match the flatpak name + global result # pylint: disable=global-variable-not-assigned + parsed_name = _parse_flatpak_name(name) + # Try running flatpak list with columns feature + command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"] + _flatpak_command(module, False, command, ignore_failure=True) + if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']: + # Probably flatpak before 1.2 + matched_flatpak_name = \ + 
_match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
+    else:
+        # Probably flatpak >= 1.2
+        matched_flatpak_name = \
+            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
+
+    if matched_flatpak_name:
+        return matched_flatpak_name
+    else:
+        result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
+            "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
+            "If you used a URL, try using the reverse DNS name of the flatpak"
+        module.fail_json(**result)
+
+
+def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
+    global result  # pylint: disable=global-variable-not-assigned
+    command = [binary, "list", "--{0}".format(method), "--app"]
+    output = _flatpak_command(module, False, command)
+    for row in output.split('\n'):
+        if parsed_name.lower() in row.lower():
+            return row.split()[0]
+
+
+def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
+    global result  # pylint: disable=global-variable-not-assigned
+    command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+    output = _flatpak_command(module, False, command)
+    for row in output.split('\n'):
+        if parsed_name.lower() == row.lower():
+            return row
+
+
+def _parse_flatpak_name(name):
+    if name.startswith('http://') or name.startswith('https://'):
+        file_name = urlparse(name).path.split('/')[-1]
+        file_name_without_extension = file_name.split('.')[0:-1]
+        common_name = ".".join(file_name_without_extension)
+    else:
+        common_name = name
+    return common_name
+
+
+def _flatpak_version(module, binary):
+    global result  # pylint: disable=global-variable-not-assigned
+    command = [binary, "--version"]
+    output = _flatpak_command(module, False, command)
+    version_number = output.split()[1]
+    return version_number
+
+
+def _flatpak_command(module, noop, command, ignore_failure=False):
+    global result  # pylint: disable=global-variable-not-assigned
+    result['command'] = ' '.join(command)
+    if noop:
+        result['rc'] = 0
+        return ""
+
+    result['rc'], result['stdout'], result['stderr'] = module.run_command(
+        command, check_rc=not ignore_failure
+    )
+    return result['stdout']
+
+
+def main():
+    # This module supports check mode
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='list', elements='str', required=True),
+            remote=dict(type='str', default='flathub'),
+            method=dict(type='str', default='system',
+                        choices=['user', 'system']),
+            state=dict(type='str', default='present',
+                       choices=['absent', 'present']),
+            no_dependencies=dict(type='bool', default=False),
+            executable=dict(type='path', default='flatpak')
+        ),
+        supports_check_mode=True,
+    )
+
+    name = module.params['name']
+    state = module.params['state']
+    remote = module.params['remote']
+    no_dependencies = module.params['no_dependencies']
+    method = module.params['method']
+    executable = module.params['executable']
+    binary = module.get_bin_path(executable, None)
+
+    global result
+    result = dict(
+        changed=False
+    )
+
+    # If the binary was not found, fail the operation
+    if not binary:
+        module.fail_json(msg="Executable '%s' was not found on the system."
% executable, **result) + + installed, not_installed = flatpak_exists(module, binary, name, method) + if state == 'present' and not_installed: + install_flat(module, binary, remote, not_installed, method, no_dependencies) + elif state == 'absent' and installed: + uninstall_flat(module, binary, installed, method) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/flatpak_remote.py b/ansible_collections/community/general/plugins/modules/flatpak_remote.py new file mode 100644 index 000000000..9c097c411 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/flatpak_remote.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 John Kwiatkoski (@JayKayy) +# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: flatpak_remote +short_description: Manage flatpak repository remotes +description: + - Allows users to add or remove flatpak remotes. + - The flatpak remotes concept is comparable to what is called repositories in other packaging + formats. + - Currently, remote addition is only supported via I(flatpakrepo) file URLs. + - Existing remotes will not be updated. + - See the M(community.general.flatpak) module for managing flatpaks. +author: + - John Kwiatkoski (@JayKayy) + - Alexander Bethke (@oolongbrothers) +requirements: + - flatpak +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + executable: + description: + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. + type: str + default: flatpak + flatpakrepo_url: + description: + - The URL to the I(flatpakrepo) file representing the repository remote to add. + - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url) + is added using the specified installation C(method). + - When used with I(state=absent), this is not required. + - Required when I(state=present). + type: str + method: + description: + - The installation method to use. + - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) + or only for the current C(user). + type: str + choices: [ system, user ] + default: system + name: + description: + - The desired name for the flatpak remote to be registered under on the managed host. + - When used with I(state=present), the remote will be added to the managed host under + the specified I(name). + - When used with I(state=absent) the remote with that name will be removed. + type: str + required: true + state: + description: + - Indicates the desired package state. + type: str + choices: [ absent, present ] + default: present + enabled: + description: + - Indicates whether this remote is enabled. 
+ type: bool + default: true + version_added: 6.4.0 +''' + +EXAMPLES = r''' +- name: Add the Gnome flatpak remote to the system installation + community.general.flatpak_remote: + name: gnome + state: present + flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo + +- name: Add the flathub flatpak repository remote to the user installation + community.general.flatpak_remote: + name: flathub + state: present + flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + method: user + +- name: Remove the Gnome flatpak remote from the user installation + community.general.flatpak_remote: + name: gnome + state: absent + method: user + +- name: Remove the flathub remote from the system installation + community.general.flatpak_remote: + name: flathub + state: absent + +- name: Disable the flathub remote in the system installation + community.general.flatpak_remote: + name: flathub + state: present + enabled: false +''' + +RETURN = r''' +command: + description: The exact flatpak command that was executed + returned: When a flatpak command has been executed + type: str + sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" +msg: + description: Module error message + returned: failure + type: str + sample: "Executable '/usr/local/bin/flatpak' was not found on the system." +rc: + description: Return code from flatpak binary + returned: When a flatpak command has been executed + type: int + sample: 0 +stderr: + description: Error output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" +stdout: + description: Output from flatpak binary + returned: When a flatpak command has been executed + type: str + sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +def add_remote(module, binary, name, flatpakrepo_url, method): + """Add a new remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remove_remote(module, binary, name, method): + """Remove an existing remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-delete", "--{0}".format(method), "--force", name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_exists(module, binary, name, method): + """Check if the remote exists.""" + command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)] + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return True + return False + + +def enable_remote(module, binary, name, method): + """Enable a remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-modify", "--enable", "--{0}".format(method), name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def disable_remote(module, binary, name, method): + """Disable a remote.""" + global 
result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-modify", "--disable", "--{0}".format(method), name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_enabled(module, binary, name, method): + """Check if the remote is enabled.""" + command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)] + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return len(listed_remote) == 1 or "disabled" not in listed_remote[1].split(",") + return False + + +def _flatpak_command(module, noop, command): + global result # pylint: disable=global-variable-not-assigned + result['command'] = ' '.join(command) + if noop: + result['rc'] = 0 + return "" + + result['rc'], result['stdout'], result['stderr'] = module.run_command( + command, check_rc=True + ) + return result['stdout'] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + flatpakrepo_url=dict(type='str'), + method=dict(type='str', default='system', + choices=['user', 'system']), + state=dict(type='str', default="present", + choices=['absent', 'present']), + enabled=dict(type='bool', default=True), + executable=dict(type='str', default="flatpak") + ), + # This module supports check mode + supports_check_mode=True, + ) + + name = module.params['name'] + flatpakrepo_url = module.params['flatpakrepo_url'] + method = module.params['method'] + state = module.params['state'] + enabled = module.params['enabled'] + executable = module.params['executable'] + binary = module.get_bin_path(executable, None) + + if flatpakrepo_url is None: + flatpakrepo_url = '' + + global result + result = dict( + changed=False + ) + + # If the binary was not found, fail the operation + if not binary: + module.fail_json(msg="Executable '%s' was not found on the system." 
% executable, **result)
+
+    remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
+
+    if state == 'present' and not remote_already_exists:
+        add_remote(module, binary, name, flatpakrepo_url, method)
+    elif state == 'absent' and remote_already_exists:
+        remove_remote(module, binary, name, method)
+
+    if state == 'present':
+        remote_already_enabled = remote_enabled(module, binary, to_bytes(name), method)
+
+        if enabled and not remote_already_enabled:
+            enable_remote(module, binary, name, method)
+        if not enabled and remote_already_enabled:
+            disable_remote(module, binary, name, method)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/flowdock.py b/ansible_collections/community/general/plugins/modules/flowdock.py
new file mode 100644
index 000000000..c78716ba4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/flowdock.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: flowdock
+author: "Matt Coddington (@mcodd)"
+short_description: Send a message to a flowdock
+description:
+  - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  token:
+    type: str
+    description:
+      - API token.
+    required: true
+  type:
+    type: str
+    description:
+      - Whether to post to 'inbox' or 'chat'.
+    required: true
+    choices: [ "inbox", "chat" ]
+  msg:
+    type: str
+    description:
+      - Content of the message.
+    required: true
+  tags:
+    type: str
+    description:
+      - Tags of the message, separated by commas.
+    required: false
+  external_user_name:
+    type: str
+    description:
+      - (chat only - required) Name of the "user" sending the message.
+    required: false
+  from_address:
+    type: str
+    description:
+      - (inbox only - required) Email address of the message sender.
+    required: false
+  source:
+    type: str
+    description:
+      - (inbox only - required) Human readable identifier of the application that uses the Flowdock API.
+    required: false
+  subject:
+    type: str
+    description:
+      - (inbox only - required) Subject line of the message.
+    required: false
+  from_name:
+    type: str
+    description:
+      - (inbox only) Name of the message sender.
+    required: false
+  reply_to:
+    type: str
+    description:
+      - (inbox only) Email address for replies.
+    required: false
+  project:
+    type: str
+    description:
+      - (inbox only) Human readable identifier for more detailed message categorization.
+    required: false
+  link:
+    type: str
+    description:
+      - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
+    required: false
+  validate_certs:
+    description:
+      - If C(false), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+ required: false + default: true + type: bool + +requirements: [ ] +''' + +EXAMPLES = ''' +- name: Send a message to a flowdock + community.general.flowdock: + type: inbox + token: AAAAAA + from_address: user@example.com + source: my cool app + msg: test from ansible + subject: test subject + +- name: Send a message to a flowdock + community.general.flowdock: + type: chat + token: AAAAAA + external_user_name: testuser + msg: test from ansible + tags: tag1,tag2,tag3 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + + +# =========================================== +# Module execution. +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + msg=dict(required=True), + type=dict(required=True, choices=["inbox", "chat"]), + external_user_name=dict(required=False), + from_address=dict(required=False), + source=dict(required=False), + subject=dict(required=False), + from_name=dict(required=False), + reply_to=dict(required=False), + project=dict(required=False), + tags=dict(required=False), + link=dict(required=False), + validate_certs=dict(default=True, type='bool'), + ), + supports_check_mode=True + ) + + type = module.params["type"] + token = module.params["token"] + if type == 'inbox': + url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) + else: + url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) + + params = {} + + # required params + params['content'] = module.params["msg"] + + # required params for the 'chat' type + if module.params['external_user_name']: + if type == 'inbox': + module.fail_json(msg="external_user_name is not valid for the 'inbox' type") + else: + params['external_user_name'] = module.params["external_user_name"] + elif type == 'chat': + module.fail_json(msg="external_user_name is required for the 'chat' type") + + # required params for the 'inbox' type + for item in ['from_address', 'source', 'subject']: + if module.params[item]: + if type == 'chat': + module.fail_json(msg="%s is not valid for the 'chat' type" % item) + else: + params[item] = module.params[item] + elif type == 'inbox': + module.fail_json(msg="%s is required for the 'inbox' type" % item) + + # optional params + if module.params["tags"]: + params['tags'] = module.params["tags"] + + # optional params for the 'inbox' type + for item in ['from_name', 'reply_to', 'project', 'link']: + if module.params[item]: + if type == 'chat': + module.fail_json(msg="%s is not valid for the 'chat' type" % item) + else: + params[item] = module.params[item] + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=False) + + # Send the data to Flowdock + data = urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] != 200: + module.fail_json(msg="unable to send msg: %s" % info['msg']) + + module.exit_json(changed=True, msg=module.params["msg"]) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/gandi_livedns.py b/ansible_collections/community/general/plugins/modules/gandi_livedns.py new file mode 100644 index 000000000..cc9dd630b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/gandi_livedns.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019 Gregory Thiemonge +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: gandi_livedns +author: + - Gregory Thiemonge (@gthiemonge) +version_added: "2.3.0" +short_description: Manage Gandi LiveDNS records +description: + - "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_key: + description: + - Account API token. + type: str + required: true + record: + description: + - Record to add. + type: str + required: true + state: + description: + - Whether the record(s) should exist or not. + type: str + choices: [ absent, present ] + default: present + ttl: + description: + - The TTL to give the new record. + - Required when I(state=present). + type: int + type: + description: + - The type of DNS record to create. + type: str + required: true + values: + description: + - The record values. + - Required when I(state=present). + type: list + elements: str + domain: + description: + - The name of the Domain to work with (for example, "example.com"). + required: true + type: str +''' + +EXAMPLES = r''' +- name: Create a test A record to point to 127.0.0.1 in the my.com domain + community.general.gandi_livedns: + domain: my.com + record: test + type: A + values: + - 127.0.0.1 + ttl: 7200 + api_key: dummyapitoken + register: record + +- name: Create a mail CNAME record to www.my.com domain + community.general.gandi_livedns: + domain: my.com + type: CNAME + record: mail + values: + - www + ttl: 7200 + api_key: dummyapitoken + state: present + +- name: Change its TTL + community.general.gandi_livedns: + domain: my.com + type: CNAME + record: mail + values: + - www + ttl: 10800 + api_key: dummyapitoken + state: present + +- name: Delete the record + community.general.gandi_livedns: + domain: my.com + type: CNAME + record: mail + api_key: dummyapitoken + state: absent +''' + +RETURN = r''' +record: + description: A dictionary containing the record data. + returned: success, except on record deletion + type: dict + contains: + values: + description: The record content (details depend on record type). + returned: success + type: list + elements: str + sample: + - 192.0.2.91 + - 192.0.2.92 + record: + description: The record name. + returned: success + type: str + sample: www + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + domain: + description: The domain associated with the record. 
+ returned: success + type: str + sample: my.com +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(type='str', required=True, no_log=True), + record=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + ttl=dict(type='int'), + type=dict(type='str', required=True), + values=dict(type='list', elements='str'), + domain=dict(type='str', required=True), + ), + supports_check_mode=True, + required_if=[ + ('state', 'present', ['values', 'ttl']), + ], + ) + + gandi_api = GandiLiveDNSAPI(module) + + if module.params['state'] == 'present': + ret, changed = gandi_api.ensure_dns_record(module.params['record'], + module.params['type'], + module.params['ttl'], + module.params['values'], + module.params['domain']) + else: + ret, changed = gandi_api.delete_dns_record(module.params['record'], + module.params['type'], + module.params['values'], + module.params['domain']) + + result = dict( + changed=changed, + ) + if ret: + result['record'] = gandi_api.build_result(ret, + module.params['domain']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/gconftool2.py b/ansible_collections/community/general/plugins/modules/gconftool2.py new file mode 100644 index 000000000..949e92b30 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/gconftool2.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Kenneth D. Evensen +# Copyright (c) 2017, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: gconftool2 +author: + - Kenneth D. Evensen (@kevensen) +short_description: Edit GNOME Configurations +description: + - This module allows for the manipulation of GNOME 2 Configuration via + gconftool-2. Please see the gconftool-2(1) man pages for more details. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + key: + type: str + description: + - A GConf preference key is an element in the GConf repository + that corresponds to an application preference. See man gconftool-2(1). + required: true + value: + type: str + description: + - Preference keys typically have simple values such as strings, + integers, or lists of strings and integers. This is ignored if the state + is "get". See man gconftool-2(1). + value_type: + type: str + description: + - The type of value being set. This is ignored if the state is "get". + choices: [ bool, float, int, string ] + state: + type: str + description: + - The action to take upon the key/value. + - State C(get) is deprecated and will be removed in community.general 8.0.0. Please use the module M(community.general.gconftool2_info) instead. + required: true + choices: [ absent, get, present ] + config_source: + type: str + description: + - Specify a configuration source to use rather than the default path. + See man gconftool-2(1). + direct: + description: + - Access the config database directly, bypassing server. 
If direct is + specified then the config_source must be specified as well. + See man gconftool-2(1). + type: bool + default: false +''' + +EXAMPLES = """ +- name: Change the widget font to "Serif 12" + community.general.gconftool2: + key: "/desktop/gnome/interface/font_name" + value_type: "string" + value: "Serif 12" +""" + +RETURN = ''' + key: + description: The key specified in the module parameters + returned: success + type: str + sample: /desktop/gnome/interface/font_name + value_type: + description: The type of the value that was changed + returned: success + type: str + sample: string + value: + description: The value of the preference key after executing the module + returned: success + type: str + sample: "Serif 12" +... +''' + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner + + +class GConftool(StateModuleHelper): + change_params = ('value', ) + diff_params = ('value', ) + output_params = ('key', 'value_type') + facts_params = ('key', 'value_type') + facts_name = 'gconftool2' + module = dict( + argument_spec=dict( + key=dict(type='str', required=True, no_log=False), + value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']), + value=dict(type='str'), + state=dict(type='str', required=True, choices=['absent', 'get', 'present']), + direct=dict(type='bool', default=False), + config_source=dict(type='str'), + ), + required_if=[ + ('state', 'present', ['value', 'value_type']), + ('state', 'absent', ['value']), + ('direct', True, ['config_source']), + ], + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + if self.vars.state != "get": + if not self.vars.direct and self.vars.config_source is not None: + self.module.fail_json(msg='If the "config_source" is specified then "direct" must be "true"') + + self.vars.set('previous_value', self._get(), fact=True) + self.vars.set('value_type', self.vars.value_type) + self.vars.set_meta('value', initial_value=self.vars.previous_value) + self.vars.set('playbook_value', self.vars.value, fact=True) + + def _make_process(self, fail_on_err): + def process(rc, out, err): + if err and fail_on_err: + self.ansible.fail_json(msg='gconftool-2 failed with error: %s' % (str(err))) + self.vars.value = out.rstrip() + return self.vars.value + return process + + def _get(self): + return self.runner("state key", output_process=self._make_process(False)).run(state="get") + + def state_get(self): + self.deprecate( + msg="State 'get' is deprecated. 
Please use the module community.general.gconftool2_info instead", + version="8.0.0", collection_name="community.general" + ) + + def state_absent(self): + with self.runner("state key", output_process=self._make_process(False)) as ctx: + ctx.run() + self.vars.set('new_value', None, fact=True) + + def state_present(self): + with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: + self.vars.set('new_value', ctx.run(), fact=True) + + +def main(): + GConftool.execute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/gconftool2_info.py b/ansible_collections/community/general/plugins/modules/gconftool2_info.py new file mode 100644 index 000000000..282065b95 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/gconftool2_info.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: gconftool2_info +author: + - "Alexei Znamensky (@russoz)" +short_description: Retrieve GConf configurations +version_added: 5.1.0 +description: + - This module allows retrieving application preferences from the GConf database, with the help of C(gconftool-2). +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + key: + description: + - The key name for an element in the GConf database. + type: str + required: true +notes: + - See man gconftool-2(1) for more details. +seealso: + - name: gconf repository (archived) + description: Git repository for the project. It is an archived project, so the repository is read-only. + link: https://gitlab.gnome.org/Archive/gconf +''' + +EXAMPLES = """ +- name: Get value for a certain key in the database. + community.general.gconftool2_info: + key: /desktop/gnome/background/picture_filename + register: result +""" + +RETURN = ''' + value: + description: + - The value of the property. 
+    returned: success
+    type: str
+    sample: Monospace 10
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner
+
+
+class GConftoolInfo(ModuleHelper):
+    output_params = ['key']
+    module = dict(
+        argument_spec=dict(
+            key=dict(type='str', required=True, no_log=False),
+        ),
+        supports_check_mode=True,
+    )
+
+    def __init_module__(self):
+        self.runner = gconftool2_runner(self.module, check_rc=True)
+
+    def __run__(self):
+        with self.runner.context(args_order=["state", "key"]) as ctx:
+            rc, out, err = ctx.run(state="get")
+            self.vars.value = None if err and not out else out.rstrip()
+
+
+def main():
+    GConftoolInfo.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gem.py b/ansible_collections/community/general/plugins/modules/gem.py
new file mode 100644
index 000000000..4bc99d39e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gem.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Johan Wiren
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gem
+short_description: Manage Ruby gems
+description:
+  - Manage installation and uninstallation of Ruby gems.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    type: str
+    description:
+      - The name of the gem to be managed.
+    required: true
+  state:
+    type: str
+    description:
+      - The desired state of the gem. C(latest) ensures that the latest version is installed.
+    required: false
+    choices: [present, absent, latest]
+    default: present
+  gem_source:
+    type: path
+    description:
+      - The path to a local gem used as installation source.
+    required: false
+  include_dependencies:
+    description:
+      - Whether to include dependencies or not.
+    required: false
+    type: bool
+    default: true
+  repository:
+    type: str
+    description:
+      - The repository from which the gem will be installed.
+    required: false
+    aliases: [source]
+  user_install:
+    description:
+      - Install gem in user's local gems cache or for all users.
+    required: false
+    type: bool
+    default: true
+  executable:
+    type: path
+    description:
+      - Override the path to the gem executable.
+    required: false
+  install_dir:
+    type: path
+    description:
+      - Install the gems into a specific directory.
+        These gems will be independent of the globally installed ones.
+        Specifying this requires user_install to be false.
+    required: false
+  bindir:
+    type: path
+    description:
+      - Install executables into a specific directory.
+    version_added: 3.3.0
+  norc:
+    type: bool
+    default: true
+    description:
+      - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2.
+      - The default changed from C(false) to C(true) in community.general 6.0.0.
+    version_added: 3.3.0
+  env_shebang:
+    description:
+      - Rewrite the shebang line on installed scripts to use /usr/bin/env.
+    required: false
+    default: false
+    type: bool
+  version:
+    type: str
+    description:
+      - Version of the gem to be installed/removed.
+ required: false + pre_release: + description: + - Allow installation of pre-release versions of the gem. + required: false + default: false + type: bool + include_doc: + description: + - Install with or without docs. + required: false + default: false + type: bool + build_flags: + type: str + description: + - Allow adding build flags for gem compilation + required: false + force: + description: + - Force gem to (un-)install, bypassing dependency checks. + required: false + default: false + type: bool +author: + - "Ansible Core Team" + - "Johan Wiren (@johanwiren)" +''' + +EXAMPLES = ''' +- name: Install version 1.0 of vagrant + community.general.gem: + name: vagrant + version: 1.0 + state: present + +- name: Install latest available version of rake + community.general.gem: + name: rake + state: latest + +- name: Install rake version 1.0 from a local gem on disk + community.general.gem: + name: rake + gem_source: /path/to/gems/rake-1.0.gem + state: present +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def get_rubygems_path(module): + if module.params['executable']: + result = module.params['executable'].split(' ') + else: + result = [module.get_bin_path('gem', True)] + return result + + +def get_rubygems_version(module): + if hasattr(get_rubygems_version, "ver"): + return get_rubygems_version.ver + + cmd = get_rubygems_path(module) + ['--version'] + (rc, out, err) = module.run_command(cmd, check_rc=True) + + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out) + if not match: + return None + + ver = tuple(int(x) for x in match.groups()) + get_rubygems_version.ver = ver + + return ver + + +def get_rubygems_environ(module): + if module.params['install_dir']: + return {'GEM_HOME': module.params['install_dir']} + return None + + +def get_installed_versions(module, remote=False): + + cmd = get_rubygems_path(module) + cmd.append('query') + cmd.extend(common_opts(module)) + if remote: + cmd.append('--remote') + if module.params['repository']: + cmd.extend(['--source', module.params['repository']]) + cmd.append('-n') + cmd.append('^%s$' % module.params['name']) + + environ = get_rubygems_environ(module) + (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True) + installed_versions = [] + for line in out.splitlines(): + match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line) + if match: + versions = match.group(1) + for version in versions.split(', '): + installed_versions.append(version.split()[0]) + return installed_versions + + +def exists(module): + if module.params['state'] == 'latest': + remoteversions = get_installed_versions(module, remote=True) + if remoteversions: + module.params['version'] = remoteversions[0] + installed_versions = get_installed_versions(module) + if module.params['version']: + if module.params['version'] in installed_versions: + return True + else: + if installed_versions: + return True + return False + + +def common_opts(module): + opts = [] + ver = get_rubygems_version(module) + if module.params['norc'] and ver and ver >= (2, 5, 2): + opts.append('--norc') + return opts + + +def uninstall(module): + + if module.check_mode: + return + cmd = get_rubygems_path(module) + environ = get_rubygems_environ(module) + cmd.append('uninstall') + cmd.extend(common_opts(module)) + if module.params['install_dir']: + cmd.extend(['--install-dir', module.params['install_dir']]) + + if module.params['bindir']: + cmd.extend(['--bindir', module.params['bindir']]) + + if module.params['version']: + cmd.extend(['--version', 
module.params['version']]) + else: + cmd.append('--all') + cmd.append('--executable') + if module.params['force']: + cmd.append('--force') + cmd.append(module.params['name']) + module.run_command(cmd, environ_update=environ, check_rc=True) + + +def install(module): + + if module.check_mode: + return + + ver = get_rubygems_version(module) + + cmd = get_rubygems_path(module) + cmd.append('install') + cmd.extend(common_opts(module)) + if module.params['version']: + cmd.extend(['--version', module.params['version']]) + if module.params['repository']: + cmd.extend(['--source', module.params['repository']]) + if not module.params['include_dependencies']: + cmd.append('--ignore-dependencies') + else: + if ver and ver < (2, 0, 0): + cmd.append('--include-dependencies') + if module.params['user_install']: + cmd.append('--user-install') + else: + cmd.append('--no-user-install') + if module.params['install_dir']: + cmd.extend(['--install-dir', module.params['install_dir']]) + if module.params['bindir']: + cmd.extend(['--bindir', module.params['bindir']]) + if module.params['pre_release']: + cmd.append('--pre') + if not module.params['include_doc']: + if ver and ver < (2, 0, 0): + cmd.append('--no-rdoc') + cmd.append('--no-ri') + else: + cmd.append('--no-document') + if module.params['env_shebang']: + cmd.append('--env-shebang') + cmd.append(module.params['gem_source']) + if module.params['build_flags']: + cmd.extend(['--', module.params['build_flags']]) + if module.params['force']: + cmd.append('--force') + module.run_command(cmd, check_rc=True) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + executable=dict(required=False, type='path'), + gem_source=dict(required=False, type='path'), + include_dependencies=dict(required=False, default=True, type='bool'), + name=dict(required=True, type='str'), + repository=dict(required=False, aliases=['source'], type='str'), + state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'), + user_install=dict(required=False, default=True, type='bool'), + install_dir=dict(required=False, type='path'), + bindir=dict(type='path'), + norc=dict(type='bool', default=True), + pre_release=dict(required=False, default=False, type='bool'), + include_doc=dict(required=False, default=False, type='bool'), + env_shebang=dict(required=False, default=False, type='bool'), + version=dict(required=False, type='str'), + build_flags=dict(required=False, type='str'), + force=dict(required=False, default=False, type='bool'), + ), + supports_check_mode=True, + mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']], + ) + + if module.params['version'] and module.params['state'] == 'latest': + module.fail_json(msg="Cannot specify version when state=latest") + if module.params['gem_source'] and module.params['state'] == 'latest': + module.fail_json(msg="Cannot maintain state=latest when installing from local source") + if module.params['user_install'] and module.params['install_dir']: + module.fail_json(msg="install_dir requires user_install=false") + + if not module.params['gem_source']: + module.params['gem_source'] = module.params['name'] + + changed = False + + if module.params['state'] in ['present', 'latest']: + if not exists(module): + install(module) + changed = True + elif module.params['state'] == 'absent': + if exists(module): + uninstall(module) + changed = True + + result = {} + result['name'] = module.params['name'] + result['state'] = module.params['state'] + if module.params['version']: + 
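+        # Echo the version back in the result only when the caller requested a
+        # specific one; otherwise the result carries just name/state/changed.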
result['version'] = module.params['version'] + result['changed'] = changed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/git_config.py b/ansible_collections/community/general/plugins/modules/git_config.py new file mode 100644 index 000000000..d67312174 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/git_config.py @@ -0,0 +1,290 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Marius Gedminas +# Copyright (c) 2016, Matthew Gamble +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: git_config +author: + - Matthew Gamble (@djmattyg007) + - Marius Gedminas (@mgedmin) +requirements: ['git'] +short_description: Read and write git configuration +description: + - The C(git_config) module changes git configuration by invoking 'git config'. + This is needed if you do not want to use M(ansible.builtin.template) for the entire git + config file (for example because you need to change just C(user.email) in + /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or + do not work correctly in check mode. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + list_all: + description: + - List all settings (optionally limited to a given I(scope)). + type: bool + default: false + name: + description: + - The name of the setting. If no value is supplied, the value will + be read from the config if it has been set. + type: str + repo: + description: + - Path to a git repository for reading and writing values from a + specific repo. + type: path + file: + description: + - Path to an adhoc git configuration file to be managed using the C(file) scope. + type: path + version_added: 2.0.0 + scope: + description: + - Specify which scope to read/set values from. + - This is required when setting config values. + - If this is set to C(local), you must also specify the C(repo) parameter. + - If this is set to C(file), you must also specify the C(file) parameter. + - It defaults to system only when not using I(list_all)=C(true). + choices: [ "file", "local", "global", "system" ] + type: str + state: + description: + - "Indicates the setting should be set/unset. + This parameter has higher precedence than I(value) parameter: + when I(state)=absent and I(value) is defined, I(value) is discarded." + choices: [ 'present', 'absent' ] + default: 'present' + type: str + value: + description: + - When specifying the name of a single setting, supply a value to + set that setting to the given value. 
+    type: str
+'''
+
+EXAMPLES = '''
+- name: Add a setting to ~/.gitconfig
+  community.general.git_config:
+    name: alias.ci
+    scope: global
+    value: commit
+
+- name: Add a setting to ~/.gitconfig
+  community.general.git_config:
+    name: alias.st
+    scope: global
+    value: status
+
+- name: Remove a setting from ~/.gitconfig
+  community.general.git_config:
+    name: alias.ci
+    scope: global
+    state: absent
+
+- name: Add a setting to ~/.gitconfig
+  community.general.git_config:
+    name: core.editor
+    scope: global
+    value: vim
+
+- name: Add a setting system-wide
+  community.general.git_config:
+    name: alias.remotev
+    scope: system
+    value: remote -v
+
+- name: Add a setting to a system scope (default)
+  community.general.git_config:
+    name: alias.diffc
+    value: diff --cached
+
+- name: Add a setting to a system scope (default)
+  community.general.git_config:
+    name: color.ui
+    value: auto
+
+- name: Make etckeeper not complain when it is invoked by cron
+  community.general.git_config:
+    name: user.email
+    repo: /etc
+    scope: local
+    value: 'root@{{ ansible_fqdn }}'
+
+- name: Read individual values from git config
+  community.general.git_config:
+    name: alias.ci
+    scope: global
+
+- name: Scope system is also assumed when reading values, unless list_all=true
+  community.general.git_config:
+    name: alias.diffc
+
+- name: Read all values from git config
+  community.general.git_config:
+    list_all: true
+    scope: global
+
+- name: When list_all is true and no scope is specified, you get configuration from all scopes
+  community.general.git_config:
+    list_all: true
+
+- name: Specify a repository to include local settings
+  community.general.git_config:
+    list_all: true
+    repo: /path/to/repo.git
+'''
+
+RETURN = '''
+---
+config_value:
+  description: When I(list_all=false) and value is not set, a string containing the value of the setting in I(name).
+  returned: success
+  type: str
+  sample: "vim"
+
+config_values:
+  description: When I(list_all=true), a dict containing key/value pairs of multiple configuration settings.
+  returned: success
+  type: dict
+  sample:
+    core.editor: "vim"
+    color.ui: "auto"
+    alias.diffc: "diff --cached"
+    alias.remotev: "remote -v"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            list_all=dict(required=False, type='bool', default=False),
+            name=dict(type='str'),
+            repo=dict(type='path'),
+            file=dict(type='path'),
+            scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']),
+            state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+            value=dict(required=False),
+        ),
+        mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
+        required_if=[
+            ('scope', 'local', ['repo']),
+            ('scope', 'file', ['file'])
+        ],
+        required_one_of=[['list_all', 'name']],
+        supports_check_mode=True,
+    )
+    git_path = module.get_bin_path('git', True)
+
+    params = module.params
+    # We check the error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+    # Set the locale to C to ensure consistent messages.
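+    # run_command_environ_update is merged into the environment of every
+    # run_command() call this module makes, so the C locale applies throughout.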
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + if params['name']: + name = params['name'] + else: + name = None + + if params['scope']: + scope = params['scope'] + elif params['list_all']: + scope = None + else: + scope = 'system' + + if params['state'] == 'absent': + unset = 'unset' + params['value'] = None + else: + unset = None + + if params['value']: + new_value = params['value'] + else: + new_value = None + + args = [git_path, "config", "--includes"] + if params['list_all']: + args.append('-l') + if scope == 'file': + args.append('-f') + args.append(params['file']) + elif scope: + args.append("--" + scope) + if name: + args.append(name) + + if scope == 'local': + dir = params['repo'] + elif params['list_all'] and params['repo']: + # Include local settings from a specific repo when listing all available settings + dir = params['repo'] + else: + # Run from root directory to avoid accidentally picking up any local config settings + dir = "/" + + (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False) + if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: + # This just means nothing has been set at the given scope + module.exit_json(changed=False, msg='', config_values={}) + elif rc >= 2: + # If the return code is 1, it just means the option hasn't been set yet, which is fine. + module.fail_json(rc=rc, msg=err, cmd=' '.join(args)) + + if params['list_all']: + values = out.rstrip().splitlines() + config_values = {} + for value in values: + k, v = value.split('=', 1) + config_values[k] = v + module.exit_json(changed=False, msg='', config_values=config_values) + elif not new_value and not unset: + module.exit_json(changed=False, msg='', config_value=out.rstrip()) + elif unset and not out: + module.exit_json(changed=False, msg='no setting to unset') + else: + old_value = out.rstrip() + if old_value == new_value: + module.exit_json(changed=False, msg="") + + if not module.check_mode: + if unset: + args.insert(len(args) - 1, "--" + unset) + cmd = args + else: + cmd = args + [new_value] + (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False) + if err: + module.fail_json(rc=rc, msg=err, cmd=cmd) + + module.exit_json( + msg='setting changed', + diff=dict( + before_header=' '.join(args), + before=old_value + "\n", + after_header=' '.join(args), + after=(new_value or '') + "\n" + ), + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/github_deploy_key.py b/ansible_collections/community/general/plugins/modules/github_deploy_key.py new file mode 100644 index 000000000..322650bf7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/github_deploy_key.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: github_deploy_key +author: "Ali (@bincyber)" +short_description: Manages deploy keys for GitHub repositories +description: + - "Adds or removes deploy keys for GitHub repositories. 
Supports authentication using username and password, + username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin + rights on the repository are required." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + github_url: + description: + - The base URL of the GitHub API + required: false + type: str + version_added: '0.2.0' + default: https://api.github.com + owner: + description: + - The name of the individual account or organization that owns the GitHub repository. + required: true + aliases: [ 'account', 'organization' ] + type: str + repo: + description: + - The name of the GitHub repository. + required: true + aliases: [ 'repository' ] + type: str + name: + description: + - The name for the deploy key. + required: true + aliases: [ 'title', 'label' ] + type: str + key: + description: + - The SSH public key to add to the repository as a deploy key. + required: true + type: str + read_only: + description: + - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write. + type: bool + default: true + state: + description: + - The state of the deploy key. + default: "present" + choices: [ "present", "absent" ] + type: str + force: + description: + - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. + type: bool + default: false + username: + description: + - The username to authenticate with. Should not be set when using personal access token + type: str + password: + description: + - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination. + type: str + token: + description: + - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password). + type: str + otp: + description: + - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password). + type: int +notes: + - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." +''' + +EXAMPLES = ''' +- name: Add a new read-only deploy key to a GitHub repository using basic authentication + community.general.github_deploy_key: + owner: "johndoe" + repo: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + read_only: true + username: "johndoe" + password: "supersecretpassword" + +- name: Remove an existing deploy key from a GitHub repository + community.general.github_deploy_key: + owner: "johndoe" + repository: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + force: true + username: "johndoe" + password: "supersecretpassword" + state: absent + +- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate + community.general.github_deploy_key: + owner: "johndoe" + repository: "example" + name: "new-deploy-key" + key: "{{ lookup('file', '~/.ssh/github.pub') }}" + force: true + token: "ABAQDAwXxn7kIMNWzcDfo..." 
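+
+# An illustrative sketch, not part of the original examples: the same module
+# driven entirely by variables. The variable names (github_owner, github_repo,
+# github_token) are placeholders, not defaults provided by the module.
+- name: Add a read-only deploy key using values from variables
+  community.general.github_deploy_key:
+    owner: "{{ github_owner }}"
+    repo: "{{ github_repo }}"
+    name: "deploy-key-{{ inventory_hostname }}"
+    key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+    read_only: true
+    token: "{{ github_token }}"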
+ +- name: Re-add a deploy key to a GitHub repository but with a different name + community.general.github_deploy_key: + owner: "johndoe" + repository: "example" + name: "replace-deploy-key" + key: "{{ lookup('file', '~/.ssh/github.pub') }}" + username: "johndoe" + password: "supersecretpassword" + +- name: Add a new deploy key to a GitHub repository using 2FA + community.general.github_deploy_key: + owner: "johndoe" + repo: "example" + name: "new-deploy-key-2" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + username: "johndoe" + password: "supersecretpassword" + otp: 123456 + +- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise + community.general.github_deploy_key: + github_url: "https://api.example.com" + owner: "janedoe" + repo: "example" + name: "new-deploy-key" + key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." + read_only: true + username: "janedoe" + password: "supersecretpassword" +''' + +RETURN = ''' +msg: + description: the status message describing what occurred + returned: always + type: str + sample: "Deploy key added successfully" + +http_status_code: + description: the HTTP status code returned by the GitHub API + returned: failed + type: int + sample: 400 + +error: + description: the error message returned by the GitHub API + returned: failed + type: str + sample: "key is already in use" + +id: + description: the key identifier assigned by GitHub for the deploy key + returned: changed + type: int + sample: 24381901 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from re import findall + + +class GithubDeployKey(object): + def __init__(self, module): + self.module = module + + self.github_url = self.module.params['github_url'] + self.name = module.params['name'] + self.key = module.params['key'] + self.state = module.params['state'] + self.read_only = module.params.get('read_only', True) + self.force = module.params.get('force', False) + self.username = module.params.get('username', None) + self.password = module.params.get('password', None) + self.token = module.params.get('token', None) + self.otp = module.params.get('otp', None) + + @property + def url(self): + owner = self.module.params['owner'] + repo = self.module.params['repo'] + return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo) + + @property + def headers(self): + if self.username is not None and self.password is not None: + self.module.params['url_username'] = self.username + self.module.params['url_password'] = self.password + self.module.params['force_basic_auth'] = True + if self.otp is not None: + return {"X-GitHub-OTP": self.otp} + elif self.token is not None: + return {"Authorization": "token {0}".format(self.token)} + else: + return None + + def paginate(self, url): + while url: + resp, info = fetch_url(self.module, url, headers=self.headers, method="GET") + + if info["status"] == 200: + yield self.module.from_json(resp.read()) + + links = {} + for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]): + links[y] = x + + url = links.get('next') + else: + self.handle_error(method="GET", info=info) + + def get_existing_key(self): + for keys in self.paginate(self.url): + if keys: + for i in keys: + existing_key_id = str(i["id"]) + if i["key"].split() == self.key.split()[:2]: + return existing_key_id + elif i['title'] == self.name and self.force: + return existing_key_id + else: + return None + + def add_new_key(self): + request_body = {"title": self.name, 
"key": self.key, "read_only": self.read_only} + + resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30) + + status_code = info["status"] + + if status_code == 201: + response_body = self.module.from_json(resp.read()) + key_id = response_body["id"] + self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id) + elif status_code == 422: + self.module.exit_json(changed=False, msg="Deploy key already exists") + else: + self.handle_error(method="POST", info=info) + + def remove_existing_key(self, key_id): + resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE") + + status_code = info["status"] + + if status_code == 204: + if self.state == 'absent': + self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id) + else: + self.handle_error(method="DELETE", info=info, key_id=key_id) + + def handle_error(self, method, info, key_id=None): + status_code = info['status'] + body = info.get('body') + if body: + err = self.module.from_json(body)['message'] + + if status_code == 401: + self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err) + elif status_code == 404: + self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err) + else: + if method == "GET": + self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err) + elif method == "POST": + self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err) + elif method == "DELETE": + self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + github_url=dict(required=False, type='str', default="https://api.github.com"), + owner=dict(required=True, type='str', aliases=['account', 'organization']), + repo=dict(required=True, type='str', aliases=['repository']), + name=dict(required=True, type='str', aliases=['title', 'label']), + key=dict(required=True, type='str', no_log=False), + read_only=dict(required=False, type='bool', default=True), + state=dict(default='present', choices=['present', 'absent']), + force=dict(required=False, type='bool', default=False), + username=dict(required=False, type='str'), + password=dict(required=False, type='str', no_log=True), + otp=dict(required=False, type='int', no_log=True), + token=dict(required=False, type='str', no_log=True) + ), + mutually_exclusive=[ + ['password', 'token'] + ], + required_together=[ + ['username', 'password'], + ['otp', 'username', 'password'] + ], + required_one_of=[ + ['username', 'token'] + ], + supports_check_mode=True, + ) + + deploy_key = GithubDeployKey(module) + + if module.check_mode: + key_id = deploy_key.get_existing_key() + if deploy_key.state == "present" and key_id is None: + module.exit_json(changed=True) + elif deploy_key.state == "present" and key_id is not None: + module.exit_json(changed=False) + + # to forcefully modify an existing key, the existing key must be deleted first + if deploy_key.state == 'absent' or deploy_key.force: + key_id = deploy_key.get_existing_key() + + if key_id is not None: + deploy_key.remove_existing_key(key_id) + elif deploy_key.state == 'absent': + module.exit_json(changed=False, msg="Deploy key does not exist") + + if deploy_key.state 
== "present": + deploy_key.add_new_key() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/github_issue.py b/ansible_collections/community/general/plugins/modules/github_issue.py new file mode 100644 index 000000000..4e10e9f92 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/github_issue.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-18, Abhijeet Kasurde +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: github_issue +short_description: View GitHub issue +description: + - View GitHub issue for a given repository and organization. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + repo: + description: + - Name of repository from which issue needs to be retrieved. + required: true + type: str + organization: + description: + - Name of the GitHub organization in which the repository is hosted. + required: true + type: str + issue: + description: + - Issue number for which information is required. + required: true + type: int + action: + description: + - Get various details about issue depending upon action specified. + default: 'get_status' + choices: + - 'get_status' + type: str +author: + - Abhijeet Kasurde (@Akasurde) +''' + +RETURN = ''' +issue_status: + description: State of the GitHub issue + type: str + returned: success + sample: open, closed +''' + +EXAMPLES = ''' +- name: Check if GitHub issue is closed or not + community.general.github_issue: + organization: ansible + repo: ansible + issue: 23642 + action: get_status + register: r + +- name: Take action depending upon issue status + ansible.builtin.debug: + msg: Do something when issue 23642 is open + when: r.issue_status == 'open' +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def main(): + module = AnsibleModule( + argument_spec=dict( + organization=dict(required=True), + repo=dict(required=True), + issue=dict(type='int', required=True), + action=dict(choices=['get_status'], default='get_status'), + ), + supports_check_mode=True, + ) + + organization = module.params['organization'] + repo = module.params['repo'] + issue = module.params['issue'] + action = module.params['action'] + + result = dict() + + headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/vnd.github.v3+json', + } + + url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue) + + response, info = fetch_url(module, url, headers=headers) + if not (200 <= info['status'] < 400): + if info['status'] == 404: + module.fail_json(msg="Failed to find issue %s" % issue) + module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg'])) + + gh_obj = json.loads(response.read()) + + if action == 'get_status' or action is None: + if module.check_mode: + result.update(changed=True) + else: + result.update(changed=True, issue_status=gh_obj['state']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/github_key.py b/ansible_collections/community/general/plugins/modules/github_key.py new file mode 100644 
index 000000000..683a963a7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/github_key.py @@ -0,0 +1,250 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: github_key +short_description: Manage GitHub access keys +description: + - Creates, removes, or updates GitHub access keys. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + description: + - GitHub Access Token with permission to list and create public keys. + required: true + type: str + name: + description: + - SSH key name + required: true + type: str + pubkey: + description: + - SSH public key value. Required when I(state=present). + type: str + state: + description: + - Whether to remove a key, ensure that it exists, or update its value. + choices: ['present', 'absent'] + default: 'present' + type: str + force: + description: + - The default is C(true), which will replace the existing remote key + if it's different than C(pubkey). If C(false), the key will only be + set if no key with the given I(name) exists. + type: bool + default: true + +author: Robert Estelle (@erydo) +''' + +RETURN = ''' +deleted_keys: + description: An array of key objects that were deleted. Only present on state=absent + type: list + returned: When state=absent + sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}] +matching_keys: + description: An array of keys matching the specified name. Only present on state=present + type: list + returned: When state=present + sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}] +key: + description: Metadata about the key just created. 
Only present on state=present + type: dict + returned: success + sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false} +''' + +EXAMPLES = ''' +- name: Read SSH public key to authorize + ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub + register: ssh_pub_key + +- name: Authorize key with GitHub + local_action: + module: github_key + name: Access Key for Some Machine + token: '{{ github_access_token }}' + pubkey: '{{ ssh_pub_key.stdout }}' +''' + + +import json +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +API_BASE = 'https://api.github.com' + + +class GitHubResponse(object): + def __init__(self, response, info): + self.content = response.read() + self.info = info + + def json(self): + return json.loads(self.content) + + def links(self): + links = {} + if 'link' in self.info: + link_header = self.info['link'] + matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header) + for url, rel in matches: + links[rel] = url + return links + + +class GitHubSession(object): + def __init__(self, module, token): + self.module = module + self.token = token + + def request(self, method, url, data=None): + headers = { + 'Authorization': 'token %s' % self.token, + 'Content-Type': 'application/json', + 'Accept': 'application/vnd.github.v3+json', + } + response, info = fetch_url( + self.module, url, method=method, data=data, headers=headers) + if not (200 <= info['status'] < 400): + self.module.fail_json( + msg=(" failed to send request %s to %s: %s" + % (method, url, info['msg']))) + return GitHubResponse(response, info) + + +def get_all_keys(session): + url = API_BASE + '/user/keys' + result = [] + while url: + r = session.request('GET', url) + result.extend(r.json()) + url = r.links().get('next') + return result + + +def create_key(session, name, pubkey, check_mode): + if check_mode: + from datetime import datetime + now = datetime.utcnow() + return { + 'id': 0, + 'key': pubkey, + 'title': name, + 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', + 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'), + 'read_only': False, + 'verified': False + } + else: + return session.request( + 'POST', + API_BASE + '/user/keys', + data=json.dumps({'title': name, 'key': pubkey})).json() + + +def delete_keys(session, to_delete, check_mode): + if check_mode: + return + + for key in to_delete: + session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"]) + + +def ensure_key_absent(session, name, check_mode): + to_delete = [key for key in get_all_keys(session) if key['title'] == name] + delete_keys(session, to_delete, check_mode=check_mode) + + return {'changed': bool(to_delete), + 'deleted_keys': to_delete} + + +def ensure_key_present(module, session, name, pubkey, force, check_mode): + all_keys = get_all_keys(session) + matching_keys = [k for k in all_keys if k['title'] == name] + deleted_keys = [] + + new_signature = pubkey.split(' ')[1] + for key in all_keys: + existing_signature = key['key'].split(' ')[1] + if new_signature == existing_signature and key['title'] != name: + module.fail_json(msg=( + "another key with the same content is already registered " + "under the name |{0}|").format(key['title'])) + + if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature: + delete_keys(session, matching_keys, check_mode=check_mode) + (deleted_keys, matching_keys) = (matching_keys, []) + + if not matching_keys: + key = 
create_key(session, name, pubkey, check_mode=check_mode)
+    else:
+        key = matching_keys[0]
+
+    return {
+        'changed': bool(deleted_keys or not matching_keys),
+        'deleted_keys': deleted_keys,
+        'matching_keys': matching_keys,
+        'key': key
+    }
+
+
+def main():
+    argument_spec = {
+        'token': {'required': True, 'no_log': True},
+        'name': {'required': True},
+        'pubkey': {},
+        'state': {'choices': ['present', 'absent'], 'default': 'present'},
+        'force': {'default': True, 'type': 'bool'},
+    }
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    token = module.params['token']
+    name = module.params['name']
+    state = module.params['state']
+    force = module.params['force']
+    pubkey = module.params.get('pubkey')
+
+    if pubkey:
+        pubkey_parts = pubkey.split(' ')
+        # Keys consist of a protocol, the key data, and an optional comment.
+        if len(pubkey_parts) < 2:
+            module.fail_json(msg='"pubkey" parameter has an invalid format')
+    elif state == 'present':
+        module.fail_json(msg='"pubkey" is required when state=present')
+
+    session = GitHubSession(module, token)
+    if state == 'present':
+        result = ensure_key_present(module, session, name, pubkey, force=force,
+                                    check_mode=module.check_mode)
+    elif state == 'absent':
+        result = ensure_key_absent(session, name, check_mode=module.check_mode)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/github_release.py b/ansible_collections/community/general/plugins/modules/github_release.py
new file mode 100644
index 000000000..3ddd6c882
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/github_release.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Team
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+  - Fetch metadata about GitHub Releases.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  token:
+    description:
+      - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
+    type: str
+  user:
+    description:
+      - The GitHub account that owns the repository.
+    type: str
+    required: true
+  password:
+    description:
+      - The GitHub account password for the user. Mutually exclusive with C(token).
+    type: str
+  repo:
+    description:
+      - Repository name.
+    type: str
+    required: true
+  action:
+    description:
+      - Action to perform.
+    type: str
+    required: true
+    choices: [ 'latest_release', 'create_release' ]
+  tag:
+    description:
+      - Tag name when creating a release. Required when I(action) is set to C(create_release).
+    type: str
+  target:
+    description:
+      - Target of the release when creating a release.
+    type: str
+  name:
+    description:
+      - Name of the release when creating a release.
+    type: str
+  body:
+    description:
+      - Description of the release when creating a release.
+    type: str
+  draft:
+    description:
+      - Sets whether the release is a draft.
+    type: bool
+    default: false
+  prerelease:
+    description:
+      - Sets whether the release is a prerelease.
+    type: bool
+    default: false
+
+author:
+  - "Adrian Moisey (@adrianmoisey)"
+requirements:
+  - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of a public repository
+  community.general.github_release:
+    user: ansible
+    repo: ansible
+    action: latest_release
+
+- name: Get latest release of testuser/testrepo
+  community.general.github_release:
+    token: tokenabc1234567890
+    user: testuser
+    repo: testrepo
+    action: latest_release
+
+- name: Get latest release of testrepo using username and password
+  community.general.github_release:
+    user: testuser
+    password: secret123
+    repo: testrepo
+    action: latest_release
+
+- name: Create a new release
+  community.general.github_release:
+    token: tokenabc1234567890
+    user: testuser
+    repo: testrepo
+    action: create_release
+    tag: test
+    target: master
+    name: My Release
+    body: Some description
+
+'''
+
+RETURN = '''
+tag:
+  description: Version of the created/latest release.
+  type: str
+  returned: success
+  sample: 1.1.0
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+    import github3
+
+    HAS_GITHUB_API = True
+except ImportError:
+    GITHUB_IMP_ERR = traceback.format_exc()
+    HAS_GITHUB_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            repo=dict(required=True),
+            user=dict(required=True),
+            password=dict(no_log=True),
+            token=dict(no_log=True),
+            action=dict(
+                required=True, choices=['latest_release', 'create_release']),
+            tag=dict(type='str'),
+            target=dict(type='str'),
+            name=dict(type='str'),
+            body=dict(type='str'),
+            draft=dict(type='bool', default=False),
+            prerelease=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True,
+        mutually_exclusive=(('password', 'token'),),
+        required_if=[('action', 'create_release', ['tag']),
+                     ('action', 'create_release', ['password', 'token'], True)],
+    )
+
+    if not HAS_GITHUB_API:
+        module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
+                         exception=GITHUB_IMP_ERR)
+
+    repo = module.params['repo']
+    user = module.params['user']
+    password = module.params['password']
+    login_token = module.params['token']
+    action = module.params['action']
+    tag = module.params.get('tag')
+    target = module.params.get('target')
+    name = module.params.get('name')
+    body = module.params.get('body')
+    draft = module.params.get('draft')
+    prerelease = module.params.get('prerelease')
+
+    # Log in to GitHub
+    try:
+        if password:
+            gh_obj = github3.login(user, password=password)
+        elif login_token:
+            gh_obj = github3.login(token=login_token)
+        else:
+            gh_obj = github3.GitHub()
+
+        # Test whether we are actually logged in
+        if password or login_token:
+            gh_obj.me()
+    except github3.exceptions.AuthenticationFailed as e:
+        module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
+                         details="Please check username and password or token "
+                                 "for repository %s" % repo)
+
+    repository = gh_obj.repository(user, repo)
+
+    if not repository:
+        module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+    if action == 'latest_release':
+        release = repository.latest_release()
+        if release:
+            module.exit_json(tag=release.tag_name)
+        else:
+            module.exit_json(tag=None)
+
+    if action == 'create_release':
+        release_exists = repository.release_from_tag(tag)
+        if release_exists:
+            module.exit_json(changed=False, msg="Release for tag %s already exists."
% tag) + + release = repository.create_release( + tag, target, name, body, draft, prerelease) + if release: + module.exit_json(changed=True, tag=release.tag_name) + else: + module.exit_json(changed=False, tag=None) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/github_repo.py b/ansible_collections/community/general/plugins/modules/github_repo.py new file mode 100644 index 000000000..97076c58a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/github_repo.py @@ -0,0 +1,279 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Álvaro Torres Cogollo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: github_repo +short_description: Manage your repositories on Github +version_added: 2.2.0 +description: + - Manages Github repositories using PyGithub library. + - Authentication can be done with I(access_token) or with I(username) and I(password). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + username: + description: + - Username used for authentication. + - This is only needed when not using I(access_token). + type: str + required: false + password: + description: + - Password used for authentication. + - This is only needed when not using I(access_token). + type: str + required: false + access_token: + description: + - Token parameter for authentication. + - This is only needed when not using I(username) and I(password). + type: str + required: false + name: + description: + - Repository name. + type: str + required: true + description: + description: + - Description for the repository. + - Defaults to empty if I(force_defaults=true), which is the default in this module. + - Defaults to empty if I(force_defaults=false) when creating a new repository. + - This is only used when I(state) is C(present). + type: str + required: false + private: + description: + - Whether the repository should be private or not. + - Defaults to C(false) if I(force_defaults=true), which is the default in this module. + - Defaults to C(false) if I(force_defaults=false) when creating a new repository. + - This is only used when I(state) is C(present). + type: bool + required: false + state: + description: + - Whether the repository should exist or not. + type: str + default: present + choices: [ absent, present ] + required: false + organization: + description: + - Organization for the repository. + - When I(state) is C(present), the repository will be created in the current user profile. + type: str + required: false + api_url: + description: + - URL to the GitHub API if not using github.com but you own instance. + type: str + default: 'https://api.github.com' + version_added: "3.5.0" + force_defaults: + description: + - Overwrite current I(description) and I(private) attributes with defaults if set to C(true), which currently is the default. + - The default for this option will be deprecated in a future version of this collection, and eventually change to C(false). + type: bool + default: true + required: false + version_added: 4.1.0 +requirements: +- PyGithub>=1.54 +notes: +- For Python 3, PyGithub>=1.54 should be used. +- "For Python 3.5, PyGithub==1.54 should be used. 
More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)." +- "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)." +author: +- Álvaro Torres Cogollo (@atorrescogollo) +''' + +EXAMPLES = ''' +- name: Create a Github repository + community.general.github_repo: + access_token: mytoken + organization: MyOrganization + name: myrepo + description: "Just for fun" + private: true + state: present + force_defaults: false + register: result + +- name: Delete the repository + community.general.github_repo: + username: octocat + password: password + organization: MyOrganization + name: myrepo + state: absent + register: result +''' + +RETURN = ''' +repo: + description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository). + returned: success and I(state) is C(present) + type: dict +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +GITHUB_IMP_ERR = None +try: + from github import Github, GithubException, GithubObject + from github.GithubException import UnknownObjectException + HAS_GITHUB_PACKAGE = True +except Exception: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB_PACKAGE = False + + +def authenticate(username=None, password=None, access_token=None, api_url=None): + if not api_url: + return None + + if access_token: + return Github(base_url=api_url, login_or_token=access_token) + else: + return Github(base_url=api_url, login_or_token=username, password=password) + + +def create_repo(gh, name, organization=None, private=None, description=None, check_mode=False): + result = dict( + changed=False, + repo=dict()) + if organization: + target = gh.get_organization(organization) + else: + target = gh.get_user() + + repo = None + try: + repo = target.get_repo(name=name) + result['repo'] = repo.raw_data + except UnknownObjectException: + if not check_mode: + repo = target.create_repo( + name=name, + private=GithubObject.NotSet if private is None else private, + description=GithubObject.NotSet if description is None else description, + ) + result['repo'] = repo.raw_data + + result['changed'] = True + + changes = {} + if private is not None: + if repo is None or repo.raw_data['private'] != private: + changes['private'] = private + if description is not None: + if repo is None or repo.raw_data['description'] not in (description, description or None): + changes['description'] = description + + if changes: + if not check_mode: + repo.edit(**changes) + + result['repo'].update({ + 'private': repo._private.value if not check_mode else private, + 'description': repo._description.value if not check_mode else description, + }) + result['changed'] = True + + return result + + +def delete_repo(gh, name, organization=None, check_mode=False): + result = dict(changed=False) + if organization: + target = gh.get_organization(organization) + else: + target = gh.get_user() + try: + repo = target.get_repo(name=name) + if not check_mode: + repo.delete() + result['changed'] = True + except UnknownObjectException: + pass + + return result + + +def run_module(params, check_mode=False): + if params['force_defaults']: + params['description'] = params['description'] or '' + params['private'] = params['private'] or False + + gh = authenticate( + username=params['username'], password=params['password'], access_token=params['access_token'], + api_url=params['api_url']) + if 
params['state'] == "absent": + return delete_repo( + gh=gh, + name=params['name'], + organization=params['organization'], + check_mode=check_mode + ) + else: + return create_repo( + gh=gh, + name=params['name'], + organization=params['organization'], + private=params['private'], + description=params['description'], + check_mode=check_mode + ) + + +def main(): + module_args = dict( + username=dict(type='str'), + password=dict(type='str', no_log=True), + access_token=dict(type='str', no_log=True), + name=dict(type='str', required=True), + state=dict(type='str', required=False, default="present", + choices=["present", "absent"]), + organization=dict(type='str', required=False, default=None), + private=dict(type='bool'), + description=dict(type='str'), + api_url=dict(type='str', required=False, default='https://api.github.com'), + force_defaults=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_together=[('username', 'password')], + required_one_of=[('username', 'access_token')], + mutually_exclusive=[('username', 'access_token')] + ) + + if not HAS_GITHUB_PACKAGE: + module.fail_json(msg=missing_required_lib( + "PyGithub"), exception=GITHUB_IMP_ERR) + + try: + result = run_module(module.params, module.check_mode) + module.exit_json(**result) + except GithubException as e: + module.fail_json(msg="Github error. {0}".format(repr(e))) + except Exception as e: + module.fail_json(msg="Unexpected error. {0}".format(repr(e))) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/github_webhook.py b/ansible_collections/community/general/plugins/modules/github_webhook.py new file mode 100644 index 000000000..d47b7a82f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/github_webhook.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: github_webhook +short_description: Manage GitHub webhooks +description: + - "Create and delete GitHub webhooks" +requirements: + - "PyGithub >= 1.3.5" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + repository: + description: + - Full name of the repository to configure a hook for + type: str + required: true + aliases: + - repo + url: + description: + - URL to which payloads will be delivered + type: str + required: true + content_type: + description: + - The media type used to serialize the payloads + type: str + required: false + choices: [ form, json ] + default: form + secret: + description: + - The shared secret between GitHub and the payload URL. + type: str + required: false + insecure_ssl: + description: + - > + Flag to indicate that GitHub should skip SSL verification when calling + the hook. + required: false + type: bool + default: false + events: + description: + - > + A list of GitHub events the hook is triggered for. Events are listed at + U(https://developer.github.com/v3/activity/events/types/). 
Required + unless C(state) is C(absent) + required: false + type: list + elements: str + active: + description: + - Whether or not the hook is active + required: false + type: bool + default: true + state: + description: + - Whether the hook should be present or absent + type: str + required: false + choices: [ absent, present ] + default: present + user: + description: + - User to authenticate to GitHub as + type: str + required: true + password: + description: + - Password to authenticate to GitHub with + type: str + required: false + token: + description: + - Token to authenticate to GitHub with + type: str + required: false + github_url: + description: + - Base URL of the GitHub API + type: str + required: false + default: https://api.github.com + +author: + - "Chris St. Pierre (@stpierre)" +''' + +EXAMPLES = ''' +- name: Create a new webhook that triggers on push (password auth) + community.general.github_webhook: + repository: ansible/ansible + url: https://www.example.com/hooks/ + events: + - push + user: "{{ github_user }}" + password: "{{ github_password }}" + +- name: Create a new webhook in a github enterprise installation with multiple event triggers (token auth) + community.general.github_webhook: + repository: myorg/myrepo + url: https://jenkins.example.com/ghprbhook/ + content_type: json + secret: "{{ github_shared_secret }}" + insecure_ssl: true + events: + - issue_comment + - pull_request + user: "{{ github_user }}" + token: "{{ github_user_api_token }}" + github_url: https://github.example.com + +- name: Delete a webhook (password auth) + community.general.github_webhook: + repository: ansible/ansible + url: https://www.example.com/hooks/ + state: absent + user: "{{ github_user }}" + password: "{{ github_password }}" +''' + +RETURN = ''' +--- +hook_id: + description: The GitHub ID of the hook created/updated + returned: when state is 'present' + type: int + sample: 6206 +''' + +import traceback + +GITHUB_IMP_ERR = None +try: + import github + HAS_GITHUB = True +except ImportError: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def _create_hook_config(module): + hook_config = { + "url": module.params["url"], + "content_type": module.params["content_type"], + "insecure_ssl": "1" if module.params["insecure_ssl"] else "0" + } + + secret = module.params.get("secret") + if secret: + hook_config["secret"] = secret + + return hook_config + + +def create_hook(repo, module): + config = _create_hook_config(module) + try: + hook = repo.create_hook( + name="web", + config=config, + events=module.params["events"], + active=module.params["active"]) + except github.GithubException as err: + module.fail_json(msg="Unable to create hook for repository %s: %s" % ( + repo.full_name, to_native(err))) + + data = {"hook_id": hook.id} + return True, data + + +def update_hook(repo, hook, module): + config = _create_hook_config(module) + try: + hook.update() + hook.edit( + name="web", + config=config, + events=module.params["events"], + active=module.params["active"]) + + changed = hook.update() + except github.GithubException as err: + module.fail_json(msg="Unable to modify hook for repository %s: %s" % ( + repo.full_name, to_native(err))) + + data = {"hook_id": hook.id} + return changed, data + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repository=dict(type='str', required=True, aliases=['repo']), + 
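+            # The four options below make up the webhook's config object on
+            # the GitHub side (see _create_hook_config above).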
url=dict(type='str', required=True), + content_type=dict( + type='str', + choices=('json', 'form'), + required=False, + default='form'), + secret=dict(type='str', required=False, no_log=True), + insecure_ssl=dict(type='bool', required=False, default=False), + events=dict(type='list', elements='str', required=False), + active=dict(type='bool', required=False, default=True), + state=dict( + type='str', + required=False, + choices=('absent', 'present'), + default='present'), + user=dict(type='str', required=True), + password=dict(type='str', required=False, no_log=True), + token=dict(type='str', required=False, no_log=True), + github_url=dict( + type='str', required=False, default="https://api.github.com")), + mutually_exclusive=(('password', 'token'),), + required_one_of=(("password", "token"),), + required_if=(("state", "present", ("events",)),), + ) + + if not HAS_GITHUB: + module.fail_json(msg=missing_required_lib('PyGithub'), + exception=GITHUB_IMP_ERR) + + try: + github_conn = github.Github( + module.params["user"], + module.params.get("password") or module.params.get("token"), + base_url=module.params["github_url"]) + except github.GithubException as err: + module.fail_json(msg="Could not connect to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + + try: + repo = github_conn.get_repo(module.params["repository"]) + except github.BadCredentialsException as err: + module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + except github.UnknownObjectException as err: + module.fail_json( + msg="Could not find repository %s in GitHub at %s: %s" % ( + module.params["repository"], module.params["github_url"], + to_native(err))) + except Exception as err: + module.fail_json( + msg="Could not fetch repository %s from GitHub at %s: %s" % + (module.params["repository"], module.params["github_url"], + to_native(err)), + exception=traceback.format_exc()) + + hook = None + try: + for hook in repo.get_hooks(): + if hook.config.get("url") == module.params["url"]: + break + else: + hook = None + except github.GithubException as err: + module.fail_json(msg="Unable to get hooks from repository %s: %s" % ( + module.params["repository"], to_native(err))) + + changed = False + data = {} + if hook is None and module.params["state"] == "present": + changed, data = create_hook(repo, module) + elif hook is not None and module.params["state"] == "absent": + try: + hook.delete() + except github.GithubException as err: + module.fail_json( + msg="Unable to delete hook from repository %s: %s" % ( + repo.full_name, to_native(err))) + else: + changed = True + elif hook is not None and module.params["state"] == "present": + changed, data = update_hook(repo, hook, module) + # else, there is no hook and we want there to be no hook + + module.exit_json(changed=changed, **data) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/github_webhook_info.py b/ansible_collections/community/general/plugins/modules/github_webhook_info.py new file mode 100644 index 000000000..a6f7c3e52 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/github_webhook_info.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, 
print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: github_webhook_info +short_description: Query information about GitHub webhooks +description: + - "Query information about GitHub webhooks" + - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change. +requirements: + - "PyGithub >= 1.3.5" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + repository: + description: + - Full name of the repository to configure a hook for + type: str + required: true + aliases: + - repo + user: + description: + - User to authenticate to GitHub as + type: str + required: true + password: + description: + - Password to authenticate to GitHub with + type: str + required: false + token: + description: + - Token to authenticate to GitHub with + type: str + required: false + github_url: + description: + - Base URL of the github api + type: str + required: false + default: https://api.github.com + +author: + - "Chris St. Pierre (@stpierre)" +''' + +EXAMPLES = ''' +- name: List hooks for a repository (password auth) + community.general.github_webhook_info: + repository: ansible/ansible + user: "{{ github_user }}" + password: "{{ github_password }}" + register: ansible_webhooks + +- name: List hooks for a repository on GitHub Enterprise (token auth) + community.general.github_webhook_info: + repository: myorg/myrepo + user: "{{ github_user }}" + token: "{{ github_user_api_token }}" + github_url: https://github.example.com/api/v3/ + register: myrepo_webhooks +''' + +RETURN = ''' +--- +hooks: + description: A list of hooks that exist for the repo + returned: always + type: list + elements: dict + sample: + - { + "has_shared_secret": true, + "url": "https://jenkins.example.com/ghprbhook/", + "events": ["issue_comment", "pull_request"], + "insecure_ssl": "1", + "content_type": "json", + "active": true, + "id": 6206, + "last_response": {"status": "active", "message": "OK", "code": 200} + } +''' + +import traceback + +GITHUB_IMP_ERR = None +try: + import github + HAS_GITHUB = True +except ImportError: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def _munge_hook(hook_obj): + retval = { + "active": hook_obj.active, + "events": hook_obj.events, + "id": hook_obj.id, + "url": hook_obj.url, + } + retval.update(hook_obj.config) + retval["has_shared_secret"] = "secret" in retval + if "secret" in retval: + del retval["secret"] + + retval["last_response"] = hook_obj.last_response.raw_data + return retval + + +def main(): + module = AnsibleModule( + argument_spec=dict( + repository=dict(type='str', required=True, aliases=["repo"]), + user=dict(type='str', required=True), + password=dict(type='str', required=False, no_log=True), + token=dict(type='str', required=False, no_log=True), + github_url=dict( + type='str', required=False, default="https://api.github.com")), + mutually_exclusive=(('password', 'token'), ), + required_one_of=(("password", "token"), ), + supports_check_mode=True) + + if not HAS_GITHUB: + module.fail_json(msg=missing_required_lib('PyGithub'), + exception=GITHUB_IMP_ERR) + + try: + github_conn = github.Github( + module.params["user"], + module.params.get("password") or module.params.get("token"), + base_url=module.params["github_url"]) + except github.GithubException as err: + module.fail_json(msg="Could not connect to 
GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + + try: + repo = github_conn.get_repo(module.params["repository"]) + except github.BadCredentialsException as err: + module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % ( + module.params["github_url"], to_native(err))) + except github.UnknownObjectException as err: + module.fail_json( + msg="Could not find repository %s in GitHub at %s: %s" % ( + module.params["repository"], module.params["github_url"], + to_native(err))) + except Exception as err: + module.fail_json( + msg="Could not fetch repository %s from GitHub at %s: %s" % + (module.params["repository"], module.params["github_url"], + to_native(err)), + exception=traceback.format_exc()) + + try: + hooks = [_munge_hook(h) for h in repo.get_hooks()] + except github.GithubException as err: + module.fail_json( + msg="Unable to get hooks from repository %s: %s" % + (module.params["repository"], to_native(err)), + exception=traceback.format_exc()) + + module.exit_json(changed=False, hooks=hooks) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/gitlab_branch.py b/ansible_collections/community/general/plugins/modules/gitlab_branch.py new file mode 100644 index 000000000..d7eecb33f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/gitlab_branch.py @@ -0,0 +1,183 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: gitlab_branch +short_description: Create or delete a branch +version_added: 4.2.0 +description: + - This module allows to create or delete branches. +author: + - paytroff (@paytroff) +requirements: + - python >= 2.7 + - python-gitlab >= 2.3.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + description: + - Create or delete branch. + default: present + type: str + choices: ["present", "absent"] + project: + description: + - The path or name of the project. + required: true + type: str + branch: + description: + - The name of the branch that needs to be created. + required: true + type: str + ref_branch: + description: + - Reference branch to create from. + - This must be specified if I(state=present). 
+    type: str
+'''
+
+
+EXAMPLES = '''
+- name: Create branch branch2 from main
+  community.general.gitlab_branch:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    branch: branch2
+    ref_branch: main
+    state: present
+
+- name: Delete branch branch2
+  community.general.gitlab_branch:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    branch: branch2
+    state: absent
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitlabBranch(object):
+
+    def __init__(self, module, project, gitlab_instance):
+        self.repo = gitlab_instance
+        self._module = module
+        self.project = self.get_project(project)
+
+    def get_project(self, project):
+        try:
+            return self.repo.projects.get(project)
+        except Exception:
+            return False
+
+    def get_branch(self, branch):
+        try:
+            return self.project.branches.get(branch)
+        except Exception:
+            return False
+
+    def create_branch(self, branch, ref_branch):
+        return self.project.branches.create({'branch': branch, 'ref': ref_branch})
+
+    def delete_branch(self, branch):
+        return branch.delete()
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(auth_argument_spec())
+    argument_spec.update(
+        project=dict(type='str', required=True),
+        branch=dict(type='str', required=True),
+        ref_branch=dict(type='str', required=False),
+        state=dict(type='str', default="present", choices=["absent", "present"]),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['api_username', 'api_token'],
+            ['api_username', 'api_oauth_token'],
+            ['api_username', 'api_job_token'],
+            ['api_token', 'api_oauth_token'],
+            ['api_token', 'api_job_token'],
+        ],
+        required_together=[
+            ['api_username', 'api_password'],
+        ],
+        required_one_of=[
+            ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+        ],
+        required_if=[
+            ['state', 'present', ['ref_branch'], True],
+        ],
+        supports_check_mode=False
+    )
+    ensure_gitlab_package(module)
+
+    project = module.params['project']
+    branch = module.params['branch']
+    ref_branch = module.params['ref_branch']
+    state = module.params['state']
+
+    gitlab_version = gitlab.__version__
+    if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
+        module.fail_json(msg="community.general.gitlab_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
+                             " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
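+    # (editor's aside, not upstream code: the check above uses LooseVersion
+    #  because version components must compare numerically, e.g.
+    #      LooseVersion('2.10.0') > LooseVersion('2.3.0')   # True
+    #  whereas a plain string comparison would get this wrong)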
+
+    gitlab_instance = gitlab_authentication(module)
+    this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance)
+
+    this_branch = this_gitlab.get_branch(branch)
+
+    if not this_branch and state == "present":
+        r_branch = this_gitlab.get_branch(ref_branch)
+        if not r_branch:
+            module.fail_json(msg="Ref branch {b} does not exist.".format(b=ref_branch))
+        this_gitlab.create_branch(branch, ref_branch)
+        module.exit_json(changed=True, msg="Created the branch {b}.".format(b=branch))
+    elif this_branch and state == "present":
+        module.exit_json(changed=False, msg="Branch {b} already exists.".format(b=branch))
+    elif this_branch and state == "absent":
+        try:
+            this_gitlab.delete_branch(this_branch)
+            module.exit_json(changed=True, msg="Branch {b} deleted.".format(b=branch))
+        except Exception:
+            module.fail_json(msg="Error deleting branch.", exception=traceback.format_exc())
+    else:
+        module.exit_json(changed=False, msg="No changes are needed.")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py b/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
new file mode 100644
index 000000000..27cb01f87
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins
+# Based on code:
+# Copyright (c) 2013, Phillip Gentry
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_deploy_key
+short_description: Manages GitLab project deploy keys
+description:
+  - Adds, updates and removes project deploy keys.
+author:
+  - Marcus Watkins (@marwatk)
+  - Guillaume Martinez (@Lunik)
+requirements:
+  - python >= 2.7
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  project:
+    description:
+      - ID or full path of project in the form of group/name.
+    required: true
+    type: str
+  title:
+    description:
+      - Deploy key's title.
+    required: true
+    type: str
+  key:
+    description:
+      - Deploy key.
+    required: true
+    type: str
+  can_push:
+    description:
+      - Whether this key can push to the project.
+    type: bool
+    default: false
+  state:
+    description:
+      - When C(present), the deploy key is added to the project if it doesn't exist.
+      - When C(absent), it will be removed from the project if it exists.
+    default: present
+    type: str
+    choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+- name: "Adding a project deploy key"
+  community.general.gitlab_deploy_key:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ api_token }}"
+    project: "my_group/my_project"
+    title: "Jenkins CI"
+    state: present
+    key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
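+# (Editor's addition -- a hypothetical variant, not part of the upstream
+#  examples: the public key can also be read from a file on the controller
+#  with the built-in `file` lookup.)
+#
+# - name: "Adding a project deploy key read from a local file"
+#   community.general.gitlab_deploy_key:
+#     api_url: https://gitlab.example.com/
+#     api_token: "{{ api_token }}"
+#     project: "my_group/my_project"
+#     title: "Jenkins CI"
+#     state: present
+#     key: "{{ lookup('file', '~/.ssh/jenkins_ci.pub') }}"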
+
+- name: "Update the above deploy key to add push access"
+  community.general.gitlab_deploy_key:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ api_token }}"
+    project: "my_group/my_project"
+    title: "Jenkins CI"
+    state: present
+    can_push: true
+
+- name: "Remove the previous deploy key from the project"
+  community.general.gitlab_deploy_key:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ api_token }}"
+    project: "my_group/my_project"
+    state: absent
+    key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+'''
+
+RETURN = '''
+msg:
+  description: Success or failure message
+  returned: always
+  type: str
+  sample: "Success"
+
+result:
+  description: JSON-parsed response from the server
+  returned: always
+  type: dict
+
+error:
+  description: The error message returned by the GitLab API
+  returned: failed
+  type: str
+  sample: "400: key is already in use"
+
+deploy_key:
+  description: API object
+  returned: always
+  type: dict
+'''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, find_project, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitLabDeployKey(object):
+    def __init__(self, module, gitlab_instance):
+        self._module = module
+        self._gitlab = gitlab_instance
+        self.deploy_key_object = None
+
+    '''
+    @param project Project object
+    @param key_title Title of the key
+    @param key_key String of the key
+    @param options Deploy key options
+    '''
+    def create_or_update_deploy_key(self, project, key_title, key_key, options):
+        changed = False
+
+        # note: unfortunately a public key cannot be updated directly by
+        # the GitLab REST API, so for that case we need to delete and
+        # then recreate the key
+        if self.deploy_key_object and self.deploy_key_object.key != key_key:
+            if not self._module.check_mode:
+                self.deploy_key_object.delete()
+            self.deploy_key_object = None
+
+        # Because we have already called exists_deploy_key in main()
+        if self.deploy_key_object is None:
+            deploy_key = self.create_deploy_key(project, {
+                'title': key_title,
+                'key': key_key,
+                'can_push': options['can_push']})
+            changed = True
+        else:
+            changed, deploy_key = self.update_deploy_key(self.deploy_key_object, {
+                'title': key_title,
+                'can_push': options['can_push']})
+
+        self.deploy_key_object = deploy_key
+        if changed:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)
+
+            try:
+                deploy_key.save()
+            except Exception as e:
+                self._module.fail_json(msg="Failed to update deploy key: %s " % e)
+            return True
+        else:
+            return False
+
+    '''
+    @param project Project Object
+    @param arguments Attributes of the deploy_key
+    '''
+    def create_deploy_key(self, project, arguments):
+        if self._module.check_mode:
+            return True
+
+        try:
+            deploy_key = project.keys.create(arguments)
+        except (gitlab.exceptions.GitlabCreateError) as e:
+            self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))
+
+        return deploy_key
+
+    '''
+    @param deploy_key Deploy Key Object
+    @param arguments Attributes of the deploy_key
+    '''
+    def update_deploy_key(self, deploy_key, arguments):
+        changed = False
+
+        for arg_key, arg_value in arguments.items():
+            if arguments[arg_key] is not None:
+                if getattr(deploy_key, arg_key) != arguments[arg_key]:
+                    setattr(deploy_key, arg_key, arguments[arg_key])
+                    changed = True
+
+        return (changed, deploy_key)
+
+    '''
+    @param project Project object
+    @param key_title Title of the key
+    '''
+    def find_deploy_key(self, project, key_title):
+        deploy_keys = project.keys.list(all=True)
+        for deploy_key in deploy_keys:
+            if (deploy_key.title == key_title):
+                return deploy_key
+
+    '''
+    @param project Project object
+    @param key_title Title of the key
+    '''
+    def exists_deploy_key(self, project, key_title):
+        # When the deploy key exists, the object is stored in self.deploy_key_object.
+        deploy_key = self.find_deploy_key(project, key_title)
+        if deploy_key:
+            self.deploy_key_object = deploy_key
+            return True
+        return False
+
+    def delete_deploy_key(self):
+        if self._module.check_mode:
+            return True
+
+        return self.deploy_key_object.delete()
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(auth_argument_spec())
+    argument_spec.update(dict(
+        state=dict(type='str', default="present", choices=["absent", "present"]),
+        project=dict(type='str', required=True),
+        key=dict(type='str', required=True, no_log=False),
+        can_push=dict(type='bool', default=False),
+        title=dict(type='str', required=True)
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['api_username', 'api_token'],
+            ['api_username', 'api_oauth_token'],
+            ['api_username', 'api_job_token'],
+            ['api_token', 'api_oauth_token'],
+            ['api_token', 'api_job_token'],
+        ],
+        required_together=[
+            ['api_username', 'api_password']
+        ],
+        required_one_of=[
+            ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+        ],
+        supports_check_mode=True,
+    )
+    ensure_gitlab_package(module)
+
+    state = module.params['state']
+    project_identifier = module.params['project']
+    key_title = module.params['title']
+    key_keyfile = module.params['key']
+    key_can_push = module.params['can_push']
+
+    gitlab_instance = gitlab_authentication(module)
+
+    gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
+
+    project = find_project(gitlab_instance, project_identifier)
+
+    if project is None:
+        module.fail_json(msg="Failed to create deploy key: project %s doesn't exist" % project_identifier)
+
+    deploy_key_exists = gitlab_deploy_key.exists_deploy_key(project, key_title)
+
+    if state == 'absent':
+        if deploy_key_exists:
+            gitlab_deploy_key.delete_deploy_key()
+            module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
+        else:
+            module.exit_json(changed=False, msg="Deploy key deleted or does not exist")
+
+    if state == 'present':
+        if gitlab_deploy_key.create_or_update_deploy_key(project, key_title, key_keyfile, {'can_push': key_can_push}):
+            module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
+                             deploy_key=gitlab_deploy_key.deploy_key_object._attrs)
+        else:
+            module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
+                             deploy_key=gitlab_deploy_key.deploy_key_object._attrs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group.py b/ansible_collections/community/general/plugins/modules/gitlab_group.py
new file mode 100644
index 000000000..4de1ffc5f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group.py
@@ -0,0 +1,400 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019,
Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: gitlab_group +short_description: Creates/updates/deletes GitLab Groups +description: + - When the group does not exist in GitLab, it will be created. + - When the group does exist and state=absent, the group will be deleted. +author: + - Werner Dijkerman (@dj-wasabi) + - Guillaume Martinez (@Lunik) +requirements: + - python >= 2.7 + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + name: + description: + - Name of the group you want to create. + required: true + type: str + path: + description: + - The path of the group you want to create, this will be api_url/group_path + - If not supplied, the group_name will be used. + type: str + description: + description: + - A description for the group. + type: str + state: + description: + - create or delete group. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + parent: + description: + - Allow to create subgroups + - Id or Full path of parent group in the form of group/name + type: str + visibility: + description: + - Default visibility of the group + choices: ["private", "internal", "public"] + default: private + type: str + project_creation_level: + description: + - Determine if developers can create projects in the group. + choices: ["developer", "maintainer", "noone"] + type: str + version_added: 3.7.0 + auto_devops_enabled: + description: + - Default to Auto DevOps pipeline for all projects within this group. + type: bool + version_added: 3.7.0 + subgroup_creation_level: + description: + - Allowed to create subgroups. + choices: ["maintainer", "owner"] + type: str + version_added: 3.7.0 + require_two_factor_authentication: + description: + - Require all users in this group to setup two-factor authentication. + type: bool + version_added: 3.7.0 + avatar_path: + description: + - Absolute path image to configure avatar. File size should not exceed 200 kb. + - This option is only used on creation, not for updates. 
+ type: path + version_added: 4.2.0 +''' + +EXAMPLES = ''' +- name: "Delete GitLab Group" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + validate_certs: false + name: my_first_group + state: absent + +- name: "Create GitLab Group" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_first_group + path: my_first_group + state: present + +# The group will by created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group +- name: "Create GitLab SubGroup" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_first_group + path: my_first_group + state: present + parent: "super_parent/parent" + +# Other group which only allows sub-groups - no projects +- name: "Create GitLab Group for SubGroups only" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_main_group + path: my_main_group + state: present + project_creation_level: noone + auto_devops_enabled: false + subgroup_creation_level: maintainer +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: path is already in use" + +group: + description: API object + returned: always + type: dict +''' + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab, ensure_gitlab_package +) + + +class GitLabGroup(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.group_object = None + + ''' + @param group Group object + ''' + def get_group_id(self, group): + if group is not None: + return group.id + return None + + ''' + @param name Name of the group + @param parent Parent group full path + @param options Group options + ''' + def create_or_update_group(self, name, parent, options): + changed = False + + # Because we have already call userExists in main() + if self.group_object is None: + parent_id = self.get_group_id(parent) + + payload = { + 'name': name, + 'path': options['path'], + 'parent_id': parent_id, + 'visibility': options['visibility'], + 'project_creation_level': options['project_creation_level'], + 'auto_devops_enabled': options['auto_devops_enabled'], + 'subgroup_creation_level': options['subgroup_creation_level'], + } + if options.get('description'): + payload['description'] = options['description'] + if options.get('require_two_factor_authentication'): + payload['require_two_factor_authentication'] = options['require_two_factor_authentication'] + group = self.create_group(payload) + + # add avatar to group + if options['avatar_path']: + try: + group.avatar = open(options['avatar_path'], 'rb') + except IOError as e: + self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e)) + changed = True + 
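+            # (Editor's note, a sketch under the assumption that python-gitlab's
+            #  usual save semantics apply to avatars: assigning the open file to
+            #  group.avatar only stages the image; it is sent to the server by
+            #  the later group.save() call, roughly
+            #      group.avatar = open(avatar_path, 'rb')
+            #      group.save()   # uploads the avatar with the other changes
+            #  )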
+        else:
+            changed, group = self.update_group(self.group_object, {
+                'name': name,
+                'description': options['description'],
+                'visibility': options['visibility'],
+                'project_creation_level': options['project_creation_level'],
+                'auto_devops_enabled': options['auto_devops_enabled'],
+                'subgroup_creation_level': options['subgroup_creation_level'],
+                'require_two_factor_authentication': options['require_two_factor_authentication'],
+            })
+
+        self.group_object = group
+        if changed:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name)
+
+            try:
+                group.save()
+            except Exception as e:
+                self._module.fail_json(msg="Failed to update group: %s " % e)
+            return True
+        else:
+            return False
+
+    '''
+    @param arguments Attributes of the group
+    '''
+    def create_group(self, arguments):
+        if self._module.check_mode:
+            return True
+
+        try:
+            # Filter out None values
+            filtered = dict((arg_key, arg_value) for arg_key, arg_value in arguments.items() if arg_value is not None)
+
+            group = self._gitlab.groups.create(filtered)
+        except (gitlab.exceptions.GitlabCreateError) as e:
+            self._module.fail_json(msg="Failed to create group: %s " % to_native(e))
+
+        return group
+
+    '''
+    @param group Group Object
+    @param arguments Attributes of the group
+    '''
+    def update_group(self, group, arguments):
+        changed = False
+
+        for arg_key, arg_value in arguments.items():
+            if arguments[arg_key] is not None:
+                if getattr(group, arg_key) != arguments[arg_key]:
+                    setattr(group, arg_key, arguments[arg_key])
+                    changed = True
+
+        return (changed, group)
+
+    def delete_group(self):
+        group = self.group_object
+
+        if len(group.projects.list(all=False)) >= 1:
+            self._module.fail_json(
+                msg="There are still projects in this group. These need to be moved or deleted before this group can be removed.")
+        else:
+            if self._module.check_mode:
+                return True
+
+            try:
+                group.delete()
+            except Exception as e:
+                self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
+
+    '''
+    @param name Name of the group
+    @param full_path Complete path of the group, including the parent group path
+    '''
+    def exists_group(self, project_identifier):
+        # When the group exists, the object is stored in self.group_object.
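+        # (Editor's sketch, an assumption about the helper in module_utils:
+        #  find_group accepts a numeric id or a full path and essentially wraps
+        #      self._gitlab.groups.get("parent/child")  # raises GitlabGetError if absent
+        #  swallowing the exception so a missing group comes back as None.)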
+        group = find_group(self._gitlab, project_identifier)
+        if group:
+            self.group_object = group
+            return True
+        return False
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(auth_argument_spec())
+    argument_spec.update(dict(
+        name=dict(type='str', required=True),
+        path=dict(type='str'),
+        description=dict(type='str'),
+        state=dict(type='str', default="present", choices=["absent", "present"]),
+        parent=dict(type='str'),
+        visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
+        project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']),
+        auto_devops_enabled=dict(type='bool'),
+        subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']),
+        require_two_factor_authentication=dict(type='bool'),
+        avatar_path=dict(type='path'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['api_username', 'api_token'],
+            ['api_username', 'api_oauth_token'],
+            ['api_username', 'api_job_token'],
+            ['api_token', 'api_oauth_token'],
+            ['api_token', 'api_job_token'],
+        ],
+        required_together=[
+            ['api_username', 'api_password'],
+        ],
+        required_one_of=[
+            ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+        ],
+        supports_check_mode=True,
+    )
+    ensure_gitlab_package(module)
+
+    group_name = module.params['name']
+    group_path = module.params['path']
+    description = module.params['description']
+    state = module.params['state']
+    parent_identifier = module.params['parent']
+    group_visibility = module.params['visibility']
+    project_creation_level = module.params['project_creation_level']
+    auto_devops_enabled = module.params['auto_devops_enabled']
+    subgroup_creation_level = module.params['subgroup_creation_level']
+    require_two_factor_authentication = module.params['require_two_factor_authentication']
+    avatar_path = module.params['avatar_path']
+
+    gitlab_instance = gitlab_authentication(module)
+
+    # Define default group_path based on group_name
+    if group_path is None:
+        group_path = group_name.replace(" ", "_")
+
+    gitlab_group = GitLabGroup(module, gitlab_instance)
+
+    parent_group = None
+    if parent_identifier:
+        parent_group = find_group(gitlab_instance, parent_identifier)
+        if not parent_group:
+            module.fail_json(msg="Failed to create GitLab group: parent group doesn't exist")
+
+        group_exists = gitlab_group.exists_group(parent_group.full_path + '/' + group_path)
+    else:
+        group_exists = gitlab_group.exists_group(group_path)
+
+    if state == 'absent':
+        if group_exists:
+            gitlab_group.delete_group()
+            module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
+        else:
+            module.exit_json(changed=False, msg="Group deleted or does not exist")
+
+    if state == 'present':
+        if gitlab_group.create_or_update_group(group_name, parent_group, {
+            "path": group_path,
+            "description": description,
+            "visibility": group_visibility,
+            "project_creation_level": project_creation_level,
+            "auto_devops_enabled": auto_devops_enabled,
+            "subgroup_creation_level": subgroup_creation_level,
+            "require_two_factor_authentication": require_two_factor_authentication,
+            "avatar_path": avatar_path,
+        }):
+            module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs)
+        else:
+            module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.group_object._attrs)
+
+
+if __name__ == '__main__':
+    main()
diff --git
a/ansible_collections/community/general/plugins/modules/gitlab_group_members.py b/ansible_collections/community/general/plugins/modules/gitlab_group_members.py new file mode 100644 index 000000000..66298e882 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/gitlab_group_members.py @@ -0,0 +1,441 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: gitlab_group_members +short_description: Manage group members on GitLab Server +description: + - This module allows to add and remove members to/from a group, or change a member's access level in a group on GitLab. +version_added: '1.2.0' +author: Zainab Alsaffar (@zanssa) +requirements: + - python-gitlab python module <= 1.15.0 + - administrator rights on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + gitlab_group: + description: + - The C(full_path) of the GitLab group the member is added to/removed from. + - Setting this to C(name) or C(path) has been disallowed since community.general 6.0.0. Use C(full_path) instead. + required: true + type: str + gitlab_user: + description: + - A username or a list of usernames to add to/remove from the GitLab group. + - Mutually exclusive with I(gitlab_users_access). + type: list + elements: str + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + - Mutually exclusive with I(gitlab_users_access). + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + gitlab_users_access: + description: + - Provide a list of user to access level mappings. + - Every dictionary in this list specifies a user (by username) and the access level the user should have. + - Mutually exclusive with I(gitlab_user) and I(access_level). + - Use together with I(purge_users) to remove all users not specified here from the group. + type: list + elements: dict + suboptions: + name: + description: A username or a list of usernames to add to/remove from the GitLab group. + type: str + required: true + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + required: true + version_added: 3.6.0 + state: + description: + - State of the member in the group. + - On C(present), it adds a user to a GitLab group. + - On C(absent), it removes a user from a GitLab group. + choices: ['present', 'absent'] + default: 'present' + type: str + purge_users: + description: + - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. + If omitted do not purge orphaned members. + - Is only used when I(state=present). 
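+#     (Editor's illustration, not upstream docs -- purging every developer that
+#      is not named in the task would look like:
+#        gitlab_user: [alice, bob]
+#        access_level: developer
+#        purge_users: [developer]
+#      where alice and bob are placeholder usernames.)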
+    type: list
+    elements: str
+    choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+    version_added: 3.6.0
+'''
+
+EXAMPLES = r'''
+- name: Add a user to a GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user: username
+    access_level: developer
+    state: present
+
+- name: Remove a user from a GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user: username
+    state: absent
+
+- name: Add a list of Users to a GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user:
+      - user1
+      - user2
+    access_level: developer
+    state: present
+
+- name: Add a list of Users with Dedicated Access Levels to a GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_users_access:
+      - name: user1
+        access_level: developer
+      - name: user2
+        access_level: maintainer
+    state: present
+
+- name: Add a user, remove all others which might be on this access level
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user: username
+    access_level: developer
+    purge_users: developer
+    state: present
+
+- name: Remove a list of Users with Dedicated Access Levels from a GitLab Group
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_users_access:
+      - name: user1
+        access_level: developer
+      - name: user2
+        access_level: maintainer
+    state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitLabGroup(object):
+    def __init__(self, module, gl):
+        self._module = module
+        self._gitlab = gl
+
+    # get user id if the user exists
+    def get_user_id(self, gitlab_user):
+        user_exists = self._gitlab.users.list(username=gitlab_user, all=True)
+        if user_exists:
+            return user_exists[0].id
+
+    # get group id if group exists
+    def get_group_id(self, gitlab_group):
+        groups = self._gitlab.groups.list(search=gitlab_group, all=True)
+        for group in groups:
+            if group.full_path == gitlab_group:
+                return group.id
+
+    # get all members in a group
+    def get_members_in_a_group(self, gitlab_group_id):
+        group = self._gitlab.groups.get(gitlab_group_id)
+        return group.members.list(all=True)
+
+    # get single member in a group by user name
+    def get_member_in_a_group(self, gitlab_group_id, gitlab_user_id):
+        member = None
+        group = self._gitlab.groups.get(gitlab_group_id)
+        try:
+            member = group.members.get(gitlab_user_id)
+            if member:
+                return member
+        except gitlab.exceptions.GitlabGetError:
+            return None
+
+    # check if the user is a member of the group
+    def is_user_a_member(self, members, gitlab_user_id):
+        for member in members:
+            if member.id == gitlab_user_id:
+                return True
+        return False
+
+    # add user to a group
+    def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level):
+        group =
self._gitlab.groups.get(gitlab_group_id) + add_member = group.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) + + # remove user from a group + def remove_user_from_group(self, gitlab_user_id, gitlab_group_id): + group = self._gitlab.groups.get(gitlab_group_id) + group.members.delete(gitlab_user_id) + + # get user's access level + def get_user_access_level(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return member.access_level + + # update user's access level in a group + def update_user_access_level(self, members, gitlab_user_id, access_level): + for member in members: + if member.id == gitlab_user_id: + member.access_level = access_level + member.save() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + gitlab_group=dict(type='str', required=True), + gitlab_user=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + gitlab_users_access=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), + ) + ), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], + ['access_level', 'gitlab_users_access'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ['gitlab_user', 'access_level'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['gitlab_user', 'gitlab_users_access'], + ], + required_if=[ + ['state', 'present', ['access_level', 'gitlab_users_access'], True], + ], + supports_check_mode=True, + ) + ensure_gitlab_package(module) + + access_level_int = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS, + 'owner': gitlab.OWNER_ACCESS, + } + + gitlab_group = module.params['gitlab_group'] + state = module.params['state'] + access_level = module.params['access_level'] + purge_users = module.params['purge_users'] + + if purge_users: + purge_users = [access_level_int[level] for level in purge_users] + + # connect to gitlab server + gl = gitlab_authentication(module) + + group = GitLabGroup(module, gl) + + gitlab_group_id = group.get_group_id(gitlab_group) + + # group doesn't exist + if not gitlab_group_id: + module.fail_json(msg="group '%s' not found." 
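+    # (editor's aside: access_level_int above maps the option strings onto
+    #  python-gitlab constants, which mirror the GitLab API integers:
+    #      gitlab.GUEST_ACCESS == 10, gitlab.REPORTER_ACCESS == 20,
+    #      gitlab.DEVELOPER_ACCESS == 30, gitlab.MAINTAINER_ACCESS == 40,
+    #      gitlab.OWNER_ACCESS == 50)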
% gitlab_group)
+
+    members = []
+    if module.params['gitlab_user'] is not None:
+        gitlab_users_access = []
+        gitlab_users = module.params['gitlab_user']
+        for gl_user in gitlab_users:
+            gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
+    elif module.params['gitlab_users_access'] is not None:
+        gitlab_users_access = module.params['gitlab_users_access']
+        for user_level in gitlab_users_access:
+            user_level['access_level'] = access_level_int[user_level['access_level']]
+
+    if len(gitlab_users_access) == 1 and not purge_users:
+        # only a single user was given
+        members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))]
+        if members[0] is None:
+            members = []
+    elif len(gitlab_users_access) > 1 or purge_users:
+        # a list of users was given
+        members = group.get_members_in_a_group(gitlab_group_id)
+    else:
+        module.exit_json(changed=False, result="Nothing to do, please give at least one user or set purge_users true.",
+                         result_data=[])
+
+    changed = False
+    error = False
+    changed_users = []
+    changed_data = []
+
+    for gitlab_user in gitlab_users_access:
+        gitlab_user_id = group.get_user_id(gitlab_user['name'])
+
+        # user doesn't exist
+        if not gitlab_user_id:
+            if state == 'absent':
+                changed_users.append("user '%s' not found, and thus also not part of the group" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "user '%s' not found, and thus also not part of the group" % gitlab_user['name']})
+            else:
+                error = True
+                changed_users.append("user '%s' not found." % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                     'msg': "user '%s' not found." % gitlab_user['name']})
+            continue
+
+        is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
+
+        # check if the user is a member in the group
+        if not is_user_a_member:
+            if state == 'present':
+                # add user to the group
+                try:
+                    if not module.check_mode:
+                        group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level'])
+                    changed = True
+                    changed_users.append("Successfully added user '%s' to group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully added user '%s' to group" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabCreateError) as e:
+                    error = True
+                    changed_users.append("Failed to add user, '%s', to the group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+            # state is absent
+            else:
+                changed_users.append("User, '%s', is not a member in the group. No change to report" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "User, '%s', is not a member in the group. No change to report" % gitlab_user['name']})
+        # in case the user is a member
+        else:
+            if state == 'present':
+                # compare the access level
+                user_access_level = group.get_user_access_level(members, gitlab_user_id)
+                if user_access_level == gitlab_user['access_level']:
+                    changed_users.append("User, '%s', is already a member in the group. No change to report" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                         'msg': "User, '%s', is already a member in the group. No change to report" % gitlab_user['name']})
+                else:
+                    # update the access level for the user
+                    try:
+                        if not module.check_mode:
+                            group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
+                        changed = True
+                        changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name'])
+                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                             'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']})
+                    except (gitlab.exceptions.GitlabUpdateError) as e:
+                        error = True
+                        changed_users.append("Failed to update the access level for the user, '%s'" % gitlab_user['name'])
+                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                             'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+            else:
+                # remove the user from the group
+                try:
+                    if not module.check_mode:
+                        group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
+                    changed = True
+                    changed_users.append("Successfully removed user, '%s', from the group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully removed user, '%s', from the group" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabDeleteError) as e:
+                    error = True
+                    changed_users.append("Failed to remove user, '%s', from the group" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)})
+
+    # if state is present and purge_users is set, remove members that have one of the
+    # given access levels but are not in the given gitlab_user/gitlab_users_access list
+    if state == 'present' and purge_users:
+        uppercase_names_in_gitlab_users_access = []
+        for name in gitlab_users_access:
+            uppercase_names_in_gitlab_users_access.append(name['name'].upper())
+
+        for member in members:
+            if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
+                try:
+                    if not module.check_mode:
+                        group.remove_user_from_group(member.id, gitlab_group_id)
+                    changed = True
+                    changed_users.append("Successfully removed user '%s', from group. Was not in given list" % member.username)
+                    changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
+                                         'msg': "Successfully removed user '%s', from group. Was not in given list" % member.username})
+                except (gitlab.exceptions.GitlabDeleteError) as e:
+                    error = True
+                    changed_users.append("Failed to remove user, '%s', from the group" % member.username)
+                    changed_data.append({'gitlab_user': member.username, 'result': 'FAILED',
+                                         'msg': "Failed to remove user, '%s' from the group: %s" % (member.username, e)})
+
+    if len(gitlab_users_access) == 1 and error:
+        # if a single user was given and an error occurred, return it directly;
+        # for lists, errors are reported per user in result_data
+        module.fail_json(msg="FAILED: '%s'" % changed_users[0], result_data=changed_data)
+    elif error:
+        module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
+
+    module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
new file mode 100644
index 000000000..c7befe123
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
@@ -0,0 +1,455 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Florent Madiot (scodeman@scode.io)
+# Based on code:
+# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_group_variable
+short_description: Creates, updates, or deletes GitLab group variables
+version_added: 1.2.0
+description:
+  - Creates a group variable if it does not exist.
+  - When a group variable does exist, its value will be updated when the values are different.
+  - Variables which are present in the GitLab group but not in the playbook either stay
+    untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
+author:
+  - Florent Madiot (@scodeman)
+requirements:
+  - python >= 2.7
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    description:
+      - Create or delete group variable.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  group:
+    description:
+      - The path and name of the group.
+    required: true
+    type: str
+  purge:
+    description:
+      - When set to C(true), delete all variables which are not mentioned in the task.
+    default: false
+    type: bool
+  vars:
+    description:
+      - When the list element is a simple key-value pair, masked and protected will be set to false.
+      - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+        have full control over whether a value should be masked, protected or both.
+      - Support for group variables requires GitLab >= 9.5.
+      - Support for environment_scope requires GitLab Premium >= 13.11.
+      - Support for protected values requires GitLab >= 9.3.
+      - Support for masked values requires GitLab >= 11.10.
+      - A I(value) must be a string or a number.
+      - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
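+#     (Editor's illustration of the two accepted shapes, not part of the
+#      upstream docs; SIMPLE_VAR/FULL_VAR are placeholder names:
+#        vars:
+#          SIMPLE_VAR: plain_value          # masked/protected default to false
+#          FULL_VAR:
+#            value: secret_value
+#            masked: true
+#            protected: true)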
+      - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+        See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+    default: {}
+    type: dict
+  variables:
+    version_added: 4.5.0
+    description:
+      - A list of dictionaries that represents CI/CD variables.
+      - This module works internally with this structure, even if the older I(vars) parameter is used.
+    default: []
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        description:
+          - The name of the variable.
+        type: str
+        required: true
+      value:
+        description:
+          - The variable value.
+          - Required when I(state=present).
+        type: str
+      masked:
+        description:
+          - Whether variable value is masked or not.
+        type: bool
+        default: false
+      protected:
+        description:
+          - Whether variable value is protected or not.
+        type: bool
+        default: false
+      variable_type:
+        description:
+          - Whether a variable is an environment variable (C(env_var)) or a file (C(file)).
+        type: str
+        choices: [ "env_var", "file" ]
+        default: env_var
+      environment_scope:
+        description:
+          - The scope for the variable.
+        type: str
+        default: '*'
+'''
+
+
+EXAMPLES = r'''
+- name: Set or update some CI/CD variables
+  community.general.gitlab_group_variable:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    group: scodeman/testgroup/
+    purge: false
+    variables:
+      - name: ACCESS_KEY_ID
+        value: abc123
+      - name: SECRET_ACCESS_KEY
+        value: 3214cbad
+        masked: true
+        protected: true
+        variable_type: env_var
+        environment_scope: production
+
+- name: Delete one variable
+  community.general.gitlab_group_variable:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    group: scodeman/testgroup/
+    state: absent
+    vars:
+      ACCESS_KEY_ID: abc123
+'''
+
+RETURN = r'''
+group_variable:
+  description: Four lists of the variable names which were added, updated, removed or exist.
+  returned: always
+  type: dict
+  contains:
+    added:
+      description: A list of variables which were created.
+      returned: always
+      type: list
+      sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+    untouched:
+      description: A list of variables which exist.
+      returned: always
+      type: list
+      sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+    removed:
+      description: A list of variables which were deleted.
+      returned: always
+      type: list
+      sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+    updated:
+      description: A list of variables whose values were changed.
+ returned: always + type: list + sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.six import string_types +from ansible.module_utils.six import integer_types + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, ensure_gitlab_package, filter_returned_variables +) + + +def vars_to_variables(vars, module): + # transform old vars to new variables structure + variables = list() + for item, value in vars.items(): + if (isinstance(value, string_types) or + isinstance(value, (integer_types, float))): + variables.append( + { + "name": item, + "value": str(value), + "masked": False, + "protected": False, + "variable_type": "env_var", + } + ) + + elif isinstance(value, dict): + new_item = {"name": item, "value": value.get('value')} + + new_item = { + "name": item, + "value": value.get('value'), + "masked": value.get('masked'), + "protected": value.get('protected'), + "variable_type": value.get('variable_type'), + } + + if value.get('environment_scope'): + new_item['environment_scope'] = value.get('environment_scope') + + variables.append(new_item) + + else: + module.fail_json(msg="value must be of type string, integer, float or dict") + + return variables + + +class GitlabGroupVariables(object): + + def __init__(self, module, gitlab_instance): + self.repo = gitlab_instance + self.group = self.get_group(module.params['group']) + self._module = module + + def get_group(self, group_name): + return self.repo.groups.get(group_name) + + def list_all_group_variables(self): + page_nb = 1 + variables = [] + vars_page = self.group.variables.list(page=page_nb) + while len(vars_page) > 0: + variables += vars_page + page_nb += 1 + vars_page = self.group.variables.list(page=page_nb) + return variables + + def create_variable(self, var_obj): + if self._module.check_mode: + return True + var = { + "key": var_obj.get('key'), + "value": var_obj.get('value'), + "masked": var_obj.get('masked'), + "protected": var_obj.get('protected'), + "variable_type": var_obj.get('variable_type'), + } + if var_obj.get('environment_scope') is not None: + var["environment_scope"] = var_obj.get('environment_scope') + + self.group.variables.create(var) + return True + + def update_variable(self, var_obj): + if self._module.check_mode: + return True + self.delete_variable(var_obj) + self.create_variable(var_obj) + return True + + def delete_variable(self, var_obj): + if self._module.check_mode: + return True + self.group.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')}) + return True + + +def compare(requested_variables, existing_variables, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might results in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. 
all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + existing_key_scope_vars = list() + for item in existing_variables: + existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')}) + + for var in requested_variables: + if var in existing_variables: + untouched.append(var) + else: + compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')} + if compare_item in existing_key_scope_vars: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_variables, state, module): + + change = False + return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + + gitlab_keys = this_gitlab.list_all_group_variables() + before = [x.attributes for x in gitlab_keys] + + gitlab_keys = this_gitlab.list_all_group_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + for item in requested_variables: + item['key'] = item.pop('name') + item['value'] = str(item.get('value')) + if item.get('protected') is None: + item['protected'] = False + if item.get('masked') is None: + item['masked'] = False + if item.get('environment_scope') is None: + item['environment_scope'] = '*' + if item.get('variable_type') is None: + item['variable_type'] = 'env_var' + + if module.check_mode: + untouched, updated, added = compare(requested_variables, existing_variables, state) + + if state == 'present': + add_or_update = [x for x in requested_variables if x not in existing_variables] + for item in add_or_update: + try: + if this_gitlab.create_variable(item): + return_value['added'].append(item) + + except Exception: + if this_gitlab.update_variable(item): + return_value['updated'].append(item) + + if purge: + # refetch and filter + gitlab_keys = this_gitlab.list_all_group_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + remove = [x for x in existing_variables if x not in requested_variables] + for item in remove: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + elif state == 'absent': + # value does not matter on removing variables. 
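+        # (editor's sketch: once 'value' and 'variable_type' are popped from
+        #  both sides, the later membership tests compare dicts such as
+        #      {'key': 'MY_VAR', 'environment_scope': '*', 'masked': False, 'protected': False}
+        #  field by field, so effectively --)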
+ # key and environment scope are sufficient + for item in existing_variables: + item.pop('value') + item.pop('variable_type') + for item in requested_variables: + item.pop('value') + item.pop('variable_type') + + if not purge: + remove_requested = [x for x in requested_variables if x in existing_variables] + for item in remove_requested: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + else: + for item in existing_variables: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + if module.check_mode: + return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + + if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0: + change = True + + gitlab_keys = this_gitlab.list_all_group_variables() + after = [x.attributes for x in gitlab_keys] + + return change, return_value, before, after + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + group=dict(type='str', required=True), + purge=dict(type='bool', required=False, default=False), + vars=dict(type='dict', required=False, default=dict(), no_log=True), + variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + name=dict(type='str', required=True), + value=dict(type='str', no_log=True), + masked=dict(type='bool', default=False), + protected=dict(type='bool', default=False), + environment_scope=dict(type='str', default='*'), + variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) + )), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['vars', 'variables'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + ensure_gitlab_package(module) + + purge = module.params['purge'] + var_list = module.params['vars'] + state = module.params['state'] + + if var_list: + variables = vars_to_variables(var_list, module) + else: + variables = module.params['variables'] + + if state == 'present': + if any(x['value'] is None for x in variables): + module.fail_json(msg='value parameter is required in state present') + + gitlab_instance = gitlab_authentication(module) + + this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance) + + changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) + + # postprocessing + for item in after: + item.pop('group_id') + item['name'] = item.pop('key') + for item in before: + item.pop('group_id') + item['name'] = item.pop('key') + + untouched_key_name = 'key' + if not module.check_mode: + untouched_key_name = 'name' + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('key') for x in raw_return_value['added']] + updated = [x.get('key') for x in raw_return_value['updated']] + removed = [x.get('key') for x in raw_return_value['removed']] + untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + 
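# Only variable names are reported back; values are deliberately omitted
+    # so that masked or otherwise secret values do not end up in logs or results.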
+    module.exit_json(changed=changed, group_variable=return_value)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_hook.py b/ansible_collections/community/general/plugins/modules/gitlab_hook.py
new file mode 100644
index 000000000..adf90eb7b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_hook.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins
+# Based on code:
+# Copyright (c) 2013, Phillip Gentry
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_hook
+short_description: Manages GitLab project hooks
+description:
+  - Adds, updates and removes project hooks.
+author:
+  - Marcus Watkins (@marwatk)
+  - Guillaume Martinez (@Lunik)
+requirements:
+  - python >= 2.7
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  project:
+    description:
+      - ID or full path of the project in the form of group/name.
+    required: true
+    type: str
+  hook_url:
+    description:
+      - The URL that you want GitLab to post to; this is used as the primary key for updates and deletion.
+    required: true
+    type: str
+  state:
+    description:
+      - When C(present), the hook will be updated to match the input or created if it does not exist.
+      - When C(absent), the hook will be deleted if it exists.
+    default: present
+    type: str
+    choices: [ "present", "absent" ]
+  push_events:
+    description:
+      - Trigger hook on push events.
+    type: bool
+    default: true
+  push_events_branch_filter:
+    description:
+      - Branch name or wildcard to trigger the hook on push events.
+    type: str
+    version_added: '0.2.0'
+    default: ''
+  issues_events:
+    description:
+      - Trigger hook on issues events.
+    type: bool
+    default: false
+  merge_requests_events:
+    description:
+      - Trigger hook on merge requests events.
+    type: bool
+    default: false
+  tag_push_events:
+    description:
+      - Trigger hook on tag push events.
+    type: bool
+    default: false
+  note_events:
+    description:
+      - Trigger hook on note events or when someone adds a comment.
+    type: bool
+    default: false
+  job_events:
+    description:
+      - Trigger hook on job events.
+    type: bool
+    default: false
+  pipeline_events:
+    description:
+      - Trigger hook on pipeline events.
+    type: bool
+    default: false
+  wiki_page_events:
+    description:
+      - Trigger hook on wiki events.
+    type: bool
+    default: false
+  hook_validate_certs:
+    description:
+      - Whether GitLab will do SSL verification when triggering the hook.
+    type: bool
+    default: false
+    aliases: [ enable_ssl_verification ]
+  token:
+    description:
+      - Secret token to validate hook messages at the receiver.
+      - If this is present, it will always result in a change, as it cannot be retrieved from GitLab.
+      - Will show up in the X-GitLab-Token HTTP request header.
+    required: false
+    type: str
+'''
+
+EXAMPLES = '''
+- name: "Adding a project hook"
+  community.general.gitlab_hook:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ access_token }}"
+    project: "my_group/my_project"
+    hook_url: "https://my-ci-server.example.com/gitlab-hook"
+    state: present
+    push_events: true
+    tag_push_events: true
+    hook_validate_certs: false
+    token: "my-super-secret-token-that-my-ci-server-will-check"
+
+- name: "Delete the previous hook"
+  community.general.gitlab_hook:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ access_token }}"
+    project: "my_group/my_project"
+    hook_url: "https://my-ci-server.example.com/gitlab-hook"
+    state: absent
+
+- name: "Delete a hook by numeric project id"
+  community.general.gitlab_hook:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ access_token }}"
+    project: 10
+    hook_url: "https://my-ci-server.example.com/gitlab-hook"
+    state: absent
+'''
+
+RETURN = '''
+msg:
+  description: Success or failure message.
+  returned: always
+  type: str
+  sample: "Success"
+
+result:
+  description: JSON-parsed response from the server.
+  returned: always
+  type: dict
+
+error:
+  description: The error message returned by the GitLab API.
+  returned: failed
+  type: str
+  sample: "400: path is already in use"
+
+hook:
+  description: API object.
+  returned: always
+  type: dict
+'''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, find_project, gitlab_authentication, ensure_gitlab_package
+)
+
+
+class GitLabHook(object):
+    def __init__(self, module, gitlab_instance):
+        self._module = module
+        self._gitlab = gitlab_instance
+        self.hook_object = None
+
+    '''
+    @param project Project Object
+    @param hook_url URL to call on event
+    @param options Attributes of the hook
+    '''
+    def create_or_update_hook(self, project, hook_url, options):
+        changed = False
+
+        # Because exists_hook() has already been called in main()
+        if self.hook_object is None:
+            hook = self.create_hook(project, {
+                'url': hook_url,
+                'push_events': options['push_events'],
+                'push_events_branch_filter': options['push_events_branch_filter'],
+                'issues_events': options['issues_events'],
+                'merge_requests_events': options['merge_requests_events'],
+                'tag_push_events': options['tag_push_events'],
+                'note_events': options['note_events'],
+                'job_events': options['job_events'],
+                'pipeline_events': options['pipeline_events'],
+                'wiki_page_events': options['wiki_page_events'],
+                'enable_ssl_verification': options['enable_ssl_verification'],
+                'token': options['token'],
+            })
+            changed = True
+        else:
+            changed, hook = self.update_hook(self.hook_object, {
+                'push_events': options['push_events'],
+                'push_events_branch_filter': options['push_events_branch_filter'],
+                'issues_events': options['issues_events'],
+                'merge_requests_events': options['merge_requests_events'],
+                'tag_push_events': options['tag_push_events'],
+                'note_events': options['note_events'],
+                'job_events': options['job_events'],
+                'pipeline_events': options['pipeline_events'],
+                'wiki_page_events': options['wiki_page_events'],
+                'enable_ssl_verification': options['enable_ssl_verification'],
+                'token': options['token'],
+            })
+
+        self.hook_object = hook
+        if changed:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url)
+
+            try:
+                
hook.save() + except Exception as e: + self._module.fail_json(msg="Failed to update hook: %s " % e) + + return changed + + ''' + @param project Project Object + @param arguments Attributes of the hook + ''' + def create_hook(self, project, arguments): + if self._module.check_mode: + return True + + hook = project.hooks.create(arguments) + + return hook + + ''' + @param hook Hook Object + @param arguments Attributes of the hook + ''' + def update_hook(self, hook, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arg_value is not None: + if getattr(hook, arg_key, None) != arg_value: + setattr(hook, arg_key, arg_value) + changed = True + + return (changed, hook) + + ''' + @param project Project object + @param hook_url Url to call on event + ''' + def find_hook(self, project, hook_url): + hooks = project.hooks.list(all=True) + for hook in hooks: + if (hook.url == hook_url): + return hook + + ''' + @param project Project object + @param hook_url Url to call on event + ''' + def exists_hook(self, project, hook_url): + # When project exists, object will be stored in self.project_object. + hook = self.find_hook(project, hook_url) + if hook: + self.hook_object = hook + return True + return False + + def delete_hook(self): + if not self._module.check_mode: + self.hook_object.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type='str', required=True), + hook_url=dict(type='str', required=True), + push_events=dict(type='bool', default=True), + push_events_branch_filter=dict(type='str', default=''), + issues_events=dict(type='bool', default=False), + merge_requests_events=dict(type='bool', default=False), + tag_push_events=dict(type='bool', default=False), + note_events=dict(type='bool', default=False), + job_events=dict(type='bool', default=False), + pipeline_events=dict(type='bool', default=False), + wiki_page_events=dict(type='bool', default=False), + hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), + token=dict(type='str', no_log=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True, + ) + ensure_gitlab_package(module) + + state = module.params['state'] + project_identifier = module.params['project'] + hook_url = module.params['hook_url'] + push_events = module.params['push_events'] + push_events_branch_filter = module.params['push_events_branch_filter'] + issues_events = module.params['issues_events'] + merge_requests_events = module.params['merge_requests_events'] + tag_push_events = module.params['tag_push_events'] + note_events = module.params['note_events'] + job_events = module.params['job_events'] + pipeline_events = module.params['pipeline_events'] + wiki_page_events = module.params['wiki_page_events'] + enable_ssl_verification = module.params['hook_validate_certs'] + hook_token = module.params['token'] + + gitlab_instance = gitlab_authentication(module) + + gitlab_hook = GitLabHook(module, gitlab_instance) + + project = 
find_project(gitlab_instance, project_identifier)
+
+    if project is None:
+        module.fail_json(msg="Failed to create hook: project %s doesn't exist" % project_identifier)
+
+    hook_exists = gitlab_hook.exists_hook(project, hook_url)
+
+    if state == 'absent':
+        if hook_exists:
+            gitlab_hook.delete_hook()
+            module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
+        else:
+            module.exit_json(changed=False, msg="Hook already deleted or does not exist")
+
+    if state == 'present':
+        if gitlab_hook.create_or_update_hook(project, hook_url, {
+            "push_events": push_events,
+            "push_events_branch_filter": push_events_branch_filter,
+            "issues_events": issues_events,
+            "merge_requests_events": merge_requests_events,
+            "tag_push_events": tag_push_events,
+            "note_events": note_events,
+            "job_events": job_events,
+            "pipeline_events": pipeline_events,
+            "wiki_page_events": wiki_page_events,
+            "enable_ssl_verification": enable_ssl_verification,
+            "token": hook_token,
+        }):
+
+            module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hook_object._attrs)
+        else:
+            module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hook_object._attrs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project.py b/ansible_collections/community/general/plugins/modules/gitlab_project.py
new file mode 100644
index 000000000..db360d578
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project.py
@@ -0,0 +1,678 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes GitLab Projects
+description:
+  - When the project does not exist in GitLab, it will be created.
+  - When the project does exist and I(state=absent), the project will be deleted.
+  - When changes are made to the project, the project will be updated.
+author:
+  - Werner Dijkerman (@dj-wasabi)
+  - Guillaume Martinez (@Lunik)
+requirements:
+  - python >= 2.7
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  group:
+    description:
+      - ID or the full path of the group to which this project belongs.
+    type: str
+  name:
+    description:
+      - The name of the project.
+    required: true
+    type: str
+  path:
+    description:
+      - The path of the project you want to create, this will be server_url/<group>/path.
+      - If not supplied, name will be used.
+    type: str
+  description:
+    description:
+      - A description for the project.
+    type: str
+  initialize_with_readme:
+    description:
+      - Will initialize the project with a default C(README.md).
+      - Is only used when the project is created, and ignored otherwise.
+    type: bool
+    default: false
+    version_added: "4.0.0"
+  issues_enabled:
+    description:
+      - Whether you want to create issues or not.
+    type: bool
+    default: true
+  merge_requests_enabled:
+    description:
+      - Whether merge requests can be made.
+    type: bool
+    default: true
+  wiki_enabled:
+    description:
+      - Whether a wiki for this project should be available.
+    type: bool
+    default: true
+  snippets_enabled:
+    description:
+      - Whether creating snippets should be available.
+    type: bool
+    default: true
+  visibility:
+    description:
+      - C(private) Project access must be granted explicitly for each user.
+      - C(internal) The project can be cloned by any logged in user.
+      - C(public) The project can be cloned without any authentication.
+    default: private
+    type: str
+    choices: ["private", "internal", "public"]
+    aliases:
+      - visibility_level
+  import_url:
+    description:
+      - Git repository which will be imported into GitLab.
+      - GitLab server needs read access to this git repository.
+    required: false
+    type: str
+  state:
+    description:
+      - Create or delete project.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  merge_method:
+    description:
+      - What requirements are placed upon merges.
+      - Possible values are C(merge) (merge commit), C(rebase_merge) (merge commit with semi-linear history),
+        and C(ff) (fast-forward merges only).
+    type: str
+    choices: ["ff", "merge", "rebase_merge"]
+    default: merge
+    version_added: "1.0.0"
+  lfs_enabled:
+    description:
+      - Enable Git Large File Storage (LFS) to manage large files such
+        as audio, video, and graphics files.
+    type: bool
+    required: false
+    default: false
+    version_added: "2.0.0"
+  username:
+    description:
+      - Used to create a personal project under a user's name.
+    type: str
+    version_added: "3.3.0"
+  allow_merge_on_skipped_pipeline:
+    description:
+      - Allow merge when skipped pipelines exist.
+    type: bool
+    version_added: "3.4.0"
+  only_allow_merge_if_all_discussions_are_resolved:
+    description:
+      - All discussions on a merge request (MR) have to be resolved.
+    type: bool
+    version_added: "3.4.0"
+  only_allow_merge_if_pipeline_succeeds:
+    description:
+      - Only allow merges if the pipeline succeeded.
+    type: bool
+    version_added: "3.4.0"
+  packages_enabled:
+    description:
+      - Enable GitLab package repository.
+    type: bool
+    version_added: "3.4.0"
+  remove_source_branch_after_merge:
+    description:
+      - Remove the source branch after merge.
+    type: bool
+    version_added: "3.4.0"
+  squash_option:
+    description:
+      - Squash commits when merging.
+    type: str
+    choices: ["never", "always", "default_off", "default_on"]
+    version_added: "3.4.0"
+  ci_config_path:
+    description:
+      - Custom path to the CI configuration file for this project.
+    type: str
+    version_added: "3.7.0"
+  shared_runners_enabled:
+    description:
+      - Enable shared runners for this project.
+    type: bool
+    version_added: "3.7.0"
+  avatar_path:
+    description:
+      - Absolute path to the image used as the project avatar. File size should not exceed 200 kb.
+      - This option is only used on creation, not for updates.
+    type: path
+    version_added: "4.2.0"
+  default_branch:
+    description:
+      - Default branch name for a new project.
+      - This option is only used on creation, not for updates. This is also only used if I(initialize_with_readme=true).
+    type: str
+    version_added: "4.2.0"
+  builds_access_level:
+    description:
+      - C(private) means that repository CI/CD is allowed only to project members.
+      - C(disabled) means that repository CI/CD is disabled.
+      - C(enabled) means that repository CI/CD is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.2.0"
+  forking_access_level:
+    description:
+      - C(private) means that forking the repository is allowed only to project members.
+      - C(disabled) means that forking the repository is disabled.
+      - C(enabled) means that forking the repository is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.2.0"
+  container_registry_access_level:
+    description:
+      - C(private) means that the container registry is allowed only to project members.
+      - C(disabled) means that the container registry is disabled.
+      - C(enabled) means that the container registry is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.2.0"
+  releases_access_level:
+    description:
+      - C(private) means that accessing releases is allowed only to project members.
+      - C(disabled) means that accessing releases is disabled.
+      - C(enabled) means that accessing releases is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.4.0"
+  environments_access_level:
+    description:
+      - C(private) means that deployment to environments is allowed only to project members.
+      - C(disabled) means that deployment to environments is disabled.
+      - C(enabled) means that deployment to environments is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.4.0"
+  feature_flags_access_level:
+    description:
+      - C(private) means that feature rollout is allowed only to project members.
+      - C(disabled) means that feature rollout is disabled.
+      - C(enabled) means that feature rollout is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.4.0"
+  infrastructure_access_level:
+    description:
+      - C(private) means that configuring infrastructure is allowed only to project members.
+      - C(disabled) means that configuring infrastructure is disabled.
+      - C(enabled) means that configuring infrastructure is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.4.0"
+  monitor_access_level:
+    description:
+      - C(private) means that monitoring health is allowed only to project members.
+      - C(disabled) means that monitoring health is disabled.
+      - C(enabled) means that monitoring health is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.4.0"
+  security_and_compliance_access_level:
+    description:
+      - C(private) means that accessing the security and compliance tab is allowed only to project members.
+      - C(disabled) means that accessing the security and compliance tab is disabled.
+      - C(enabled) means that accessing the security and compliance tab is enabled.
+    type: str
+    choices: ["private", "disabled", "enabled"]
+    version_added: "6.4.0"
+  topics:
+    description:
+      - A topic or list of topics to be assigned to a project.
+      - It is compatible with old GitLab server releases (versions before 14, where it corresponds to C(tag_list)).
+ type: list + elements: str + version_added: "6.6.0" +''' + +EXAMPLES = r''' +- name: Create GitLab Project + community.general.gitlab_project: + api_url: https://gitlab.example.com/ + api_token: "{{ api_token }}" + name: my_first_project + group: "10481470" + +- name: Delete GitLab Project + community.general.gitlab_project: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + validate_certs: false + name: my_first_project + state: absent + delegate_to: localhost + +- name: Create GitLab Project in group Ansible + community.general.gitlab_project: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_first_project + group: ansible + issues_enabled: false + merge_method: rebase_merge + wiki_enabled: true + snippets_enabled: true + import_url: http://git.example.com/example/lab.git + initialize_with_readme: true + state: present + delegate_to: localhost + +- name: get the initial root password + ansible.builtin.shell: | + grep 'Password:' /etc/gitlab/initial_root_password | sed -e 's/Password\: \(.*\)/\1/' + register: initial_root_password + +- name: Create a GitLab Project using a username/password via oauth_token + community.general.gitlab_project: + api_url: https://gitlab.example.com/ + api_username: root + api_password: "{{ initial_root_password }}" + name: my_second_project + group: "10481470" +''' + +RETURN = r''' +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server. + returned: always + type: dict + +error: + description: the error message returned by the GitLab API. + returned: failed + type: str + sample: "400: path is already in use" + +project: + description: API object. 
+ returned: always + type: dict +''' + + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab, ensure_gitlab_package +) + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class GitLabProject(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.project_object = None + + ''' + @param project_name Name of the project + @param namespace Namespace Object (User or Group) + @param options Options of the project + ''' + def create_or_update_project(self, project_name, namespace, options): + changed = False + project_options = { + 'name': project_name, + 'description': options['description'], + 'issues_enabled': options['issues_enabled'], + 'merge_requests_enabled': options['merge_requests_enabled'], + 'merge_method': options['merge_method'], + 'wiki_enabled': options['wiki_enabled'], + 'snippets_enabled': options['snippets_enabled'], + 'visibility': options['visibility'], + 'lfs_enabled': options['lfs_enabled'], + 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], + 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], + 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], + 'packages_enabled': options['packages_enabled'], + 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], + 'squash_option': options['squash_option'], + 'ci_config_path': options['ci_config_path'], + 'shared_runners_enabled': options['shared_runners_enabled'], + 'builds_access_level': options['builds_access_level'], + 'forking_access_level': options['forking_access_level'], + 'container_registry_access_level': options['container_registry_access_level'], + 'releases_access_level': options['releases_access_level'], + 'environments_access_level': options['environments_access_level'], + 'feature_flags_access_level': options['feature_flags_access_level'], + 'infrastructure_access_level': options['infrastructure_access_level'], + 'monitor_access_level': options['monitor_access_level'], + 'security_and_compliance_access_level': options['security_and_compliance_access_level'], + } + + # topics was introduced on gitlab >=14 and replace tag_list. We get current gitlab version + # and check if less than 14. 
If so, we use tag_list instead of topics.
+        if LooseVersion(self._gitlab.version()[0]) < LooseVersion("14"):
+            project_options['tag_list'] = options['topics']
+        else:
+            project_options['topics'] = options['topics']
+
+        # Because exists_project() has already been called in main()
+        if self.project_object is None:
+            project_options.update({
+                'path': options['path'],
+                'import_url': options['import_url'],
+            })
+            if options['initialize_with_readme']:
+                project_options['initialize_with_readme'] = options['initialize_with_readme']
+                if options['default_branch']:
+                    project_options['default_branch'] = options['default_branch']
+
+            project_options = self.get_options_with_value(project_options)
+            project = self.create_project(namespace, project_options)
+
+            # add avatar to project
+            if options['avatar_path']:
+                try:
+                    project.avatar = open(options['avatar_path'], 'rb')
+                except IOError as e:
+                    self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e))
+
+            changed = True
+        else:
+            changed, project = self.update_project(self.project_object, project_options)
+
+        self.project_object = project
+        if changed:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)
+
+            try:
+                project.save()
+            except Exception as e:
+                self._module.fail_json(msg="Failed to update project: %s " % e)
+            return True
+        return False
+
+    '''
+    @param namespace Namespace Object (User or Group)
+    @param arguments Attributes of the project
+    '''
+    def create_project(self, namespace, arguments):
+        if self._module.check_mode:
+            return True
+
+        arguments['namespace_id'] = namespace.id
+        try:
+            project = self._gitlab.projects.create(arguments)
+        except (gitlab.exceptions.GitlabCreateError) as e:
+            self._module.fail_json(msg="Failed to create project: %s " % to_native(e))
+
+        return project
+
+    '''
+    @param arguments Attributes of the project
+    '''
+    def get_options_with_value(self, arguments):
+        ret_arguments = dict()
+        for arg_key, arg_value in arguments.items():
+            if arguments[arg_key] is not None:
+                ret_arguments[arg_key] = arg_value
+
+        return ret_arguments
+
+    '''
+    @param project Project Object
+    @param arguments Attributes of the project
+    '''
+    def update_project(self, project, arguments):
+        changed = False
+
+        for arg_key, arg_value in arguments.items():
+            if arguments[arg_key] is not None:
+                if getattr(project, arg_key) != arguments[arg_key]:
+                    setattr(project, arg_key, arguments[arg_key])
+                    changed = True
+
+        return (changed, project)
+
+    def delete_project(self):
+        if self._module.check_mode:
+            return True
+
+        project = self.project_object
+
+        return project.delete()
+
+    '''
+    @param namespace User/Group object
+    @param path Path of the project
+    '''
+    def exists_project(self, namespace, path):
+        # When the project exists, the object is stored in self.project_object.
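+        # The lookup key is the full path: <namespace full path>/<project path>.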
+ project = find_project(self._gitlab, namespace.full_path + '/' + path) + if project: + self.project_object = project + return True + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + group=dict(type='str'), + name=dict(type='str', required=True), + path=dict(type='str'), + description=dict(type='str'), + initialize_with_readme=dict(type='bool', default=False), + default_branch=dict(type='str'), + issues_enabled=dict(type='bool', default=True), + merge_requests_enabled=dict(type='bool', default=True), + merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), + wiki_enabled=dict(type='bool', default=True), + snippets_enabled=dict(default=True, type='bool'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), + import_url=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + lfs_enabled=dict(default=False, type='bool'), + username=dict(type='str'), + allow_merge_on_skipped_pipeline=dict(type='bool'), + only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), + only_allow_merge_if_pipeline_succeeds=dict(type='bool'), + packages_enabled=dict(type='bool'), + remove_source_branch_after_merge=dict(type='bool'), + squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), + ci_config_path=dict(type='str'), + shared_runners_enabled=dict(type='bool'), + avatar_path=dict(type='path'), + builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + topics=dict(type='list', elements='str'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['group', 'username'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True, + ) + ensure_gitlab_package(module) + + group_identifier = module.params['group'] + project_name = module.params['name'] + project_path = module.params['path'] + project_description = module.params['description'] + initialize_with_readme = module.params['initialize_with_readme'] + issues_enabled = module.params['issues_enabled'] + merge_requests_enabled = module.params['merge_requests_enabled'] + merge_method = module.params['merge_method'] + wiki_enabled = module.params['wiki_enabled'] + snippets_enabled = module.params['snippets_enabled'] + visibility = module.params['visibility'] + import_url = module.params['import_url'] + 
state = module.params['state']
+    lfs_enabled = module.params['lfs_enabled']
+    username = module.params['username']
+    allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline']
+    only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved']
+    only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds']
+    packages_enabled = module.params['packages_enabled']
+    remove_source_branch_after_merge = module.params['remove_source_branch_after_merge']
+    squash_option = module.params['squash_option']
+    ci_config_path = module.params['ci_config_path']
+    shared_runners_enabled = module.params['shared_runners_enabled']
+    avatar_path = module.params['avatar_path']
+    default_branch = module.params['default_branch']
+    builds_access_level = module.params['builds_access_level']
+    forking_access_level = module.params['forking_access_level']
+    container_registry_access_level = module.params['container_registry_access_level']
+    releases_access_level = module.params['releases_access_level']
+    environments_access_level = module.params['environments_access_level']
+    feature_flags_access_level = module.params['feature_flags_access_level']
+    infrastructure_access_level = module.params['infrastructure_access_level']
+    monitor_access_level = module.params['monitor_access_level']
+    security_and_compliance_access_level = module.params['security_and_compliance_access_level']
+    topics = module.params['topics']
+
+    if default_branch and not initialize_with_readme:
+        module.fail_json(msg="Param default_branch needs param initialize_with_readme set to true")
+
+    gitlab_instance = gitlab_authentication(module)
+
+    # Set project_path to project_name if it is empty.
+    if project_path is None:
+        project_path = project_name.replace(" ", "_")
+
+    gitlab_project = GitLabProject(module, gitlab_instance)
+
+    namespace = None
+    namespace_id = None
+    if group_identifier:
+        group = find_group(gitlab_instance, group_identifier)
+        if group is None:
+            module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier)
+
+        namespace_id = group.id
+    else:
+        if username:
+            namespace = gitlab_instance.namespaces.list(search=username, all=False)[0]
+        else:
+            namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username, all=False)[0]
+        namespace_id = namespace.id
+
+    if not namespace_id:
+        module.fail_json(msg="Failed to find the namespace or group ID which is required to look up the namespace")
+
+    try:
+        namespace = gitlab_instance.namespaces.get(namespace_id)
+    except gitlab.exceptions.GitlabGetError as e:
+        module.fail_json(msg="Failed to find the namespace for the given user: %s" % to_native(e))
+
+    if not namespace:
+        module.fail_json(msg="Failed to find the namespace for the project")
+    project_exists = gitlab_project.exists_project(namespace, project_path)
+
+    if state == 'absent':
+        if project_exists:
+            gitlab_project.delete_project()
+            module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name)
+        module.exit_json(changed=False, msg="Project already deleted or does not exist")
+
+    if state == 'present':
+
+        if gitlab_project.create_or_update_project(project_name, namespace, {
+            "path": project_path,
+            "description": project_description,
+            "initialize_with_readme": initialize_with_readme,
+            "default_branch": default_branch,
+            "issues_enabled": issues_enabled,
+            "merge_requests_enabled": merge_requests_enabled,
+            "merge_method": merge_method,
+            "wiki_enabled": 
wiki_enabled, + "snippets_enabled": snippets_enabled, + "visibility": visibility, + "import_url": import_url, + "lfs_enabled": lfs_enabled, + "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, + "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, + "packages_enabled": packages_enabled, + "remove_source_branch_after_merge": remove_source_branch_after_merge, + "squash_option": squash_option, + "ci_config_path": ci_config_path, + "shared_runners_enabled": shared_runners_enabled, + "avatar_path": avatar_path, + "builds_access_level": builds_access_level, + "forking_access_level": forking_access_level, + "container_registry_access_level": container_registry_access_level, + "releases_access_level": releases_access_level, + "environments_access_level": environments_access_level, + "feature_flags_access_level": feature_flags_access_level, + "infrastructure_access_level": infrastructure_access_level, + "monitor_access_level": monitor_access_level, + "security_and_compliance_access_level": security_and_compliance_access_level, + "topics": topics, + }): + + module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) + module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.project_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py b/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py new file mode 100644 index 000000000..5b1a8d3f1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py @@ -0,0 +1,216 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Guillaume MARTINEZ (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: gitlab_project_badge +short_description: Manage project badges on GitLab Server +version_added: 6.1.0 +description: + - This module allows to add and remove badges to/from a project. +author: Guillaume MARTINEZ (@Lunik) +requirements: + - C(owner) or C(maintainer) rights to project on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + project: + description: + - The name (or full path) of the GitLab project the badge is added to/removed from. + required: true + type: str + + state: + description: + - State of the badge in the project. + - On C(present), it adds a badge to a GitLab project. + - On C(absent), it removes a badge from a GitLab project. + choices: ['present', 'absent'] + default: 'present' + type: str + + link_url: + description: + - The URL associated with the badge. + required: true + type: str + + image_url: + description: + - The image URL of the badge. + - A badge is identified by this URL. 
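+      - Matching is exact; a badge whose image URL differs is treated as a separate badge.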
+ required: true + type: str +''' + +EXAMPLES = r''' +- name: Add a badge to a GitLab Project + community.general.gitlab_project_badge: + api_url: 'https://example.gitlab.com' + api_token: 'Your-Private-Token' + project: projectname + state: present + link_url: 'https://example.gitlab.com/%{project_path}' + image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg' + +- name: Remove a badge from a GitLab Project + community.general.gitlab_project_badge: + api_url: 'https://example.gitlab.com' + api_token: 'Your-Private-Token' + project: projectname + state: absent + link_url: 'https://example.gitlab.com/%{project_path}' + image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg' +''' + +RETURN = ''' +badge: + description: The badge information. + returned: when I(state=present) + type: dict + sample: + id: 1 + link_url: 'http://example.com/ci_status.svg?project=%{project_path}&ref=%{default_branch}' + image_url: 'https://shields.io/my/badge' + rendered_link_url: 'http://example.com/ci_status.svg?project=example-org/example-project&ref=master' + rendered_image_url: 'https://shields.io/my/badge' + kind: project +''' + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, find_project, ensure_gitlab_package +) + + +def present_strategy(module, gl, project, wished_badge): + changed = False + + existing_badge = None + for badge in project.badges.list(iterator=True): + if badge.image_url == wished_badge["image_url"]: + existing_badge = badge + break + + if not existing_badge: + changed = True + if module.check_mode: + return changed, {"status": "A project badge would be created."} + + badge = project.badges.create(wished_badge) + return changed, badge.attributes + + if existing_badge.link_url != wished_badge["link_url"]: + changed = True + existing_badge.link_url = wished_badge["link_url"] + + if changed: + if module.check_mode: + return changed, {"status": "Project badge attributes would be changed."} + + existing_badge.save() + + return changed, existing_badge.attributes + + +def absent_strategy(module, gl, project, wished_badge): + changed = False + + existing_badge = None + for badge in project.badges.list(iterator=True): + if badge.image_url == wished_badge["image_url"]: + existing_badge = badge + break + + if not existing_badge: + return changed, None + + changed = True + if module.check_mode: + return changed, {"status": "Project badge would be destroyed."} + + existing_badge.delete() + + return changed, None + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + ensure_gitlab_package(module) + + gitlab_project = module.params['project'] + state = module.params['state'] + + gl = gitlab_authentication(module) + + project = find_project(gl, gitlab_project) + # project doesn't exist + if not project: + module.fail_json(msg="project '%s' not found." 
% gitlab_project) + + wished_badge = { + "link_url": module.params["link_url"], + "image_url": module.params["image_url"], + } + + changed, summary = state_strategy[state](module=module, gl=gl, project=project, wished_badge=wished_badge) + + module.exit_json(changed=changed, badge=summary) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + project=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + link_url=dict(type='str', required=True), + image_url=dict(type='str', required=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ], + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_members.py b/ansible_collections/community/general/plugins/modules/gitlab_project_members.py new file mode 100644 index 000000000..905358443 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/gitlab_project_members.py @@ -0,0 +1,449 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Sergey Mikhaltsov +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: gitlab_project_members +short_description: Manage project members on GitLab Server +version_added: 2.2.0 +description: + - This module allows to add and remove members to/from a project, or change a member's access level in a project on GitLab. +author: + - Sergey Mikhaltsov (@metanovii) + - Zainab Alsaffar (@zanssa) +requirements: + - python-gitlab python module <= 1.15.0 + - owner or maintainer rights to project on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + project: + description: + - The name (or full path) of the GitLab project the member is added to/removed from. + required: true + type: str + gitlab_user: + description: + - A username or a list of usernames to add to/remove from the GitLab project. + - Mutually exclusive with I(gitlab_users_access). + type: list + elements: str + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer'] + gitlab_users_access: + description: + - Provide a list of user to access level mappings. + - Every dictionary in this list specifies a user (by username) and the access level the user should have. + - Mutually exclusive with I(gitlab_user) and I(access_level). + - Use together with I(purge_users) to remove all users not specified here from the project. 
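+      - Members whose current access level differs from the requested one are updated in place.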
+ type: list + elements: dict + suboptions: + name: + description: A username or a list of usernames to add to/remove from the GitLab project. + type: str + required: true + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer'] + required: true + version_added: 3.7.0 + state: + description: + - State of the member in the project. + - On C(present), it adds a user to a GitLab project. + - On C(absent), it removes a user from a GitLab project. + choices: ['present', 'absent'] + default: 'present' + type: str + purge_users: + description: + - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. + If omitted do not purge orphaned members. + - Is only used when I(state=present). + type: list + elements: str + choices: ['guest', 'reporter', 'developer', 'maintainer'] + version_added: 3.7.0 +''' + +EXAMPLES = r''' +- name: Add a user to a GitLab Project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + validate_certs: true + project: projectname + gitlab_user: username + access_level: developer + state: present + +- name: Remove a user from a GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + validate_certs: false + project: projectname + gitlab_user: username + state: absent + +- name: Add a list of Users to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_project: projectname + gitlab_user: + - user1 + - user2 + access_level: developer + state: present + +- name: Add a list of Users with Dedicated Access Levels to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: present + +- name: Add a user, remove all others which might be on this access level + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_user: username + access_level: developer + purge_users: developer + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package +) + + +class GitLabProjectMembers(object): + def __init__(self, module, gl): + self._module = module + self._gitlab = gl + + def get_project(self, project_name): + try: + project_exists = self._gitlab.projects.get(project_name) + return project_exists.id + except gitlab.exceptions.GitlabGetError as e: + project_exists = self._gitlab.projects.list(search=project_name, all=False) + if project_exists: + return 
project_exists[0].id + + def get_user_id(self, gitlab_user): + user_exists = self._gitlab.users.list(username=gitlab_user, all=False) + if user_exists: + return user_exists[0].id + + # get all members in a project + def get_members_in_a_project(self, gitlab_project_id): + project = self._gitlab.projects.get(gitlab_project_id) + return project.members.list(all=True) + + # get single member in a project by user name + def get_member_in_a_project(self, gitlab_project_id, gitlab_user_id): + member = None + project = self._gitlab.projects.get(gitlab_project_id) + try: + member = project.members.get(gitlab_user_id) + if member: + return member + except gitlab.exceptions.GitlabGetError as e: + return None + + # check if the user is a member of the project + def is_user_a_member(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return True + return False + + # add user to a project + def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level): + project = self._gitlab.projects.get(gitlab_project_id) + add_member = project.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) + + # remove user from a project + def remove_user_from_project(self, gitlab_user_id, gitlab_project_id): + project = self._gitlab.projects.get(gitlab_project_id) + project.members.delete(gitlab_user_id) + + # get user's access level + def get_user_access_level(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return member.access_level + + # update user's access level in a project + def update_user_access_level(self, members, gitlab_user_id, access_level): + for member in members: + if member.id == gitlab_user_id: + member.access_level = access_level + member.save() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + project=dict(type='str', required=True), + gitlab_user=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']), + purge_users=dict(type='list', elements='str', choices=[ + 'guest', 'reporter', 'developer', 'maintainer']), + gitlab_users_access=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + access_level=dict(type='str', choices=[ + 'guest', 'reporter', 'developer', 'maintainer'], required=True), + ) + ), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['gitlab_user', 'gitlab_users_access'], + ['access_level', 'gitlab_users_access'], + ], + required_together=[ + ['api_username', 'api_password'], + ['gitlab_user', 'access_level'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['gitlab_user', 'gitlab_users_access'], + ], + required_if=[ + ['state', 'present', ['access_level', 'gitlab_users_access'], True], + ], + supports_check_mode=True, + ) + ensure_gitlab_package(module) + + access_level_int = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS, + } + + gitlab_project = module.params['project'] + state = module.params['state'] + access_level = 
module.params['access_level']
+    purge_users = module.params['purge_users']
+
+    if purge_users:
+        purge_users = [access_level_int[level] for level in purge_users]
+
+    # connect to gitlab server
+    gl = gitlab_authentication(module)
+
+    project = GitLabProjectMembers(module, gl)
+
+    gitlab_project_id = project.get_project(gitlab_project)
+
+    # project doesn't exist
+    if not gitlab_project_id:
+        module.fail_json(msg="project '%s' not found." % gitlab_project)
+
+    members = []
+    if module.params['gitlab_user'] is not None:
+        gitlab_users_access = []
+        gitlab_users = module.params['gitlab_user']
+        for gl_user in gitlab_users:
+            gitlab_users_access.append(
+                {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
+    elif module.params['gitlab_users_access'] is not None:
+        gitlab_users_access = module.params['gitlab_users_access']
+        for user_level in gitlab_users_access:
+            user_level['access_level'] = access_level_int[user_level['access_level']]
+
+    if len(gitlab_users_access) == 1 and not purge_users:
+        # only a single user was given
+        members = [project.get_member_in_a_project(
+            gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))]
+        if members[0] is None:
+            members = []
+    elif len(gitlab_users_access) > 1 or purge_users:
+        # a list of users was given
+        members = project.get_members_in_a_project(gitlab_project_id)
+    else:
+        module.exit_json(changed=False, result="Nothing to do, please provide at least one user or set purge_users to true.",
+                         result_data=[])
+
+    changed = False
+    error = False
+    changed_users = []
+    changed_data = []
+
+    for gitlab_user in gitlab_users_access:
+        gitlab_user_id = project.get_user_id(gitlab_user['name'])
+
+        # user doesn't exist
+        if not gitlab_user_id:
+            if state == 'absent':
+                changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']})
+            else:
+                error = True
+                changed_users.append("user '%s' not found." % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                     'msg': "user '%s' not found." % gitlab_user['name']})
+            continue
+
+        is_user_a_member = project.is_user_a_member(members, gitlab_user_id)
+
+        # check if the user is a member of the project
+        if not is_user_a_member:
+            if state == 'present':
+                # add user to the project
+                try:
+                    if not module.check_mode:
+                        project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level'])
+                    changed = True
+                    changed_users.append("Successfully added user '%s' to project" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully added user '%s' to project" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabCreateError) as e:
+                    error = True
+                    changed_users.append("Failed to add user '%s' to the project" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+            # state is absent
+            else:
+                changed_users.append("User, '%s', is not a member in the project. No change to report" % gitlab_user['name'])
+                changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                     'msg': "User, '%s', is not a member in the project. No change to report" % gitlab_user['name']})
+        # in case the user is already a member
+        else:
+            if state == 'present':
+                # compare the access level
+                user_access_level = project.get_user_access_level(members, gitlab_user_id)
+                if user_access_level == gitlab_user['access_level']:
+                    changed_users.append("User, '%s', is already a member in the project. No change to report" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+                                         'msg': "User, '%s', is already a member in the project. No change to report" % gitlab_user['name']})
+                else:
+                    # update the access level for the user
+                    try:
+                        if not module.check_mode:
+                            project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
+                        changed = True
+                        changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name'])
+                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                             'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']})
+                    except (gitlab.exceptions.GitlabUpdateError) as e:
+                        error = True
+                        changed_users.append("Failed to update the access level for the user, '%s'" % gitlab_user['name'])
+                        changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                             'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+            else:
+                # remove the user from the project
+                try:
+                    if not module.check_mode:
+                        project.remove_user_from_project(gitlab_user_id, gitlab_project_id)
+                    changed = True
+                    changed_users.append("Successfully removed user, '%s', from the project" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+                                         'msg': "Successfully removed user, '%s', from the project" % gitlab_user['name']})
+                except (gitlab.exceptions.GitlabDeleteError) as e:
+                    error = True
+                    changed_users.append("Failed to remove user, '%s', from the project" % gitlab_user['name'])
+                    changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+                                         'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)})
+
+    # if state is 'present' and purge_users is set, delete members that have one of the
+    # given access levels but do not appear in gitlab_users_access
+    if state == 'present' and purge_users:
+        uppercase_names_in_gitlab_users_access = []
+        for name in gitlab_users_access:
+            uppercase_names_in_gitlab_users_access.append(name['name'].upper())
+
+        for member in members:
+            if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
+                try:
+                    if not module.check_mode:
+                        project.remove_user_from_project(member.id, gitlab_project_id)
+                    changed = True
+                    changed_users.append("Successfully removed user '%s', from project. Was not in given list" % member.username)
+                    changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
+                                         'msg': "Successfully removed user '%s', from project. Was not in given list" % member.username})
+                except (gitlab.exceptions.GitlabDeleteError) as e:
+                    error = True
+                    changed_users.append("Failed to remove user, '%s', from the project" % member.username)
+                    changed_data.append({'gitlab_user': member.username, 'result': 'FAILED',
+                                         'msg': "Failed to remove user, '%s' from the project: %s" % (member.username, e)})
+
+    if len(gitlab_users_access) == 1 and error:
+        # if a single user was given and an error occurred, return that error; for lists, errors are reported per user
+        module.fail_json(msg="FAILED: '%s'" % changed_users[0], result_data=changed_data)
+    elif error:
+        module.fail_json(
+            msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
+
+    module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
new file mode 100644
index 000000000..63569dd78
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
@@ -0,0 +1,486 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_project_variable
+short_description: Creates/updates/deletes GitLab Projects Variables
+description:
+  - When a project variable does not exist, it will be created.
+  - When a project variable does exist, its value will be updated when the values are different.
+  - Variables which exist in the GitLab project but are not mentioned in the playbook are left
+    untouched (I(purge) is C(false)) or deleted (I(purge) is C(true)).
+author:
+  - "Markus Bergholz (@markuman)"
+requirements:
+  - python >= 2.7
+  - python-gitlab python module
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    description:
+      - Create or delete project variable.
+      - Possible values are present and absent.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  project:
+    description:
+      - The path and name of the project.
+    required: true
+    type: str
+  purge:
+    description:
+      - When set to C(true), all variables which are not mentioned in the task will be deleted.
+    default: false
+    type: bool
+  vars:
+    description:
+      - When the list element is a simple key-value pair, masked and protected will be set to false.
+      - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+        have full control over whether a value should be masked, protected or both.
+      - Support for protected values requires GitLab >= 9.3.
+      - Support for masked values requires GitLab >= 11.10.
+      - Support for environment_scope requires GitLab Premium >= 13.11.
+      - Support for variable_type requires GitLab >= 11.11.
+      - A I(value) must be a string or a number.
+      - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
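+      # A minimal sketch of the two accepted I(vars) shapes (names and values
+      # below are hypothetical, not upstream examples):
+      #   vars:
+      #     SIMPLE_VAR: plainvalue          # masked/protected default to false
+      #     RICH_VAR:
+      #       value: c2VjcmV0dmFsdWU=
+      #       masked: true
+      #       protected: true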
+      - Field I(environment_scope) must be a string describing the environment scope.
+      - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+        See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables).
+    default: {}
+    type: dict
+  variables:
+    version_added: 4.4.0
+    description:
+      - A list of dictionaries that represents CI/CD variables.
+      - This module works internally with this structure, even if the older I(vars) parameter is used.
+    default: []
+    type: list
+    elements: dict
+    suboptions:
+      name:
+        description:
+          - The name of the variable.
+        type: str
+        required: true
+      value:
+        description:
+          - The variable value.
+          - Required when I(state=present).
+        type: str
+      masked:
+        description:
+          - Whether the variable value is masked or not.
+          - Support for masked values requires GitLab >= 11.10.
+        type: bool
+        default: false
+      protected:
+        description:
+          - Whether the variable value is protected or not.
+          - Support for protected values requires GitLab >= 9.3.
+        type: bool
+        default: false
+      variable_type:
+        description:
+          - Whether a variable is an environment variable (C(env_var)) or a file (C(file)).
+          - Support for I(variable_type) requires GitLab >= 11.11.
+        type: str
+        choices: ["env_var", "file"]
+        default: env_var
+      environment_scope:
+        description:
+          - The scope for the variable.
+          - Support for I(environment_scope) requires GitLab Premium >= 13.11.
+        type: str
+        default: '*'
+'''
+
+
+EXAMPLES = '''
+- name: Set or update some CI/CD variables
+  community.general.gitlab_project_variable:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: markuman/dotfiles
+    purge: false
+    variables:
+      - name: ACCESS_KEY_ID
+        value: abc123
+      - name: SECRET_ACCESS_KEY
+        value: dassgrfaeui8989
+        masked: true
+        protected: true
+        environment_scope: production
+
+- name: Set or update some CI/CD variables
+  community.general.gitlab_project_variable:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: markuman/dotfiles
+    purge: false
+    vars:
+      ACCESS_KEY_ID: abc123
+      SECRET_ACCESS_KEY:
+        value: 3214cbad
+        masked: true
+        protected: true
+        variable_type: env_var
+        environment_scope: '*'
+
+- name: Delete one variable
+  community.general.gitlab_project_variable:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: markuman/dotfiles
+    state: absent
+    vars:
+      ACCESS_KEY_ID: abc123
+'''

+RETURN = '''
+project_variable:
+  description: Four lists of the variable names which were added, updated, removed or left untouched.
+  returned: always
+  type: dict
+  contains:
+    added:
+      description: A list of variables which were created.
+      returned: always
+      type: list
+      sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+    untouched:
+      description: A list of variables which already existed and were left unchanged.
+      returned: always
+      type: list
+      sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+    removed:
+      description: A list of variables which were deleted.
+      returned: always
+      type: list
+      sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+    updated:
+      description: A list of variables whose values were changed.
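+      # A combined return value might look like this (sketch with
+      # hypothetical variable names):
+      #   project_variable:
+      #     added: ['NEW_VAR']
+      #     untouched: ['EXISTING_VAR']
+      #     removed: ['OLD_VAR']
+      #     updated: ['CHANGED_VAR']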
+ returned: always + type: list + sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.six import string_types +from ansible.module_utils.six import integer_types + +GITLAB_IMP_ERR = None +try: + import gitlab # noqa: F401, pylint: disable=unused-import + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, ensure_gitlab_package, filter_returned_variables +) + + +def vars_to_variables(vars, module): + # transform old vars to new variables structure + variables = list() + for item, value in vars.items(): + if (isinstance(value, string_types) or + isinstance(value, (integer_types, float))): + variables.append( + { + "name": item, + "value": str(value), + "masked": False, + "protected": False, + "variable_type": "env_var", + } + ) + + elif isinstance(value, dict): + + new_item = { + "name": item, + "value": value.get('value'), + "masked": value.get('masked'), + "protected": value.get('protected'), + "variable_type": value.get('variable_type'), + } + + if value.get('environment_scope'): + new_item['environment_scope'] = value.get('environment_scope') + + variables.append(new_item) + + else: + module.fail_json(msg="value must be of type string, integer, float or dict") + + return variables + + +class GitlabProjectVariables(object): + + def __init__(self, module, gitlab_instance): + self.repo = gitlab_instance + self.project = self.get_project(module.params['project']) + self._module = module + + def get_project(self, project_name): + return self.repo.projects.get(project_name) + + def list_all_project_variables(self): + page_nb = 1 + variables = [] + vars_page = self.project.variables.list(page=page_nb) + while len(vars_page) > 0: + variables += vars_page + page_nb += 1 + vars_page = self.project.variables.list(page=page_nb) + return variables + + def create_variable(self, var_obj): + if self._module.check_mode: + return True + + var = { + "key": var_obj.get('key'), + "value": var_obj.get('value'), + "masked": var_obj.get('masked'), + "protected": var_obj.get('protected'), + "variable_type": var_obj.get('variable_type'), + } + + if var_obj.get('environment_scope') is not None: + var["environment_scope"] = var_obj.get('environment_scope') + + self.project.variables.create(var) + return True + + def update_variable(self, var_obj): + if self._module.check_mode: + return True + self.delete_variable(var_obj) + self.create_variable(var_obj) + return True + + def delete_variable(self, var_obj): + if self._module.check_mode: + return True + self.project.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')}) + return True + + +def compare(requested_variables, existing_variables, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might results in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. 
all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + existing_key_scope_vars = list() + for item in existing_variables: + existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')}) + + for var in requested_variables: + if var in existing_variables: + untouched.append(var) + else: + compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')} + if compare_item in existing_key_scope_vars: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_variables, state, module): + + change = False + return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + + gitlab_keys = this_gitlab.list_all_project_variables() + before = [x.attributes for x in gitlab_keys] + + gitlab_keys = this_gitlab.list_all_project_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + # filter out and enrich before compare + for item in requested_variables: + item['key'] = item.pop('name') + item['value'] = str(item.get('value')) + if item.get('protected') is None: + item['protected'] = False + if item.get('masked') is None: + item['masked'] = False + if item.get('environment_scope') is None: + item['environment_scope'] = '*' + if item.get('variable_type') is None: + item['variable_type'] = 'env_var' + + if module.check_mode: + untouched, updated, added = compare(requested_variables, existing_variables, state) + + if state == 'present': + add_or_update = [x for x in requested_variables if x not in existing_variables] + for item in add_or_update: + try: + if this_gitlab.create_variable(item): + return_value['added'].append(item) + + except Exception: + if this_gitlab.update_variable(item): + return_value['updated'].append(item) + + if purge: + # refetch and filter + gitlab_keys = this_gitlab.list_all_project_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + remove = [x for x in existing_variables if x not in requested_variables] + for item in remove: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + elif state == 'absent': + # value does not matter on removing variables. 
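+        # For example (hypothetical data), a requested entry
+        #   {'key': 'DB_HOST', 'environment_scope': '*'}
+        # will match an existing variable regardless of its current value once
+        # 'value' and 'variable_type' are popped below;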
+ # key and environment scope are sufficient + for item in existing_variables: + item.pop('value') + item.pop('variable_type') + for item in requested_variables: + item.pop('value') + item.pop('variable_type') + + if not purge: + remove_requested = [x for x in requested_variables if x in existing_variables] + for item in remove_requested: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + else: + for item in existing_variables: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + if module.check_mode: + return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + + if return_value['added'] or return_value['removed'] or return_value['updated']: + change = True + + gitlab_keys = this_gitlab.list_all_project_variables() + after = [x.attributes for x in gitlab_keys] + + return change, return_value, before, after + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str', required=True), + purge=dict(type='bool', required=False, default=False), + vars=dict(type='dict', required=False, default=dict(), no_log=True), + variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + name=dict(type='str', required=True), + value=dict(type='str', no_log=True), + masked=dict(type='bool', default=False), + protected=dict(type='bool', default=False), + environment_scope=dict(type='str', default='*'), + variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]), + )), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['vars', 'variables'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + ensure_gitlab_package(module) + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + purge = module.params['purge'] + var_list = module.params['vars'] + state = module.params['state'] + + if var_list: + variables = vars_to_variables(var_list, module) + else: + variables = module.params['variables'] + + if state == 'present': + if any(x['value'] is None for x in variables): + module.fail_json(msg='value parameter is required in state present') + + gitlab_instance = gitlab_authentication(module) + + this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance) + + change, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) + + # postprocessing + for item in after: + item.pop('project_id') + item['name'] = item.pop('key') + for item in before: + item.pop('project_id') + item['name'] = item.pop('key') + + untouched_key_name = 'key' + if not module.check_mode: + untouched_key_name = 'name' + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('key') for x in raw_return_value['added']] + updated = [x.get('key') for x in raw_return_value['updated']] + removed = [x.get('key') for x in raw_return_value['removed']] + untouched = [x.get(untouched_key_name) for x in 
raw_return_value['untouched']]
+    return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched)
+
+    module.exit_json(changed=change, project_variable=return_value)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py b/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py
new file mode 100644
index 000000000..fea374cbf
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_protected_branch
+short_description: Manage protection of existing branches
+version_added: 3.4.0
+description:
+  - Mark or unmark existing branches as protected.
+author:
+  - "Werner Dijkerman (@dj-wasabi)"
+requirements:
+  - python >= 2.7
+  - python-gitlab >= 2.3.0
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    description:
+      - Create or delete protected branch.
+    default: present
+    type: str
+    choices: ["present", "absent"]
+  project:
+    description:
+      - The path and name of the project.
+    required: true
+    type: str
+  name:
+    description:
+      - The name of the branch that needs to be protected.
+      - Can use a wildcard character, like C(production/*), or be an exact branch name like C(main) or C(develop).
+    required: true
+    type: str
+  merge_access_levels:
+    description:
+      - Access level allowed to merge.
+    default: maintainer
+    type: str
+    choices: ["maintainer", "developer", "nobody"]
+  push_access_level:
+    description:
+      - Access level allowed to push.
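+      # These choices map to python-gitlab access-level constants further down
+      # in this module (a sketch of that mapping): nobody -> NO_ACCESS (0),
+      # developer -> DEVELOPER_ACCESS (30), maintainer -> MAINTAINER_ACCESS (40).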
+    default: maintainer
+    type: str
+    choices: ["maintainer", "developer", "nobody"]
+'''
+
+
+EXAMPLES = '''
+- name: Create protected branch on main
+  community.general.gitlab_protected_branch:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "dj-wasabi/collection.general"
+    name: main
+    merge_access_levels: maintainer
+    push_access_level: nobody
+
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+    auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitlabProtectedBranch(object):
+
+    def __init__(self, module, project, gitlab_instance):
+        self.repo = gitlab_instance
+        self._module = module
+        self.project = self.get_project(project)
+        self.ACCESS_LEVEL = {
+            'nobody': gitlab.NO_ACCESS,
+            'developer': gitlab.DEVELOPER_ACCESS,
+            'maintainer': gitlab.MAINTAINER_ACCESS
+        }
+
+    def get_project(self, project_name):
+        return self.repo.projects.get(project_name)
+
+    def protected_branch_exist(self, name):
+        try:
+            return self.project.protectedbranches.get(name)
+        except gitlab.exceptions.GitlabGetError:
+            # the branch is not protected (or does not exist)
+            return False
+
+    def create_protected_branch(self, name, merge_access_levels, push_access_level):
+        if self._module.check_mode:
+            return True
+        merge = self.ACCESS_LEVEL[merge_access_levels]
+        push = self.ACCESS_LEVEL[push_access_level]
+        self.project.protectedbranches.create({
+            'name': name,
+            'merge_access_level': merge,
+            'push_access_level': push
+        })
+
+    def compare_protected_branch(self, name, merge_access_levels, push_access_level):
+        configured_merge = self.ACCESS_LEVEL[merge_access_levels]
+        configured_push = self.ACCESS_LEVEL[push_access_level]
+        current = self.protected_branch_exist(name=name)
+        if current:
+            # only read the current levels once we know the branch is protected
+            current_merge = current.merge_access_levels[0]['access_level']
+            current_push = current.push_access_levels[0]['access_level']
+            if current.name == name and current_merge == configured_merge and current_push == configured_push:
+                return True
+        return False
+
+    def delete_protected_branch(self, name):
+        if self._module.check_mode:
+            return True
+        return self.project.protectedbranches.delete(name)
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(auth_argument_spec())
+    argument_spec.update(
+        project=dict(type='str', required=True),
+        name=dict(type='str', required=True),
+        merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]),
+        push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]),
+        state=dict(type='str', default="present", choices=["absent", "present"]),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['api_username', 'api_token'],
+            ['api_username', 'api_oauth_token'],
+            ['api_username', 'api_job_token'],
+            ['api_token', 'api_oauth_token'],
+            ['api_token', 'api_job_token'],
+        ],
+        required_together=[
+            ['api_username', 'api_password'],
+        ],
+        required_one_of=[
+            ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+        ],
+        supports_check_mode=True
+    )
+    ensure_gitlab_package(module)
+
+    project = module.params['project']
+    name = module.params['name']
+    merge_access_levels = module.params['merge_access_levels']
+    push_access_level = module.params['push_access_level']
+    state = module.params['state']
+
+    gitlab_version = gitlab.__version__
+    if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
+        module.fail_json(msg="community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
+                             " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
+
+    gitlab_instance = gitlab_authentication(module)
+    this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance)
+
+    p_branch = this_gitlab.protected_branch_exist(name=name)
+    if not p_branch and state == "present":
+        this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
+        module.exit_json(changed=True, msg="Created the protected branch.")
+    elif p_branch and state == "present":
+        if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level):
+            this_gitlab.delete_protected_branch(name=name)
+            this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
+            module.exit_json(changed=True, msg="Recreated the protected branch.")
+    elif p_branch and state == "absent":
+        this_gitlab.delete_protected_branch(name=name)
+        module.exit_json(changed=True, msg="Deleted the protected branch.")
+    module.exit_json(changed=False, msg="No changes are needed.")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_runner.py b/ansible_collections/community/general/plugins/modules/gitlab_runner.py
new file mode 100644
index 000000000..a41b135fc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_runner.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Raphaël Droz (raphael.droz@gmail.com)
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Samy Coenen
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_runner
+short_description: Create, modify and delete GitLab Runners
+description:
+  - Register, update and delete runners with the GitLab API.
+  - All operations are performed using the GitLab API v4.
+  - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
+  - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
+    U(https://$GITLAB_URL/profile/personal_access_tokens).
+  - A valid registration token is required for registering a new runner.
+    To create shared runners, you need to ask your administrator to give you this token.
+    It can be found at U(https://$GITLAB_URL/admin/runners/).
+notes:
+  - To create a new runner, at least the C(api_token), C(description) and C(api_url) options are required.
+  - Runners need to have unique descriptions.
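+  # A hypothetical sketch of registering a group-scoped runner (the I(group)
+  # option is documented below; all values here are placeholders):
+  #   - community.general.gitlab_runner:
+  #       api_url: https://gitlab.example.com/
+  #       api_token: "{{ access_token }}"
+  #       registration_token: "{{ registration_token }}"
+  #       description: group-runner-01
+  #       group: mygroup/mysubgroup
+  #       state: present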
+author:
+  - Samy Coenen (@SamyCoenen)
+  - Guillaume Martinez (@Lunik)
+requirements:
+  - python >= 2.7
+  - python-gitlab >= 1.5.0
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  group:
+    description:
+      - ID or full path of the group in the form group/subgroup.
+      - Mutually exclusive with I(owned) and I(project).
+    type: str
+    version_added: '6.5.0'
+  project:
+    description:
+      - ID or full path of the project in the form of group/name.
+      - Mutually exclusive with I(owned) since community.general 4.5.0.
+      - Mutually exclusive with I(group).
+    type: str
+    version_added: '3.7.0'
+  description:
+    description:
+      - The unique name of the runner.
+    required: true
+    type: str
+    aliases:
+      - name
+  state:
+    description:
+      - Make sure that the runner with the same name exists with the same configuration, or delete the runner with the same name.
+    required: false
+    default: present
+    choices: ["present", "absent"]
+    type: str
+  registration_token:
+    description:
+      - The registration token is used to register new runners.
+      - Required if I(state) is C(present).
+    type: str
+  owned:
+    description:
+      - Search only for runners available to the user when looking up existing runners; when C(false), an admin token is required.
+      - Mutually exclusive with I(project) since community.general 4.5.0.
+      - Mutually exclusive with I(group).
+    default: false
+    type: bool
+    version_added: 2.0.0
+  active:
+    description:
+      - Define if the runner is immediately active after creation.
+    required: false
+    default: true
+    type: bool
+  locked:
+    description:
+      - Determines if the runner is locked or not.
+    required: false
+    default: false
+    type: bool
+  access_level:
+    description:
+      - Determines if a runner can pick up jobs only from protected branches.
+      - If I(access_level_on_creation) is not explicitly set to C(true), this option is ignored on registration and
+        is only applied on updates.
+      - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches.
+      - If set to C(ref_protected), runner can pick up jobs only from protected branches.
+      - The current default is C(ref_protected). This will change to no default in community.general 8.0.0.
+        From that version on, if this option is not specified explicitly, GitLab will use C(not_protected)
+        on creation, and the value set will not be changed on any updates.
+    required: false
+    choices: ["not_protected", "ref_protected"]
+    type: str
+  access_level_on_creation:
+    description:
+      - Whether the runner should be registered with an access level or not.
+      - If set to C(true), the value of I(access_level) is used for runner registration.
+      - If set to C(false), GitLab registers the runner with the default access level.
+      - The current default of this option is C(false). This default is deprecated and will change to C(true) in community.general 7.0.0.
+    required: false
+    type: bool
+    version_added: 6.3.0
+  maximum_timeout:
+    description:
+      - The maximum time that a runner has to complete a specific job.
+    required: false
+    default: 3600
+    type: int
+  run_untagged:
+    description:
+      - Run untagged jobs or not.
+    required: false
+    default: true
+    type: bool
+  tag_list:
+    description: The tags that apply to the runner.
+ required: false + default: [] + type: list + elements: str +''' + +EXAMPLES = ''' +- name: "Register runner" + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + +- name: "Delete runner" + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: absent + +- name: Delete an owned runner as a non-admin + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + owned: true + state: absent + +- name: Register runner for a specific project + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: MyProject runner + state: present + project: mygroup/mysubgroup/myproject +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: path is already in use" + +runner: + description: API object + returned: always + type: dict +''' + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package +) + + +try: + cmp # pylint: disable=used-before-assignment +except NameError: + def cmp(a, b): + return (a > b) - (a < b) + + +class GitLabRunner(object): + def __init__(self, module, gitlab_instance, group=None, project=None): + self._module = module + self._gitlab = gitlab_instance + self.runner_object = None + + # Whether to operate on GitLab-instance-wide or project-wide runners + # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 + # for group runner token access + if project: + self._runners_endpoint = project.runners.list + elif group: + self._runners_endpoint = group.runners.list + elif module.params['owned']: + self._runners_endpoint = gitlab_instance.runners.list + else: + self._runners_endpoint = gitlab_instance.runners.all + + def create_or_update_runner(self, description, options): + changed = False + + arguments = { + 'active': options['active'], + 'locked': options['locked'], + 'run_untagged': options['run_untagged'], + 'maximum_timeout': options['maximum_timeout'], + 'tag_list': options['tag_list'], + } + if options.get('access_level') is not None: + arguments['access_level'] = options['access_level'] + # Because we have already call userExists in main() + if self.runner_object is None: + arguments['description'] = description + arguments['token'] = options['registration_token'] + + access_level_on_creation = self._module.params['access_level_on_creation'] + if access_level_on_creation is None: + message = "The option 'access_level_on_creation' is unspecified, so 'false' is assumed. "\ + "That means any value of 'access_level' is ignored and GitLab registers the runner with its default value. 
"\ + "The option 'access_level_on_creation' will switch to 'true' in community.general 7.0.0" + self._module.deprecate(message, version='7.0.0', collection_name='community.general') + access_level_on_creation = False + + if not access_level_on_creation: + arguments.pop('access_level', None) + + runner = self.create_runner(arguments) + changed = True + else: + changed, runner = self.update_runner(self.runner_object, arguments) + + self.runner_object = runner + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description) + + try: + runner.save() + except Exception as e: + self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) + return True + else: + return False + + ''' + @param arguments Attributes of the runner + ''' + def create_runner(self, arguments): + if self._module.check_mode: + return True + + try: + runner = self._gitlab.runners.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) + + return runner + + ''' + @param runner Runner object + @param arguments Attributes of the runner + ''' + def update_runner(self, runner, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + if isinstance(arguments[arg_key], list): + list1 = getattr(runner, arg_key) + list1.sort() + list2 = arguments[arg_key] + list2.sort() + if cmp(list1, list2): + setattr(runner, arg_key, arguments[arg_key]) + changed = True + else: + if getattr(runner, arg_key) != arguments[arg_key]: + setattr(runner, arg_key, arguments[arg_key]) + changed = True + + return (changed, runner) + + ''' + @param description Description of the runner + ''' + def find_runner(self, description): + runners = self._runners_endpoint(as_list=False) + + for runner in runners: + # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner + # object, so we need to handle both + if hasattr(runner, "description"): + if (runner.description == description): + return self._gitlab.runners.get(runner.id) + else: + if (runner['description'] == description): + return self._gitlab.runners.get(runner['id']) + + ''' + @param description Description of the runner + ''' + def exists_runner(self, description): + # When runner exists, object will be stored in self.runner_object. 
+ runner = self.find_runner(description) + + if runner: + self.runner_object = runner + return True + return False + + def delete_runner(self): + if self._module.check_mode: + return True + + runner = self.runner_object + + return runner.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + description=dict(type='str', required=True, aliases=["name"]), + active=dict(type='bool', default=True), + owned=dict(type='bool', default=False), + tag_list=dict(type='list', elements='str', default=[]), + run_untagged=dict(type='bool', default=True), + locked=dict(type='bool', default=False), + access_level=dict(type='str', choices=["not_protected", "ref_protected"]), + access_level_on_creation=dict(type='bool'), + maximum_timeout=dict(type='int', default=3600), + registration_token=dict(type='str', no_log=True), + project=dict(type='str'), + group=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'owned'], + ['group', 'owned'], + ['project', 'group'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ], + required_if=[ + ('state', 'present', ['registration_token']), + ], + supports_check_mode=True, + ) + ensure_gitlab_package(module) + + state = module.params['state'] + runner_description = module.params['description'] + runner_active = module.params['active'] + tag_list = module.params['tag_list'] + run_untagged = module.params['run_untagged'] + runner_locked = module.params['locked'] + access_level = module.params['access_level'] + maximum_timeout = module.params['maximum_timeout'] + registration_token = module.params['registration_token'] + project = module.params['project'] + group = module.params['group'] + + if access_level is None: + message = "The option 'access_level' is unspecified, so 'ref_protected' is assumed. "\ + "In order to align the module with GitLab's runner API, this option will lose "\ + "its default value in community.general 8.0.0. 
From that version on, you must set "\
+                  "this option to 'ref_protected' explicitly, if you want to have a protected runner, "\
+                  "otherwise GitLab's default access level gets applied, which is 'not_protected'"
+        module.deprecate(message, version='8.0.0', collection_name='community.general')
+        access_level = 'ref_protected'
+
+    gitlab_instance = gitlab_authentication(module)
+    gitlab_project = None
+    gitlab_group = None
+
+    if project:
+        try:
+            gitlab_project = gitlab_instance.projects.get(project)
+        except gitlab.exceptions.GitlabGetError as e:
+            module.fail_json(msg='No such project %s' % project, exception=to_native(e))
+    elif group:
+        try:
+            gitlab_group = gitlab_instance.groups.get(group)
+        except gitlab.exceptions.GitlabGetError as e:
+            module.fail_json(msg='No such group %s' % group, exception=to_native(e))
+
+    gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_group, gitlab_project)
+    runner_exists = gitlab_runner.exists_runner(runner_description)
+
+    if state == 'absent':
+        if runner_exists:
+            gitlab_runner.delete_runner()
+            module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description)
+        else:
+            module.exit_json(changed=False, msg="Runner already deleted or does not exist")
+
+    if state == 'present':
+        if gitlab_runner.create_or_update_runner(runner_description, {
+            "active": runner_active,
+            "tag_list": tag_list,
+            "run_untagged": run_untagged,
+            "locked": runner_locked,
+            "access_level": access_level,
+            "maximum_timeout": maximum_timeout,
+            "registration_token": registration_token,
+        }):
+            module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs,
+                             msg="Successfully created or updated the runner %s" % runner_description)
+        else:
+            module.exit_json(changed=False, runner=gitlab_runner.runner_object._attrs,
+                             msg="No need to update the runner %s" % runner_description)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_user.py b/ansible_collections/community/general/plugins/modules/gitlab_user.py
new file mode 100644
index 000000000..94f371316
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_user.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be)
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
+description:
+  - When the user does not exist in GitLab, it will be created.
+  - When the user exists and state=absent, the user will be deleted.
+  - When the user exists and state=blocked, the user will be blocked.
+  - When changes are made to the user, the user will be updated.
+notes:
+  - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
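+  # That optionality is enforced in the argument spec further down via
+  # required_if (a sketch): required_if=(('state', 'present', ['name', 'email']),),
+  # so only I(username) is needed for I(state=absent).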
+author: + - Werner Dijkerman (@dj-wasabi) + - Guillaume Martinez (@Lunik) + - Lennert Mertens (@LennertMertens) + - Stef Graces (@stgrace) +requirements: + - python >= 2.7 + - python-gitlab python module + - administrator rights on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + name: + description: + - Name of the user you want to create. + - Required only if C(state) is set to C(present). + type: str + username: + description: + - The username of the user. + required: true + type: str + password: + description: + - The password of the user. + - GitLab server enforces minimum password length to 8, set this value with 8 or more characters. + type: str + reset_password: + description: + - Whether the user can change its password or not. + default: false + type: bool + version_added: 3.3.0 + email: + description: + - The email that belongs to the user. + - Required only if C(state) is set to C(present). + type: str + sshkey_name: + description: + - The name of the SSH public key. + type: str + sshkey_file: + description: + - The SSH public key itself. + type: str + sshkey_expires_at: + description: + - The expiration date of the SSH public key in ISO 8601 format C(YYYY-MM-DDTHH:MM:SSZ). + - This is only used when adding new SSH public keys. + type: str + version_added: 3.1.0 + group: + description: + - Id or Full path of parent group in the form of group/name. + - Add user as a member to this group. + type: str + access_level: + description: + - The access level to the group. One of the following can be used. + - guest + - reporter + - developer + - master (alias for maintainer) + - maintainer + - owner + default: guest + type: str + choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"] + state: + description: + - Create, delete or block a user. + default: present + type: str + choices: ["present", "absent", "blocked", "unblocked"] + confirm: + description: + - Require confirmation. + type: bool + default: true + isadmin: + description: + - Grant admin privileges to the user. + type: bool + default: false + external: + description: + - Define external parameter for this user. + type: bool + default: false + identities: + description: + - List of identities to be added/updated for this user. + - To remove all other identities from this user, set I(overwrite_identities=true). + type: list + elements: dict + suboptions: + provider: + description: + - The name of the external identity provider + type: str + extern_uid: + description: + - User ID for external identity. + type: str + version_added: 3.3.0 + overwrite_identities: + description: + - Overwrite identities with identities added in this module. + - This means that all identities that the user has and that are not listed in I(identities) are removed from the user. + - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list. 
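+      # Sketch (hypothetical provider and UID): combining the two options
+      # removes any identity that is not listed:
+      #   identities:
+      #     - provider: ldapmain
+      #       extern_uid: uid=someuser,ou=people,dc=example,dc=com
+      #   overwrite_identities: true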
+ type: bool + default: false + version_added: 3.3.0 +''' + +EXAMPLES = ''' +- name: "Delete GitLab User" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + validate_certs: false + username: myusername + state: absent + +- name: "Create GitLab User" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + validate_certs: true + api_username: dj-wasabi + api_password: "MySecretPassword" + name: My Name + username: myusername + password: mysecretpassword + email: me@example.com + sshkey_name: MySSH + sshkey_file: ssh-rsa AAAAB3NzaC1yc... + state: present + group: super_group/mon_group + access_level: owner + +- name: "Create GitLab User using external identity provider" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + validate_certs: true + api_token: "{{ access_token }}" + name: My Name + username: myusername + password: mysecretpassword + email: me@example.com + identities: + - provider: Keycloak + extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc + state: present + group: super_group/mon_group + access_level: owner + +- name: "Block GitLab User" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + validate_certs: false + username: myusername + state: blocked + +- name: "Unblock GitLab User" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + validate_certs: false + username: myusername + state: unblocked +''' + +RETURN = ''' +msg: + description: Success or failure message + returned: always + type: str + sample: "Success" + +result: + description: json parsed response from the server + returned: always + type: dict + +error: + description: the error message returned by the GitLab API + returned: failed + type: str + sample: "400: path is already in use" + +user: + description: API object + returned: always + type: dict +''' + + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab, ensure_gitlab_package +) + + +class GitLabUser(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.user_object = None + self.ACCESS_LEVEL = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'master': gitlab.MAINTAINER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS, + 'owner': gitlab.OWNER_ACCESS, + } + + ''' + @param username Username of the user + @param options User options + ''' + def create_or_update_user(self, username, options): + changed = False + potentionally_changed = False + + # Because we have already call userExists in main() + if self.user_object is None: + user = self.create_user({ + 'name': options['name'], + 'username': username, + 'password': options['password'], + 'reset_password': options['reset_password'], + 'email': options['email'], + 'skip_confirmation': not options['confirm'], + 'admin': options['isadmin'], + 'external': options['external'], + 'identities': options['identities'], + }) + changed = True + else: + changed, user = self.update_user( + self.user_object, { + # add "normal" parameters here, put uncheckable + # params in the dict below + 'name': {'value': 
options['name']}, + 'email': {'value': options['email']}, + + # note: for some attributes like this one the key + # from reading back from server is unfortunately + # different to the one needed for pushing/writing, + # in that case use the optional setter key + 'is_admin': { + 'value': options['isadmin'], 'setter': 'admin' + }, + 'external': {'value': options['external']}, + 'identities': {'value': options['identities']}, + }, + { + # put "uncheckable" params here, this means params + # which the gitlab does accept for setting but does + # not return any information about it + 'skip_reconfirmation': {'value': not options['confirm']}, + 'password': {'value': options['password']}, + 'reset_password': {'value': options['reset_password']}, + 'overwrite_identities': {'value': options['overwrite_identities']}, + } + ) + + # note: as we unfortunately have some uncheckable parameters + # where it is not possible to determine if the update + # changed something or not, we must assume here that a + # changed happened and that an user object update is needed + potentionally_changed = True + + # Assign ssh keys + if options['sshkey_name'] and options['sshkey_file']: + key_changed = self.add_ssh_key_to_user(user, { + 'name': options['sshkey_name'], + 'file': options['sshkey_file'], + 'expires_at': options['sshkey_expires_at']}) + changed = changed or key_changed + + # Assign group + if options['group_path']: + group_changed = self.assign_user_to_group(user, options['group_path'], options['access_level']) + changed = changed or group_changed + + self.user_object = user + if (changed or potentionally_changed) and not self._module.check_mode: + try: + user.save() + except Exception as e: + self._module.fail_json(msg="Failed to update user: %s " % to_native(e)) + + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username) + return True + else: + return False + + ''' + @param group User object + ''' + def get_user_id(self, user): + if user is not None: + return user.id + return None + + ''' + @param user User object + @param sshkey_name Name of the ssh key + ''' + def ssh_key_exists(self, user, sshkey_name): + keyList = map(lambda k: k.title, user.keys.list(all=True)) + + return sshkey_name in keyList + + ''' + @param user User object + @param sshkey Dict containing sshkey infos {"name": "", "file": "", "expires_at": ""} + ''' + def add_ssh_key_to_user(self, user, sshkey): + if not self.ssh_key_exists(user, sshkey['name']): + if self._module.check_mode: + return True + + try: + parameter = { + 'title': sshkey['name'], + 'key': sshkey['file'], + } + if sshkey['expires_at'] is not None: + parameter['expires_at'] = sshkey['expires_at'] + user.keys.create(parameter) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e)) + return True + return False + + ''' + @param group Group object + @param user_id Id of the user to find + ''' + def find_member(self, group, user_id): + try: + member = group.members.get(user_id) + except gitlab.exceptions.GitlabGetError: + return None + return member + + ''' + @param group Group object + @param user_id Id of the user to check + ''' + def member_exists(self, group, user_id): + member = self.find_member(group, user_id) + + return member is not None + + ''' + @param group Group object + @param user_id Id of the user to check + @param access_level GitLab access_level to check + ''' + def 
member_as_good_access_level(self, group, user_id, access_level): + member = self.find_member(group, user_id) + + return member.access_level == access_level + + ''' + @param user User object + @param group_path Complete path of the Group including parent group path. / + @param access_level GitLab access_level to assign + ''' + def assign_user_to_group(self, user, group_identifier, access_level): + group = find_group(self._gitlab, group_identifier) + + if self._module.check_mode: + return True + + if group is None: + return False + + if self.member_exists(group, self.get_user_id(user)): + member = self.find_member(group, self.get_user_id(user)) + if not self.member_as_good_access_level(group, member.id, self.ACCESS_LEVEL[access_level]): + member.access_level = self.ACCESS_LEVEL[access_level] + member.save() + return True + else: + try: + group.members.create({ + 'user_id': self.get_user_id(user), + 'access_level': self.ACCESS_LEVEL[access_level]}) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e)) + return True + return False + + ''' + @param user User object + @param arguments User attributes + ''' + def update_user(self, user, arguments, uncheckable_args): + changed = False + + for arg_key, arg_value in arguments.items(): + av = arg_value['value'] + + if av is not None: + if arg_key == "identities": + changed = self.add_identities(user, av, uncheckable_args['overwrite_identities']['value']) + + elif getattr(user, arg_key) != av: + setattr(user, arg_value.get('setter', arg_key), av) + changed = True + + for arg_key, arg_value in uncheckable_args.items(): + av = arg_value['value'] + + if av is not None: + setattr(user, arg_value.get('setter', arg_key), av) + + return (changed, user) + + ''' + @param arguments User attributes + ''' + def create_user(self, arguments): + if self._module.check_mode: + return True + + identities = None + if 'identities' in arguments: + identities = arguments['identities'] + del arguments['identities'] + + try: + user = self._gitlab.users.create(arguments) + if identities: + self.add_identities(user, identities) + + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create user: %s " % to_native(e)) + + return user + + ''' + @param user User object + @param identites List of identities to be added/updated + @param overwrite_identities Overwrite user identities with identities passed to this module + ''' + def add_identities(self, user, identities, overwrite_identities=False): + changed = False + if overwrite_identities: + changed = self.delete_identities(user, identities) + + for identity in identities: + if identity not in user.identities: + setattr(user, 'provider', identity['provider']) + setattr(user, 'extern_uid', identity['extern_uid']) + if not self._module.check_mode: + user.save() + changed = True + return changed + + ''' + @param user User object + @param identites List of identities to be added/updated + ''' + def delete_identities(self, user, identities): + changed = False + for identity in user.identities: + if identity not in identities: + if not self._module.check_mode: + user.identityproviders.delete(identity['provider']) + changed = True + return changed + + ''' + @param username Username of the user + ''' + def find_user(self, username): + users = self._gitlab.users.list(search=username, all=True) + for user in users: + if (user.username == username): + return user + + ''' + @param username Username of the user + ''' + def 
exists_user(self, username): + # When user exists, object will be stored in self.user_object. + user = self.find_user(username) + if user: + self.user_object = user + return True + return False + + ''' + @param username Username of the user + ''' + def is_active(self, username): + user = self.find_user(username) + return user.attributes['state'] == 'active' + + def delete_user(self): + if self._module.check_mode: + return True + + user = self.user_object + + return user.delete() + + def block_user(self): + if self._module.check_mode: + return True + + user = self.user_object + + return user.block() + + def unblock_user(self): + if self._module.check_mode: + return True + + user = self.user_object + + return user.unblock() + + +def sanitize_arguments(arguments): + for key, value in list(arguments.items()): + if value is None: + del arguments[key] + return arguments + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + name=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]), + username=dict(type='str', required=True), + password=dict(type='str', no_log=True), + reset_password=dict(type='bool', default=False, no_log=False), + email=dict(type='str'), + sshkey_name=dict(type='str'), + sshkey_file=dict(type='str', no_log=False), + sshkey_expires_at=dict(type='str', no_log=False), + group=dict(type='str'), + access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), + confirm=dict(type='bool', default=True), + isadmin=dict(type='bool', default=False), + external=dict(type='bool', default=False), + identities=dict(type='list', elements='dict'), + overwrite_identities=dict(type='bool', default=False), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True, + required_if=( + ('state', 'present', ['name', 'email']), + ) + ) + ensure_gitlab_package(module) + + user_name = module.params['name'] + state = module.params['state'] + user_username = module.params['username'].lower() + user_password = module.params['password'] + user_reset_password = module.params['reset_password'] + user_email = module.params['email'] + user_sshkey_name = module.params['sshkey_name'] + user_sshkey_file = module.params['sshkey_file'] + user_sshkey_expires_at = module.params['sshkey_expires_at'] + group_path = module.params['group'] + access_level = module.params['access_level'] + confirm = module.params['confirm'] + user_isadmin = module.params['isadmin'] + user_external = module.params['external'] + user_identities = module.params['identities'] + overwrite_identities = module.params['overwrite_identities'] + + gitlab_instance = gitlab_authentication(module) + + gitlab_user = GitLabUser(module, gitlab_instance) + user_exists = gitlab_user.exists_user(user_username) + if user_exists: + user_is_active = gitlab_user.is_active(user_username) + else: + user_is_active = False + + if state == 'absent': + if user_exists: + gitlab_user.delete_user() + module.exit_json(changed=True, msg="Successfully deleted user %s" % 
user_username) + else: + module.exit_json(changed=False, msg="User deleted or does not exist") + + if state == 'blocked': + if user_exists and user_is_active: + gitlab_user.block_user() + module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username) + else: + module.exit_json(changed=False, msg="User already blocked or does not exist") + + if state == 'unblocked': + if user_exists and not user_is_active: + gitlab_user.unblock_user() + module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username) + else: + module.exit_json(changed=False, msg="User is not blocked or does not exist") + + if state == 'present': + if gitlab_user.create_or_update_user(user_username, { + "name": user_name, + "password": user_password, + "reset_password": user_reset_password, + "email": user_email, + "sshkey_name": user_sshkey_name, + "sshkey_file": user_sshkey_file, + "sshkey_expires_at": user_sshkey_expires_at, + "group_path": group_path, + "access_level": access_level, + "confirm": confirm, + "isadmin": user_isadmin, + "external": user_external, + "identities": user_identities, + "overwrite_identities": overwrite_identities, + }): + module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.user_object._attrs) + else: + module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.user_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/grove.py b/ansible_collections/community/general/plugins/modules/grove.py new file mode 100644 index 000000000..b3e0508ff --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/grove.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: grove +short_description: Sends a notification to a grove.io channel +description: + - The C(grove) module sends a message for a service to a Grove.io + channel. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + channel_token: + type: str + description: + - Token of the channel to post to. + required: true + service: + type: str + description: + - Name of the service (displayed as the "user" in the message). + required: false + default: ansible + message_content: + type: str + description: + - Message content. + - The alias I(message) was removed in community.general 4.0.0. + required: true + url: + type: str + description: + - Service URL for the web client. + required: false + icon_url: + type: str + description: + - Icon for the service. + required: false + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
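As an aside to the state dispatch above: each requested state maps directly onto one python-gitlab call (C(delete), C(block), C(unblock)). Below is a minimal sketch of that mapping outside Ansible, assuming the python-gitlab package; the URL, token, and username are placeholders.

```python
# Minimal sketch of the state dispatch above, assuming python-gitlab.
# The URL, token, and username below are placeholders.
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')


def find_user(gl, username):
    # Mirrors GitLabUser.find_user: search first, then match exactly.
    for user in gl.users.list(search=username, all=True):
        if user.username == username:
            return user
    return None


user = find_user(gl, 'jdoe')
if user is not None:
    if user.state == 'active':
        user.block()      # state=blocked
    else:
        user.unblock()    # state=unblocked
    # user.delete()       # state=absent
```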
+ default: true + type: bool +author: "Jonas Pfenniger (@zimbatm)" +''' + +EXAMPLES = ''' +- name: Sends a notification to a grove.io channel + community.general.grove: + channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg + service: my-app + message_content: 'deployed {{ target }}' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + + +BASE_URL = 'https://grove.io/api/notice/%s/' + +# ============================================================== +# do_notify_grove + + +def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None): + my_url = BASE_URL % (channel_token,) + + my_data = dict(service=service, message=message) + if url is not None: + my_data['url'] = url + if icon_url is not None: + my_data['icon_url'] = icon_url + + data = urlencode(my_data) + response, info = fetch_url(module, my_url, data=data) + if info['status'] != 200: + module.fail_json(msg="failed to send notification: %s" % info['msg']) + +# ============================================================== +# main + + +def main(): + module = AnsibleModule( + argument_spec=dict( + channel_token=dict(type='str', required=True, no_log=True), + message_content=dict(type='str', required=True), + service=dict(type='str', default='ansible'), + url=dict(type='str', default=None), + icon_url=dict(type='str', default=None), + validate_certs=dict(default=True, type='bool'), + ) + ) + + channel_token = module.params['channel_token'] + service = module.params['service'] + message = module.params['message_content'] + url = module.params['url'] + icon_url = module.params['icon_url'] + + do_notify_grove(module, channel_token, service, message, url, icon_url) + + # Mission complete + module.exit_json(msg="OK") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/gunicorn.py b/ansible_collections/community/general/plugins/modules/gunicorn.py new file mode 100644 index 000000000..2b2abcf8e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/gunicorn.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Alejandro Gomez +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: gunicorn +short_description: Run gunicorn with various settings +description: + - Starts gunicorn with the parameters specified. Common settings for gunicorn + configuration are supported. For additional configuration use a config file. + See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more + options. It's recommended to always use the chdir option to avoid problems + with the location of the app. +requirements: [gunicorn] +author: + - "Alejandro Gomez (@agmezr)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + app: + type: str + required: true + aliases: ['name'] + description: + - The app module. A name refers to a WSGI callable that should be found in the specified module. + venv: + type: path + aliases: ['virtualenv'] + description: + - 'Path to the virtualenv directory.' + config: + type: path + description: + - 'Path to the gunicorn configuration file.' 
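The C(do_notify_grove) helper above is just a form-encoded POST against the notice endpoint. Here is a standalone sketch of the same request using only the standard library; the channel token and message are placeholders.

```python
# Standalone sketch of the Grove.io notice POST shown above.
# CHANNEL_TOKEN is a placeholder.
from urllib.parse import urlencode
from urllib.request import urlopen

BASE_URL = 'https://grove.io/api/notice/%s/'


def notify_grove(channel_token, service, message, url=None, icon_url=None):
    data = {'service': service, 'message': message}
    if url is not None:
        data['url'] = url
    if icon_url is not None:
        data['icon_url'] = icon_url
    # fetch_url in the module performs the equivalent of this POST
    resp = urlopen(BASE_URL % channel_token, data=urlencode(data).encode())
    if resp.getcode() != 200:
        raise RuntimeError('failed to send notification')

# notify_grove('CHANNEL_TOKEN', 'my-app', 'deployed example')
```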
+ aliases: ['conf'] + chdir: + type: path + description: + - 'Change to the specified directory before loading apps.' + pid: + type: path + description: + - 'A filename to use for the PID file. If not set and not found in the configuration file, a temporary + pid file will be created to check for a successful run of gunicorn.' + worker: + type: str + choices: ['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp'] + description: + - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.' + user: + type: str + description: + - 'Switch worker processes to run as this user.' +notes: + - If not specified in the config file, a temporary error log will be created in the /tmp dir. + Please make sure you have write access to /tmp. This is not required, but will help you to + identify any problem with the configuration. +''' + +EXAMPLES = ''' +- name: Simple gunicorn run example + community.general.gunicorn: + app: 'wsgi' + chdir: '/workspace/example' + +- name: Run gunicorn on a virtualenv + community.general.gunicorn: + app: 'wsgi' + chdir: '/workspace/example' + venv: '/workspace/example/venv' + +- name: Run gunicorn with a config file + community.general.gunicorn: + app: 'wsgi' + chdir: '/workspace/example' + conf: '/workspace/example/gunicorn.cfg' + +- name: Run gunicorn as ansible user with specified pid and config file + community.general.gunicorn: + app: 'wsgi' + chdir: '/workspace/example' + conf: '/workspace/example/gunicorn.cfg' + venv: '/workspace/example/venv' + pid: '/workspace/example/gunicorn.pid' + user: 'ansible' +''' + +RETURN = ''' +gunicorn: + description: Process ID of gunicorn. + returned: changed + type: str + sample: "1234" +''' + +import os +import time + +from ansible.module_utils.basic import AnsibleModule + + +def search_existing_config(config, option): + ''' search in config file for specified option ''' + if config and os.path.isfile(config): + with open(config, 'r') as f: + for line in f: + if option in line: + return line + return None + + +def remove_tmp_file(file_path): + ''' remove temporary files ''' + if os.path.isfile(file_path): + os.remove(file_path) + + +def main(): + + # available gunicorn options on module + gunicorn_options = { + 'config': '-c', + 'chdir': '--chdir', + 'worker': '-k', + 'user': '-u', + } + + module = AnsibleModule( + argument_spec=dict( + app=dict(required=True, type='str', aliases=['name']), + venv=dict(type='path', aliases=['virtualenv']), + config=dict(type='path', aliases=['conf']), + chdir=dict(type='path'), + pid=dict(type='path'), + user=dict(type='str'), + worker=dict(type='str', choices=['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']), + ) + ) + + # temporary files in case no option provided + tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log') + tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid') + + # remove temp files if they exist + remove_tmp_file(tmp_pid_file) + remove_tmp_file(tmp_error_log) + + # obtain app name and venv + params = module.params + app = params['app'] + venv = params['venv'] + pid = params['pid'] + + # use venv path if provided + if venv: + gunicorn_command = "/".join((venv, 'bin', 'gunicorn')) + else: + gunicorn_command = module.get_bin_path('gunicorn') + + # to daemonize the process + options = ["-D"] + + # fill options + for option in gunicorn_options: + param = params[option] + if param: + options.append(gunicorn_options[option]) + options.append(param) + + error_log = search_existing_config(params['config'], 'errorlog') + if not 
error_log: + # place error log somewhere in case of fail + options.append("--error-logfile") + options.append(tmp_error_log) + + pid_file = search_existing_config(params['config'], 'pid') + if not params['pid'] and not pid_file: + pid = tmp_pid_file + + # add option for pid file if not found on config file + if not pid_file: + options.append('--pid') + options.append(pid) + + # put args together + args = [gunicorn_command] + options + [app] + rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None) + + if not err: + # wait for gunicorn to dump to log + time.sleep(0.5) + if os.path.isfile(pid): + with open(pid, 'r') as f: + result = f.readline().strip() + + if not params['pid']: + os.remove(pid) + + module.exit_json(changed=True, pid=result, debug=" ".join(args)) + else: + # if user defined own error log, check that + if error_log: + error = 'Please check your {0}'.format(error_log.strip()) + else: + if os.path.isfile(tmp_error_log): + with open(tmp_error_log, 'r') as f: + error = f.read() + # delete tmp log + os.remove(tmp_error_log) + else: + error = "Log not found" + + module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err) + + else: + module.fail_json(msg='Failed to start gunicorn {0}'.format(err), error=err) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hana_query.py b/ansible_collections/community/general/plugins/modules/hana_query.py new file mode 100644 index 000000000..0b12e9935 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hana_query.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Rainer Leber +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: hana_query +short_description: Execute SQL on HANA +version_added: 3.2.0 +description: This module executes SQL statements on HANA with hdbsql. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + sid: + description: The system ID. + type: str + required: true + instance: + description: The instance number. + type: str + required: true + user: + description: A dedicated username. The user could be also in hdbuserstore. Defaults to C(SYSTEM). + type: str + default: SYSTEM + userstore: + description: If C(true) the user must be in hdbuserstore. + type: bool + default: false + version_added: 3.5.0 + password: + description: + - The password to connect to the database. + - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should + be used whenever possible, as command line arguments can be seen by other users + on the same machine." + type: str + autocommit: + description: Autocommit the statement. + type: bool + default: true + host: + description: The Host IP address. The port can be defined as well. + type: str + database: + description: Define the database on which to connect. + type: str + encrypted: + description: Use encrypted connection. Defaults to C(false). + type: bool + default: false + filepath: + description: + - One or more files each containing one SQL query to run. + - Must be a string or list containing strings. 
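The gunicorn module's option-to-flag mapping above turns module parameters into a daemonized command line. A small sketch of that assembly under the same mapping; all paths and values are placeholders.

```python
# Sketch of the gunicorn argv assembly used above; paths are placeholders.
gunicorn_options = {
    'config': '-c',
    'chdir': '--chdir',
    'worker': '-k',
    'user': '-u',
}
params = {'config': '/etc/gunicorn.cfg', 'chdir': '/srv/app',
          'worker': 'gevent', 'user': None}

options = ['-D']  # always daemonize, as the module does
for option, flag in gunicorn_options.items():
    value = params.get(option)
    if value:  # unset options are simply skipped
        options.extend([flag, value])

args = ['/usr/local/bin/gunicorn'] + options + ['wsgi']
print(' '.join(args))
# /usr/local/bin/gunicorn -D -c /etc/gunicorn.cfg --chdir /srv/app -k gevent wsgi
```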
+ type: list + elements: path + query: + description: + - SQL query to run. + - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list. + It is better to supply a one-element list instead to avoid mangled input. + type: list + elements: str +author: + - Rainer Leber (@rainerleber) +''' + +EXAMPLES = r''' +- name: Simple select query + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + query: "select user_name from users" + +- name: Run several queries + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + query: + - "select user_name from users;" + - select * from SYSTEM; + host: "localhost" + autocommit: false + +- name: Run several queries from file + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + filepath: + - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt + - /tmp/HANA.txt + host: "localhost" + +- name: Run several queries from user store + community.general.hana_query: + sid: "hdb" + instance: "01" + user: hdbstoreuser + userstore: true + query: + - "select user_name from users;" + - select * from users; + autocommit: false +''' + +RETURN = r''' +query_result: + description: List containing results of all queries executed (one sublist for every query). + returned: on success + type: list + elements: list + sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]] +''' + +import csv +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import StringIO +from ansible.module_utils.common.text.converters import to_native + + +def csv_to_list(rawcsv): + reader_raw = csv.DictReader(StringIO(rawcsv)) + reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw] + return list(reader) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + sid=dict(type='str', required=True), + instance=dict(type='str', required=True), + encrypted=dict(type='bool', default=False), + host=dict(type='str', required=False), + user=dict(type='str', default="SYSTEM"), + userstore=dict(type='bool', default=False), + password=dict(type='str', no_log=True), + database=dict(type='str', required=False), + query=dict(type='list', elements='str', required=False), + filepath=dict(type='list', elements='path', required=False), + autocommit=dict(type='bool', default=True), + ), + required_one_of=[('query', 'filepath')], + required_if=[('userstore', False, ['password'])], + supports_check_mode=False, + ) + rc, out, err, out_raw = [0, [], "", ""] + + params = module.params + + sid = (params['sid']).upper() + instance = params['instance'] + user = params['user'] + userstore = params['userstore'] + password = params['password'] + autocommit = params['autocommit'] + host = params['host'] + database = params['database'] + encrypted = params['encrypted'] + + filepath = params['filepath'] + query = params['query'] + + bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance) + + try: + command = [module.get_bin_path(bin_path, required=True)] + except Exception as e: + module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". 
Please check SID and instance number: "{1}"'.format(bin_path, to_native(e))) + + if encrypted is True: + command.extend(['-attemptencrypt']) + if autocommit is False: + command.extend(['-z']) + if host is not None: + command.extend(['-n', host]) + if database is not None: + command.extend(['-d', database]) + # -x Suppresses additional output, such as the number of selected rows in a result set. + if userstore: + command.extend(['-x', '-U', user]) + else: + command.extend(['-x', '-i', instance, '-u', user, '-p', password]) + + if filepath is not None: + command.extend(['-I']) + for p in filepath: + # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt, + # iterates through files and append the output to var out. + query_command = command + [p] + (rc, out_raw, err) = module.run_command(query_command) + out.append(csv_to_list(out_raw)) + if query is not None: + for q in query: + # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users", + # iterates through multiple commands and append the output to var out. + query_command = command + [q] + (rc, out_raw, err) = module.run_command(query_command) + out.append(csv_to_list(out_raw)) + changed = True + + module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/haproxy.py b/ansible_collections/community/general/plugins/modules/haproxy.py new file mode 100644 index 000000000..56f987d80 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/haproxy.py @@ -0,0 +1,488 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Ravi Bhure +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: haproxy +short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands +author: + - Ravi Bhure (@ravibhure) +description: + - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. +notes: + - Enable, disable and drain commands are restricted and can only be issued on + sockets configured for level 'admin'. For example, you can add the line + 'stats socket /var/run/haproxy.sock level admin' to the general section of + haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). + - Depends on netcat (C(nc)) being available; you need to install the appropriate + package for your operating system before this module can be used. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + backend: + description: + - Name of the HAProxy backend pool. + - If this parameter is unset, it will be auto-detected. + type: str + drain: + description: + - Wait until the server has no active connections or until the timeout + determined by wait_interval and wait_retries is reached. + - Continue only after the status changes to C(MAINT). + - This overrides the shutdown_sessions option. + type: bool + default: false + host: + description: + - Name of the backend host to change. 
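hdbsql prints each result set as CSV, which the C(csv_to_list) helper above converts into a list of dicts. A sketch of that conversion on a hypothetical captured output follows; the sample data is invented for illustration.

```python
# Sketch of csv_to_list on a hypothetical hdbsql CSV result.
import csv
from io import StringIO

raw = 'USER_NAME\nSYSTEM\nSYS\n'  # hypothetical hdbsql output


def csv_to_list(rawcsv):
    # DictReader takes the first CSV row as column names
    reader = csv.DictReader(StringIO(rawcsv))
    return [{k: v.strip() for k, v in row.items()} for row in reader]


print(csv_to_list(raw))  # [{'USER_NAME': 'SYSTEM'}, {'USER_NAME': 'SYS'}]
```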
+ type: str + required: true + shutdown_sessions: + description: + - When disabling a server, immediately terminate all the sessions attached + to the specified server. + - This can be used to terminate long-running sessions after a server is put + into maintenance mode. Overridden by the drain option. + type: bool + default: false + socket: + description: + - Path to the HAProxy socket file. + type: path + default: /var/run/haproxy.sock + state: + description: + - Desired state of the provided backend host. + - Note that C(drain) state was added in version 2.4. + - It is supported only by HAProxy version 1.5 or later. + - When used on versions < 1.5, it will be ignored. + type: str + required: true + choices: [ disabled, drain, enabled ] + agent: + description: + - Disable/enable agent checks (depending on I(state) value). + type: bool + default: false + version_added: 1.0.0 + health: + description: + - Disable/enable health checks (depending on I(state) value). + type: bool + default: false + version_added: "1.0.0" + fail_on_not_found: + description: + - Fail whenever trying to enable/disable a backend host that does not exist. + type: bool + default: false + wait: + description: + - Wait until the server reports a status of C(UP) when I(state=enabled), + status of C(MAINT) when I(state=disabled) or status of C(DRAIN) when I(state=drain). + type: bool + default: false + wait_interval: + description: + - Number of seconds to wait between retries. + type: int + default: 5 + wait_retries: + description: + - Number of times to check for status after changing the state. + type: int + default: 25 + weight: + description: + - The weight value to set for the server. + - If the value ends with the C(%) sign, then the new weight will be + relative to the initially configured weight. + - Relative weights are only permitted between 0 and 100% and absolute + weights are permitted between 0 and 256. + type: str +''' + +EXAMPLES = r''' +- name: Disable server in 'www' backend pool + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + backend: www + +- name: Disable server in 'www' backend pool, also stop health/agent checks + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + health: true + agent: true + +- name: Disable server without backend pool name (apply to all available backend pool) + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + +- name: Disable server, provide socket file + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + +- name: Disable server, provide socket file, wait until status reports in maintenance + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + wait: true + +# Place server in drain mode, providing a socket file. Then check the server's +# status every minute for up to an hour, continuing once it changes to +# maintenance mode and failing otherwise. 
+- community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + wait: true + drain: true + wait_interval: 60 + wait_retries: 60 + +- name: Disable backend server in 'www' backend pool and drop open sessions to it + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + backend: www + socket: /var/run/haproxy.sock + shutdown_sessions: true + +- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found + community.general.haproxy: + state: disabled + host: '{{ inventory_hostname }}' + fail_on_not_found: true + +- name: Enable server in 'www' backend pool + community.general.haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + +- name: Enable server in 'www' backend pool wait until healthy + community.general.haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + wait: true + +- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health + community.general.haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + wait: true + wait_retries: 10 + wait_interval: 5 + +- name: Enable server in 'www' backend pool with change server(s) weight + community.general.haproxy: + state: enabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + weight: 10 + backend: www + +- name: Set the server in 'www' backend pool to drain mode + community.general.haproxy: + state: drain + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www +''' + +import csv +import socket +import time +from string import Template + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text + + +DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock" +RECV_SIZE = 1024 +ACTION_CHOICES = ['enabled', 'disabled', 'drain'] +WAIT_RETRIES = 25 +WAIT_INTERVAL = 5 + + +###################################################################### +class TimeoutException(Exception): + pass + + +class HAProxy(object): + """ + Used for communicating with HAProxy through its local UNIX socket interface. + Perform common tasks in Haproxy related to enable server and + disable server. + + The complete set of external commands Haproxy handles is documented + on their website: + + http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands + """ + + def __init__(self, module): + self.module = module + + self.state = self.module.params['state'] + self.host = self.module.params['host'] + self.backend = self.module.params['backend'] + self.weight = self.module.params['weight'] + self.socket = self.module.params['socket'] + self.shutdown_sessions = self.module.params['shutdown_sessions'] + self.fail_on_not_found = self.module.params['fail_on_not_found'] + self.agent = self.module.params['agent'] + self.health = self.module.params['health'] + self.wait = self.module.params['wait'] + self.wait_retries = self.module.params['wait_retries'] + self.wait_interval = self.module.params['wait_interval'] + self._drain = self.module.params['drain'] + self.command_results = {} + + def execute(self, cmd, timeout=200, capture_output=True): + """ + Executes a HAProxy command by sending a message to a HAProxy's local + UNIX socket and waiting up to 'timeout' milliseconds for the response. 
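Outside the module, the same exchange is a one-shot UNIX-socket conversation: connect, send one newline-terminated command, read until EOF. A minimal sketch follows; the socket path is the usual admin socket and is a placeholder.

```python
# Minimal sketch of a one-shot HAProxy admin-socket command.
# The socket path is a placeholder for your 'stats socket' setting.
import socket


def haproxy_cmd(cmd, sock_path='/var/run/haproxy.sock', recv_size=1024):
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect(sock_path)
    client.sendall(('%s\n' % cmd).encode())
    result = b''
    buf = client.recv(recv_size)
    while buf:  # HAProxy closes the connection when the reply is complete
        result += buf
        buf = client.recv(recv_size)
    client.close()
    return result.decode()

# print(haproxy_cmd('show stat'))
```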
+ """ + self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.client.connect(self.socket) + self.client.sendall(to_bytes('%s\n' % cmd)) + + result = b'' + buf = b'' + buf = self.client.recv(RECV_SIZE) + while buf: + result += buf + buf = self.client.recv(RECV_SIZE) + result = to_text(result, errors='surrogate_or_strict') + + if capture_output: + self.capture_command_output(cmd, result.strip()) + self.client.close() + return result + + def capture_command_output(self, cmd, output): + """ + Capture the output for a command + """ + if 'command' not in self.command_results: + self.command_results['command'] = [] + self.command_results['command'].append(cmd) + if 'output' not in self.command_results: + self.command_results['output'] = [] + self.command_results['output'].append(output) + + def discover_all_backends(self): + """ + Discover all entries with svname = 'BACKEND' and return a list of their corresponding + pxnames + """ + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r))) + + def discover_version(self): + """ + Attempt to extract the haproxy version. + Return a tuple containing major and minor version. + """ + data = self.execute('show info', 200, False) + lines = data.splitlines() + line = [x for x in lines if 'Version:' in x] + try: + version_values = line[0].partition(':')[2].strip().split('.', 3) + version = (int(version_values[0]), int(version_values[1])) + except (ValueError, TypeError, IndexError): + version = None + + return version + + def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None): + """ + Run some command on the specified backends. If no backends are provided they will + be discovered automatically (all backends) + """ + # Discover backends if none are given + if pxname is None: + backends = self.discover_all_backends() + else: + backends = [pxname] + + # Run the command for each requested backend + for backend in backends: + # Fail when backends were not found + state = self.get_state_for(backend, svname) + if (self.fail_on_not_found) and state is None: + self.module.fail_json( + msg="The specified backend '%s/%s' was not found!" % (backend, svname)) + + if state is not None: + self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) + if self.wait: + self.wait_until_status(backend, svname, wait_for_status) + + def get_state_for(self, pxname, svname): + """ + Find the state of specific services. When pxname is not set, get all backends for a specific host. + Returns a list of dictionaries containing the status and weight for those services. + """ + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + state = tuple( + map( + lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']}, + filter(lambda d: (pxname is None or d['pxname'] + == pxname) and d['svname'] == svname, r) + ) + ) + return state or None + + def wait_until_status(self, pxname, svname, status): + """ + Wait for a service to reach the specified status. Try RETRIES times + with INTERVAL seconds of sleep in between. If the service has not reached + the expected status in that time, the module will fail. If the service was + not found, the module will fail. 
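C(execute_for_backends) above expands the C($pxname)/C($svname) placeholders with C(string.Template) before sending each command. A short sketch of that expansion:

```python
# Sketch of the $pxname/$svname command expansion used above.
from string import Template

cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
print(Template(cmd).substitute(pxname='www', svname='web01'))
# get weight www/web01; enable server www/web01
```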
+ """ + # check the status up to wait_retries times (range(1, ...) would try one time too few) + for i in range(self.wait_retries): + state = self.get_state_for(pxname, svname) + + # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here + # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching + if status in state[0]['status']: + if not self._drain or state[0]['scur'] == '0': + return True + time.sleep(self.wait_interval) + + self.module.fail_json(msg="server %s/%s did not reach status '%s' after %d retries. Aborting." % + (pxname, svname, status, self.wait_retries)) + + def enabled(self, host, backend, weight): + """ + Enabled action: marks the server UP, re-enables its checks, gets the + current weight for the server (the default), and sets the weight for + the HAProxy backend server when one is provided. + """ + cmd = "get weight $pxname/$svname; enable server $pxname/$svname" + if self.agent: + cmd += "; enable agent $pxname/$svname" + if self.health: + cmd += "; enable health $pxname/$svname" + if weight: + cmd += "; set weight $pxname/$svname %s" % weight + self.execute_for_backends(cmd, backend, host, 'UP') + + def disabled(self, host, backend, shutdown_sessions): + """ + Disabled action: marks the server DOWN for maintenance. In this mode, no more checks will be + performed on the server until it leaves maintenance; optionally, all + sessions are shut down while disabling the backend host server. + """ + cmd = "get weight $pxname/$svname" + if self.agent: + cmd += "; disable agent $pxname/$svname" + if self.health: + cmd += "; disable health $pxname/$svname" + cmd += "; disable server $pxname/$svname" + if shutdown_sessions: + cmd += "; shutdown sessions server $pxname/$svname" + self.execute_for_backends(cmd, backend, host, 'MAINT') + + def drain(self, host, backend, status='DRAIN'): + """ + Drain action, sets the server to DRAIN mode. + In this mode, the server will not accept any new connections + other than those that are accepted via persistence. + """ + haproxy_version = self.discover_version() + + # check if haproxy version supports DRAIN state (starting with 1.5) + if haproxy_version and (1, 5) <= haproxy_version: + cmd = "set server $pxname/$svname state drain" + self.execute_for_backends(cmd, backend, host, "DRAIN") + if status == "MAINT": + self.disabled(host, backend, self.shutdown_sessions) + + def act(self): + """ + Figure out what action was requested from Ansible, and then do it. 
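The C(drain) gate above relies on C(discover_version) turning the C(show info) C(Version:) line into a comparable tuple. A sketch of that parse and the C((1, 5)) comparison, using a hypothetical C(show info) line:

```python
# Sketch of the version parse behind the DRAIN-support check above.
# The sample line is hypothetical 'show info' output.
line = 'Version: 1.8.27-493ce0b'

version_values = line.partition(':')[2].strip().split('.', 3)
version = (int(version_values[0]), int(version_values[1]))
print(version, (1, 5) <= version)  # (1, 8) True -> DRAIN is supported
```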
+ """ + # Get the state before the run + self.command_results['state_before'] = self.get_state_for(self.backend, self.host) + + # toggle enable/disable server + if self.state == 'enabled': + self.enabled(self.host, self.backend, self.weight) + elif self.state == 'disabled' and self._drain: + self.drain(self.host, self.backend, status='MAINT') + elif self.state == 'disabled': + self.disabled(self.host, self.backend, self.shutdown_sessions) + elif self.state == 'drain': + self.drain(self.host, self.backend) + else: + self.module.fail_json(msg="unknown state specified: '%s'" % self.state) + + # Get the state after the run + self.command_results['state_after'] = self.get_state_for(self.backend, self.host) + + # Report change status + self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after']) + + self.module.exit_json(**self.command_results) + + +def main(): + + # load ansible module object + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', required=True, choices=ACTION_CHOICES), + host=dict(type='str', required=True), + backend=dict(type='str'), + weight=dict(type='str'), + socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION), + shutdown_sessions=dict(type='bool', default=False), + fail_on_not_found=dict(type='bool', default=False), + health=dict(type='bool', default=False), + agent=dict(type='bool', default=False), + wait=dict(type='bool', default=False), + wait_retries=dict(type='int', default=WAIT_RETRIES), + wait_interval=dict(type='int', default=WAIT_INTERVAL), + drain=dict(type='bool', default=False), + ), + ) + + if not socket: + module.fail_json(msg="unable to locate haproxy socket") + + ansible_haproxy = HAProxy(module) + ansible_haproxy.act() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/heroku_collaborator.py b/ansible_collections/community/general/plugins/modules/heroku_collaborator.py new file mode 100644 index 000000000..e7b0de3f9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/heroku_collaborator.py @@ -0,0 +1,138 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: heroku_collaborator +short_description: Add or delete app collaborators on Heroku +description: + - Manages collaborators for Heroku apps. + - If set to C(present) and heroku user is already collaborator, then do nothing. + - If set to C(present) and heroku user is not collaborator, then add user to app. + - If set to C(absent) and heroku user is collaborator, then delete user from app. 
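The module drives these checks through the heroku3 client. A minimal sketch of the same add/remove calls, assuming the heroku3 package; the API key, app name, and e-mail address are placeholders.

```python
# Minimal sketch of the heroku3 calls used by this module.
# API key, app name, and e-mail below are placeholders.
import heroku3

client = heroku3.from_key('HEROKU_API_KEY')
app = client.apps()['heroku-example-app']

# mirror the module's membership check before changing anything
collaborators = [c.user.email for c in app.collaborators()]
if 'a.b@example.com' not in collaborators:
    app.add_collaborator(user_id_or_email='a.b@example.com', silent=True)
# app.remove_collaborator('a.b@example.com')  # state=absent
```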
+author: + - Marcel Arns (@marns93) +requirements: + - heroku3 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_key: + type: str + description: + - Heroku API key. + apps: + type: list + elements: str + description: + - List of Heroku App names. + required: true + suppress_invitation: + description: + - Suppress email invitation when creating collaborator. + type: bool + default: false + user: + type: str + description: + - User ID or e-mail address. + required: true + state: + type: str + description: + - Create or remove the Heroku collaborator. + choices: ["present", "absent"] + default: "present" +notes: + - The C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key). + - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"]. +''' + +EXAMPLES = ''' +- name: Create a heroku collaborator + community.general.heroku_collaborator: + api_key: YOUR_API_KEY + user: max.mustermann@example.com + apps: heroku-example-app + state: present + +- name: An example of using the module in a loop + community.general.heroku_collaborator: + api_key: YOUR_API_KEY + user: '{{ item.user }}' + apps: '{{ item.apps | default(apps) }}' + suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}' + state: '{{ item.state | default("present") }}' + with_items: + - { user: 'a.b@example.com' } + - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false } + - { user: 'x.y@example.com', apps: ["heroku-example-app"] } +''' + +RETURN = ''' # ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper + + +def add_or_delete_heroku_collaborator(module, client): + user = module.params['user'] + state = module.params['state'] + affected_apps = [] + result_state = False + + for app in module.params['apps']: + if app not in client.apps(): + module.fail_json(msg='App {0} does not exist'.format(app)) + + heroku_app = client.apps()[app] + + heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()] + + if state == 'absent' and user in heroku_collaborator_list: + if not module.check_mode: + heroku_app.remove_collaborator(user) + affected_apps += [app] + result_state = True + elif state == 'present' and user not in heroku_collaborator_list: + if not module.check_mode: + heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation']) + affected_apps += [app] + result_state = True + + return result_state, affected_apps + + +def main(): + argument_spec = HerokuHelper.heroku_argument_spec() + argument_spec.update( + user=dict(required=True, type='str'), + apps=dict(required=True, type='list', elements='str'), + suppress_invitation=dict(default=False, type='bool'), + state=dict(default='present', type='str', choices=['present', 'absent']), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = HerokuHelper(module).get_heroku_client() + + has_changed, msg = add_or_delete_heroku_collaborator(module, client) + module.exit_json(changed=has_changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hg.py b/ansible_collections/community/general/plugins/modules/hg.py new file mode 100644 index 000000000..dbbd504b4 --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/hg.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Yeukhon Wong +# Copyright (c) 2014, Nate Coraor +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: hg +short_description: Manages Mercurial (hg) repositories +description: + - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local addresses. +author: "Yeukhon Wong (@yeukhon)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + repo: + description: + - The repository address. + required: true + aliases: [ name ] + type: str + dest: + description: + - Absolute path of where the repository should be cloned to. + This parameter is required, unless I(clone) and I(update) are set to C(false). + type: path + revision: + description: + - Equivalent to the C(-r) option of the hg command, which can be a changeset, + revision number, branch name or even a tag. + aliases: [ version ] + type: str + force: + description: + - Discards uncommitted changes. Runs C(hg update -C). Prior to + 1.9, the default was C(true). + type: bool + default: false + purge: + description: + - Deletes untracked files. Runs C(hg purge). + type: bool + default: false + update: + description: + - If C(false), do not retrieve new revisions from the origin repository. + type: bool + default: true + clone: + description: + - If C(false), do not clone the repository if it does not exist locally. + type: bool + default: true + executable: + description: + - Path to hg executable to use. If not supplied, + the normal mechanism for resolving binary paths will be used. + type: str +notes: + - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). + - "If the task seems to be hanging, first verify the remote host is in C(known_hosts). + SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, + one solution is to add the remote host public key to C(/etc/ssh/ssh_known_hosts) before calling + the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." + - As of 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, + if the underlying system still uses a Python version below 2.7.9, you will have issues checking out + bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). +''' + +EXAMPLES = ''' +- name: Ensure the current working copy is inside the stable branch and delete untracked files if any. + community.general.hg: + repo: https://bitbucket.org/user/repo1 + dest: /home/user/repo1 + revision: stable + purge: true + +- name: Get information about the repository whether or not it has already been cloned locally. 
+ community.general.hg: + repo: git://bitbucket.org/user/repo + dest: /srv/checkout + clone: false + update: false +''' + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class Hg(object): + def __init__(self, module, dest, repo, revision, hg_path): + self.module = module + self.dest = dest + self.repo = repo + self.revision = revision + self.hg_path = hg_path + + def _command(self, args_list): + (rc, out, err) = self.module.run_command([self.hg_path] + args_list) + return (rc, out, err) + + def _list_untracked(self): + args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print'] + return self._command(args) + + def get_revision(self): + """ + hg id -b -i -t returns a string in the format: + "<changeset>[+] <branch_name> <tags>" + This format lists the state of the current working copy, + and indicates whether there are uncommitted changes by the + plus sign. Otherwise, the sign is omitted. + + Read the full description via hg id --help + """ + (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + else: + return to_native(out).strip('\n') + + def get_remote_revision(self): + (rc, out, err) = self._command(['id', self.repo]) + if rc != 0: + self.module.fail_json(msg=err) + else: + return to_native(out).strip('\n') + + def has_local_mods(self): + now = self.get_revision() + if '+' in now: + return True + else: + return False + + def discard(self): + before = self.has_local_mods() + if not before: + return False + + args = ['update', '-C', '-R', self.dest, '-r', '.'] + (rc, out, err) = self._command(args) + if rc != 0: + self.module.fail_json(msg=err) + + after = self.has_local_mods() + if before != after and not after: # no more local modification + return True + + def purge(self): + # before purge, find out if there are any untracked files + (rc1, out1, err1) = self._list_untracked() + if rc1 != 0: + self.module.fail_json(msg=err1) + + # there are some untracked files + if out1 != '': + args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] + (rc2, out2, err2) = self._command(args) + if rc2 != 0: + self.module.fail_json(msg=err2) + return True + else: + return False + + def cleanup(self, force, purge): + discarded = False + purged = False + + if force: + discarded = self.discard() + if purge: + purged = self.purge() + if discarded or purged: + return True + else: + return False + + def pull(self): + return self._command( + ['pull', '-R', self.dest, self.repo]) + + def update(self): + if self.revision is not None: + return self._command(['update', '-r', self.revision, '-R', self.dest]) + return self._command(['update', '-R', self.dest]) + + def clone(self): + if self.revision is not None: + return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + return self._command(['clone', self.repo, self.dest]) + + @property + def at_revision(self): + """ + There is no point in pulling from a potentially down/slow remote site + if the desired changeset is already the current changeset. 
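The check below shells out to C(hg) and compares the printed node against the requested revision. A standalone sketch of the same probe with C(subprocess); the repository path and revision are placeholders.

```python
# Standalone sketch of the at_revision probe; path and revision are placeholders.
import subprocess


def at_revision(dest, revision):
    if revision is None or len(revision) < 7:
        return False  # assume a short rev number, tag, or branch instead
    out = subprocess.run(
        ['hg', '--debug', 'id', '-i', '-R', dest],
        capture_output=True, text=True, check=True,
    ).stdout
    # --debug makes 'hg id -i' print the full 40-character node
    return out.startswith(revision)

# at_revision('/home/user/repo1', 'full-40-char-node-here')
```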
+ """ + if self.revision is None or len(self.revision) < 7: + # Assume it's a rev number, tag, or branch + return False + (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + if out.startswith(self.revision): + return True + return False + + +# =========================================== + +def main(): + module = AnsibleModule( + argument_spec=dict( + repo=dict(type='str', required=True, aliases=['name']), + dest=dict(type='path'), + revision=dict(type='str', default=None, aliases=['version']), + force=dict(type='bool', default=False), + purge=dict(type='bool', default=False), + update=dict(type='bool', default=True), + clone=dict(type='bool', default=True), + executable=dict(type='str', default=None), + ), + ) + repo = module.params['repo'] + dest = module.params['dest'] + revision = module.params['revision'] + force = module.params['force'] + purge = module.params['purge'] + update = module.params['update'] + clone = module.params['clone'] + hg_path = module.params['executable'] or module.get_bin_path('hg', True) + if dest is not None: + hgrc = os.path.join(dest, '.hg/hgrc') + + # initial states + before = '' + changed = False + cleaned = False + + if not dest and (clone or update): + module.fail_json(msg="the destination directory must be specified unless clone=false and update=false") + + hg = Hg(module, dest, repo, revision, hg_path) + + # If there is no hgrc file, then assume repo is absent + # and perform clone. Otherwise, perform pull and update. + if not clone and not update: + out = hg.get_remote_revision() + module.exit_json(after=out, changed=False) + if not os.path.exists(hgrc): + if clone: + (rc, out, err) = hg.clone() + if rc != 0: + module.fail_json(msg=err) + else: + module.exit_json(changed=False) + elif not update: + # Just return having found a repo already in the dest path + before = hg.get_revision() + elif hg.at_revision: + # no update needed, don't pull + before = hg.get_revision() + + # but force and purge if desired + cleaned = hg.cleanup(force, purge) + else: + # get the current state before doing pulling + before = hg.get_revision() + + # can perform force and purge + cleaned = hg.cleanup(force, purge) + + (rc, out, err) = hg.pull() + if rc != 0: + module.fail_json(msg=err) + + (rc, out, err) = hg.update() + if rc != 0: + module.fail_json(msg=err) + + after = hg.get_revision() + if before != after or cleaned: + changed = True + + module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hipchat.py b/ansible_collections/community/general/plugins/modules/hipchat.py new file mode 100644 index 000000000..11b5fb735 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hipchat.py @@ -0,0 +1,220 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: hipchat +short_description: Send a message to Hipchat +description: + - Send a message to a Hipchat room, with options to control the formatting. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - API token. + required: true + room: + type: str + description: + - ID or name of the room. + required: true + msg_from: + type: str + description: + - Name the message will appear to be sent from. Max length is 15 + characters - above this it will be truncated. + default: Ansible + aliases: [from] + msg: + type: str + description: + - The message body. + required: true + color: + type: str + description: + - Background color for the message. + default: yellow + choices: [ "yellow", "red", "green", "purple", "gray", "random" ] + msg_format: + type: str + description: + - Message format. + default: text + choices: [ "text", "html" ] + notify: + description: + - If true, a notification will be triggered for users in the room. + type: bool + default: true + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: true + api: + type: str + description: + - API url if using a self-hosted hipchat server. For Hipchat API version + 2 use the default URI with C(/v2) instead of C(/v1). + default: 'https://api.hipchat.com/v1' + +author: +- Shirou Wakayama (@shirou) +- Paul Bourdel (@pb8226) +''' + +EXAMPLES = ''' +- name: Send a message to a Hipchat room + community.general.hipchat: + room: notif + msg: Ansible task finished + +- name: Send a message to a Hipchat room using Hipchat API version 2 + community.general.hipchat: + api: https://api.hipchat.com/v2/ + token: OAUTH2_TOKEN + room: notify + msg: Ansible task finished +''' + +# =========================================== +# HipChat module specific support methods. 
+# + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.six.moves.urllib.request import pathname2url +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import fetch_url + + +DEFAULT_URI = "https://api.hipchat.com/v1" + +MSG_URI_V1 = "/rooms/message" + +NOTIFY_URI_V2 = "/room/{id_or_name}/notification" + + +def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=MSG_URI_V1): + '''sending message to hipchat v1 server''' + + params = {} + params['room_id'] = room + params['from'] = msg_from[:15] # max length is 15 + params['message'] = msg + params['message_format'] = msg_format + params['color'] = color + params['api'] = api + params['notify'] = int(notify) + + url = api + MSG_URI_V1 + "?auth_token=%s" % (token) + data = urlencode(params) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + + response, info = fetch_url(module, url, data=data) + if info['status'] == 200: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=NOTIFY_URI_V2): + '''sending message to hipchat v2 server''' + + headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} + + body = dict() + body['message'] = msg + body['color'] = color + body['message_format'] = msg_format + body['notify'] = notify + + POST_URL = api + NOTIFY_URI_V2 + + url = POST_URL.replace('{id_or_name}', pathname2url(room)) + data = json.dumps(body) + + if module.check_mode: + # In check mode, exit before actually sending the message + module.exit_json(changed=False) + + response, info = fetch_url(module, url, data=data, headers=headers, method='POST') + + # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows + # 204 to be the expected result code. + if info['status'] in [200, 204]: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +# =========================================== +# Module execution. 
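For the v2 path above, the request is a JSON body with a Bearer token against the room notification endpoint. A standard-library sketch of the same POST; the token and room name are placeholders.

```python
# Standard-library sketch of the HipChat v2 notification POST shown above.
# Token and room below are placeholders.
import json
from urllib.parse import quote
from urllib.request import Request, urlopen

token = 'OAUTH2_TOKEN'
room = 'notify'
body = {'message': 'Ansible task finished', 'color': 'yellow',
        'message_format': 'text', 'notify': True}

url = 'https://api.hipchat.com/v2/room/{0}/notification'.format(quote(room))
req = Request(url, data=json.dumps(body).encode(),
              headers={'Authorization': 'Bearer %s' % token,
                       'Content-Type': 'application/json'})
# urlopen(req)  # expect HTTP 204 on success, per the module comment
```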
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + room=dict(required=True), + msg=dict(required=True), + msg_from=dict(default="Ansible", aliases=['from']), + color=dict(default="yellow", choices=["yellow", "red", "green", + "purple", "gray", "random"]), + msg_format=dict(default="text", choices=["text", "html"]), + notify=dict(default=True, type='bool'), + validate_certs=dict(default=True, type='bool'), + api=dict(default=DEFAULT_URI), + ), + supports_check_mode=True + ) + + token = module.params["token"] + room = str(module.params["room"]) + msg = module.params["msg"] + msg_from = module.params["msg_from"] + color = module.params["color"] + msg_format = module.params["msg_format"] + notify = module.params["notify"] + api = module.params["api"] + + try: + if api.find('/v2') != -1: + send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) + else: + send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) + except Exception as e: + module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) + + changed = True + module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/homebrew.py b/ansible_collections/community/general/plugins/modules/homebrew.py new file mode 100644 index 000000000..7592f95a4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/homebrew.py @@ -0,0 +1,981 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Andrew Dunham +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2015, Indrajit Raychaudhuri +# +# Based on macports (Jimmy Tang ) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: homebrew +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham (@andrew-d)" +requirements: + - homebrew must already be installed on the target system +short_description: Package manager for Homebrew +description: + - Manages Homebrew packages +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - A list of names of packages to install/remove. + aliases: [ 'formula', 'package', 'pkg' ] + type: list + elements: str + path: + description: + - "A C(:) separated list of paths to search for C(brew) executable. + Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, + providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system." + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - state of the package. + choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ] + default: present + type: str + update_homebrew: + description: + - update homebrew itself first. + type: bool + default: false + upgrade_all: + description: + - upgrade all homebrew packages. 
+ type: bool + default: false + aliases: ['upgrade'] + install_options: + description: + - options flags to install a package. + aliases: ['options'] + type: list + elements: str + upgrade_options: + description: + - Option flags to upgrade. + type: list + elements: str + version_added: '0.2.0' +notes: + - When used with a C(loop:) each package will be processed individually, + it is much more efficient to pass the list directly to the I(name) option. +''' + +EXAMPLES = ''' +# Install formula foo with 'brew' in default path +- community.general.homebrew: + name: foo + state: present + +# Install formula foo with 'brew' in alternate path C(/my/other/location/bin) +- community.general.homebrew: + name: foo + path: /my/other/location/bin + state: present + +# Update homebrew first and install formula foo with 'brew' in default path +- community.general.homebrew: + name: foo + state: present + update_homebrew: true + +# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path +- community.general.homebrew: + name: foo + state: latest + update_homebrew: true + +# Update homebrew and upgrade all packages +- community.general.homebrew: + update_homebrew: true + upgrade_all: true + +# Miscellaneous other examples +- community.general.homebrew: + name: foo + state: head + +- community.general.homebrew: + name: foo + state: linked + +- community.general.homebrew: + name: foo + state: absent + +- community.general.homebrew: + name: foo,bar + state: absent + +- community.general.homebrew: + name: foo + state: present + install_options: with-baz,enable-debug + +- name: Install formula foo with 'brew' from cask + community.general.homebrew: + name: homebrew/cask/foo + state: present + +- name: Use ignore-pinned option while upgrading all + community.general.homebrew: + upgrade_all: true + upgrade_options: ignore-pinned +''' + +RETURN = ''' +msg: + description: if the cache was updated or not + returned: always + type: str + sample: "Changed: 0, Unchanged: 2" +unchanged_pkgs: + description: + - List of package names which are unchanged after module run + returned: success + type: list + sample: ["awscli", "ag"] + version_added: '0.2.0' +changed_pkgs: + description: + - List of package names which are changed after module run + returned: success + type: list + sample: ['git', 'git-cola'] + version_added: '0.2.0' +''' + +import os.path +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems, string_types + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group_complement(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = filter(None, (line.split('#')[0].strip() for line in lines)) + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) +# /utils ------------------------------------------------------------------ }}} + + +class Homebrew(object): + '''A class to manage Homebrew packages.''' + + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {sep} # the OS-specific path separator + . 
# dots + \- # dashes + '''.format(sep=os.path.sep) + + VALID_BREW_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {sep} # the OS-specific path separator + . # dots + \- # dashes + '''.format(sep=os.path.sep) + + VALID_PACKAGE_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . # dots + / # slash (for taps) + \+ # plusses + \- # dashes + : # colons (for URLs) + @ # at-sign + ''' + + INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) + INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + ''' + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - colons + - os.path.sep + ''' + + if isinstance(path, string_types): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + ''' + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - os.path.sep + ''' + + if brew_path is None: + return True + + return ( + isinstance(brew_path, string_types) + and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + ) + + @classmethod + def valid_package(cls, package): + '''A valid package is either None or alphanumeric.''' + + if package is None: + return True + + return ( + isinstance(package, string_types) + and not cls.INVALID_PACKAGE_REGEX.search(package) + ) + + @classmethod + def valid_state(cls, state): + ''' + A valid state is one of: + - None + - installed + - upgraded + - head + - linked + - unlinked + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, string_types) + and state.lower() in ( + 'installed', + 'upgraded', + 'head', + 'linked', + 'unlinked', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not self.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewException(self.message) + + else: + if isinstance(path, string_types): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not self.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewException(self.message) + + else: + 
self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + @property + def current_package(self): + return self._current_package + + @current_package.setter + def current_package(self, package): + if not self.valid_package(package): + self._current_package = None + self.failed = True + self.message = 'Invalid package: {0}.'.format(package) + raise HomebrewException(self.message) + + else: + self._current_package = package + return package + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path, packages=None, state=None, + update_homebrew=False, upgrade_all=False, + install_options=None, upgrade_options=None): + if not install_options: + install_options = list() + if not upgrade_options: + upgrade_options = list() + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew, + upgrade_all=upgrade_all, + install_options=install_options, + upgrade_options=upgrade_options,) + + self._prep() + + # prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.changed_pkgs = [] + self.unchanged_pkgs = [] + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in iteritems(kwargs): + setattr(self, key, val) + + def _prep(self): + self._prep_brew_path() + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' 
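+            # Defensive branch: get_bin_path() with required=True already
+            # fails the module via fail_json() when 'brew' is not found,
+            # so in practice this code path should not be reached.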
+ raise HomebrewException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_package_is_installed(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "info", + self.current_package, + ] + rc, out, err = self.module.run_command(cmd) + for line in out.split('\n'): + if ( + re.search(r'Built from source', line) + or re.search(r'Poured from bottle', line) + ): + return True + + return False + + def _current_package_is_outdated(self): + if not self.valid_package(self.current_package): + return False + + rc, out, err = self.module.run_command([ + self.brew_path, + 'outdated', + self.current_package, + ]) + + return rc != 0 + + def _current_package_is_installed_from_head(self): + if not Homebrew.valid_package(self.current_package): + return False + elif not self._current_package_is_installed(): + return False + + rc, out, err = self.module.run_command([ + self.brew_path, + 'info', + self.current_package, + ]) + + try: + version_info = [line for line in out.split('\n') if line][0] + except IndexError: + return False + + return version_info.split(' ')[-1] == 'HEAD' + # /checks ------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.update_homebrew: + self._update_homebrew() + + if self.upgrade_all: + self._upgrade_all() + + if self.packages: + if self.state == 'installed': + return self._install_packages() + elif self.state == 'upgraded': + return self._upgrade_packages() + elif self.state == 'head': + return self._install_packages() + elif self.state == 'linked': + return self._link_packages() + elif self.state == 'unlinked': + return self._unlink_packages() + elif self.state == 'absent': + return self._uninstall_packages() + + # updated -------------------------------- {{{ + def _update_homebrew(self): + if self.module.check_mode: + self.changed = True + self.message = 'Homebrew would be updated.' + raise HomebrewException(self.message) + + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, string_types): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /updated ------------------------------- }}} + + # _upgrade_all --------------------------- {{{ + def _upgrade_all(self): + if self.module.check_mode: + self.changed = True + self.message = 'Homebrew packages would be upgraded.' 
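+            # In check mode the module raises instead of calling brew;
+            # run() catches HomebrewException and reports the pending change.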
+ raise HomebrewException(self.message) + cmd = [self.brew_path, 'upgrade'] + self.upgrade_options + + rc, out, err = self.module.run_command(cmd) + if rc == 0: + if not out: + self.message = 'Homebrew packages already upgraded.' + + else: + self.changed = True + self.message = 'Homebrew upgraded.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /_upgrade_all -------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self._current_package_is_installed(): + self.unchanged_count += 1 + self.unchanged_pkgs.append(self.current_package) + self.message = 'Package already installed: {0}'.format( + self.current_package, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be installed: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + if self.state == 'head': + head = '--HEAD' + else: + head = None + + opts = ( + [self.brew_path, 'install'] + + self.install_options + + [self.current_package, head] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if self._current_package_is_installed(): + self.changed_count += 1 + self.changed_pkgs.append(self.current_package) + self.changed = True + self.message = 'Package installed: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _install_packages(self): + for package in self.packages: + self.current_package = package + self._install_current_package() + + return True + # /installed ----------------------------- }}} + + # upgraded ------------------------------- {{{ + def _upgrade_current_package(self): + command = 'upgrade' + + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + command = 'install' + + if self._current_package_is_installed() and not self._current_package_is_outdated(): + self.message = 'Package is already upgraded: {0}'.format( + self.current_package, + ) + self.unchanged_count += 1 + self.unchanged_pkgs.append(self.current_package) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be upgraded: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, command] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if self._current_package_is_installed() and not self._current_package_is_outdated(): + self.changed_count += 1 + self.changed_pkgs.append(self.current_package) + self.changed = True + self.message = 'Package upgraded: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_all_packages(self): + opts = ( + [self.brew_path, 'upgrade'] + + self.install_options + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed = True + self.message = 'All packages upgraded.' 
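+            # Note: unlike _upgrade_all() above, the output of 'brew upgrade'
+            # is not inspected here, so any zero exit status reports changed.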
+ return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_packages(self): + if not self.packages: + self._upgrade_all_packages() + else: + for package in self.packages: + self.current_package = package + self._upgrade_current_package() + return True + # /upgraded ------------------------------ }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.unchanged_count += 1 + self.unchanged_pkgs.append(self.current_package) + self.message = 'Package already uninstalled: {0}'.format( + self.current_package, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be uninstalled: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'uninstall', '--force'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if not self._current_package_is_installed(): + self.changed_count += 1 + self.changed_pkgs.append(self.current_package) + self.changed = True + self.message = 'Package uninstalled: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _uninstall_packages(self): + for package in self.packages: + self.current_package = package + self._uninstall_current_package() + + return True + # /uninstalled ----------------------------- }}} + + # linked --------------------------------- {{{ + def _link_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.failed = True + self.message = 'Package not installed: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be linked: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'link'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_count += 1 + self.changed_pkgs.append(self.current_package) + self.changed = True + self.message = 'Package linked: {0}'.format(self.current_package) + + return True + else: + self.failed = True + self.message = 'Package could not be linked: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + def _link_packages(self): + for package in self.packages: + self.current_package = package + self._link_current_package() + + return True + # /linked -------------------------------- }}} + + # unlinked ------------------------------- {{{ + def _unlink_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.failed = True + self.message = 'Package not installed: {0}.'.format(self.current_package) + raise 
HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be unlinked: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'unlink'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_count += 1 + self.changed_pkgs.append(self.current_package) + self.changed = True + self.message = 'Package unlinked: {0}'.format(self.current_package) + + return True + else: + self.failed = True + self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + def _unlink_packages(self): + for package in self.packages: + self.current_package = package + self._unlink_current_package() + + return True + # /unlinked ------------------------------ }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["pkg", "package", "formula"], + required=False, + type='list', + elements='str', + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", + required=False, + type='path', + ), + state=dict( + default="present", + choices=[ + "present", "installed", + "latest", "upgraded", "head", + "linked", "unlinked", + "absent", "removed", "uninstalled", + ], + ), + update_homebrew=dict( + default=False, + type='bool', + ), + upgrade_all=dict( + default=False, + aliases=["upgrade"], + type='bool', + ), + install_options=dict( + default=None, + aliases=['options'], + type='list', + elements='str', + ), + upgrade_options=dict( + default=None, + type='list', + elements='str', + ) + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + p = module.params + + if p['name']: + packages = p['name'] + else: + packages = None + + path = p['path'] + if path: + path = path.split(':') + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('head', ): + state = 'head' + if state in ('latest', 'upgraded'): + state = 'upgraded' + if state == 'linked': + state = 'linked' + if state == 'unlinked': + state = 'unlinked' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + update_homebrew = p['update_homebrew'] + if not update_homebrew: + module.run_command_environ_update.update( + dict(HOMEBREW_NO_AUTO_UPDATE="True") + ) + upgrade_all = p['upgrade_all'] + p['install_options'] = p['install_options'] or [] + install_options = ['--{0}'.format(install_option) + for install_option in p['install_options']] + + p['upgrade_options'] = p['upgrade_options'] or [] + upgrade_options = ['--{0}'.format(upgrade_option) + for upgrade_option in p['upgrade_options']] + brew = Homebrew(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew, + upgrade_all=upgrade_all, install_options=install_options, + upgrade_options=upgrade_options) + (failed, changed, message) = brew.run() + changed_pkgs = brew.changed_pkgs + unchanged_pkgs = brew.unchanged_pkgs + + if failed: + module.fail_json(msg=message) + module.exit_json( + changed=changed, + msg=message, + unchanged_pkgs=unchanged_pkgs, + changed_pkgs=changed_pkgs + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/homebrew_cask.py 
b/ansible_collections/community/general/plugins/modules/homebrew_cask.py new file mode 100644 index 000000000..c992693b6 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/homebrew_cask.py @@ -0,0 +1,895 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2016, Indrajit Raychaudhuri +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: homebrew_cask +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Enric Lluelles (@enriclluelles)" +short_description: Install and uninstall homebrew casks +description: + - Manages Homebrew casks. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of cask to install or remove. + aliases: [ 'cask', 'package', 'pkg' ] + type: list + elements: str + path: + description: + - "':' separated list of paths to search for 'brew' executable." + default: '/usr/local/bin:/opt/homebrew/bin' + type: path + state: + description: + - State of the cask. + choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ] + default: present + type: str + sudo_password: + description: + - The sudo password to be passed to SUDO_ASKPASS. + required: false + type: str + update_homebrew: + description: + - Update homebrew itself first. + - Note that C(brew cask update) is a synonym for C(brew update). + type: bool + default: false + install_options: + description: + - Options flags to install a package. + aliases: [ 'options' ] + type: list + elements: str + accept_external_apps: + description: + - Allow external apps. + type: bool + default: false + upgrade_all: + description: + - Upgrade all casks. + - Mutually exclusive with C(upgraded) state. + type: bool + default: false + aliases: [ 'upgrade' ] + greedy: + description: + - Upgrade casks that auto update. + - Passes C(--greedy) to C(brew outdated --cask) when checking + if an installed cask has a newer version available, + or to C(brew upgrade --cask) when upgrading all casks. 
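+    # Implementation note: greedy feeds _current_cask_is_outdated() and
+    # _upgrade_all() below; it has no effect on plain installs or removals.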
+ type: bool + default: false +''' +EXAMPLES = ''' +- name: Install cask + community.general.homebrew_cask: + name: alfred + state: present + +- name: Remove cask + community.general.homebrew_cask: + name: alfred + state: absent + +- name: Install cask with install options + community.general.homebrew_cask: + name: alfred + state: present + install_options: 'appdir=/Applications' + +- name: Install cask with install options + community.general.homebrew_cask: + name: alfred + state: present + install_options: 'debug,appdir=/Applications' + +- name: Install cask with force option + community.general.homebrew_cask: + name: alfred + state: present + install_options: force + +- name: Allow external app + community.general.homebrew_cask: + name: alfred + state: present + accept_external_apps: true + +- name: Remove cask with force option + community.general.homebrew_cask: + name: alfred + state: absent + install_options: force + +- name: Upgrade all casks + community.general.homebrew_cask: + upgrade_all: true + +- name: Upgrade all casks with greedy option + community.general.homebrew_cask: + upgrade_all: true + greedy: true + +- name: Upgrade given cask with force option + community.general.homebrew_cask: + name: alfred + state: upgraded + install_options: force + +- name: Upgrade cask with greedy option + community.general.homebrew_cask: + name: 1password + state: upgraded + greedy: true + +- name: Using sudo password for installing cask + community.general.homebrew_cask: + name: wireshark + state: present + sudo_password: "{{ ansible_become_pass }}" +''' + +import os +import re +import tempfile + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +from ansible.module_utils.common.text.converters import to_bytes +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems, string_types + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewCaskException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group_complement(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = filter(None, (line.split('#')[0].strip() for line in lines)) + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) +# /utils ------------------------------------------------------------------ }}} + + +class HomebrewCask(object): + '''A class to manage Homebrew casks.''' + + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {sep} # the OS-specific path separator + . # dots + \- # dashes + '''.format(sep=os.path.sep) + + VALID_BREW_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {sep} # the OS-specific path separator + . # dots + \- # dashes + '''.format(sep=os.path.sep) + + VALID_CASK_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots + / # slash (for taps) + \- # dashes + @ # at symbol + ''' + + INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) + INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + ''' + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - colons + - os.path.sep + ''' + + if isinstance(path, (string_types)): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + ''' + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - os.path.sep + ''' + + if brew_path is None: + return True + + return ( + isinstance(brew_path, string_types) + and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + ) + + @classmethod + def valid_cask(cls, cask): + '''A valid cask is either None or alphanumeric + backslashes.''' + + if cask is None: + return True + + return ( + isinstance(cask, string_types) + and not cls.INVALID_CASK_REGEX.search(cask) + ) + + @classmethod + def valid_state(cls, state): + ''' + A valid state is one of: + - installed + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, string_types) + and state.lower() in ( + 'installed', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewCaskException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not self.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewCaskException(self.message) + + else: + if isinstance(path, string_types): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not self.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewCaskException(self.message) + + else: + self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + @property + def current_cask(self): + return self._current_cask + + @current_cask.setter + def current_cask(self, cask): + if not self.valid_cask(cask): + self._current_cask = None + self.failed = True + self.message = 'Invalid cask: {0}.'.format(cask) + raise 
HomebrewCaskException(self.message) + + else: + self._current_cask = cask + return cask + + @property + def brew_version(self): + try: + return self._brew_version + except AttributeError: + return None + + @brew_version.setter + def brew_version(self, brew_version): + self._brew_version = brew_version + + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path=path, casks=None, state=None, + sudo_password=None, update_homebrew=False, + install_options=None, accept_external_apps=False, + upgrade_all=False, greedy=False): + if not install_options: + install_options = list() + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, casks=casks, + state=state, sudo_password=sudo_password, + update_homebrew=update_homebrew, + install_options=install_options, + accept_external_apps=accept_external_apps, + upgrade_all=upgrade_all, + greedy=greedy, ) + + self._prep() + + # prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in iteritems(kwargs): + setattr(self, key, val) + + def _prep(self): + self._prep_brew_path() + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewCaskException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' + raise HomebrewCaskException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewCaskException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_cask_is_outdated(self): + if not self.valid_cask(self.current_cask): + return False + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, 'outdated', '--cask'] + else: + base_opts = [self.brew_path, 'cask', 'outdated'] + + cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask] + + rc, out, err = self.module.run_command(cask_is_outdated_command) + + return out != "" + + def _current_cask_is_installed(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, "list", "--cask"] + else: + base_opts = [self.brew_path, "cask", "list"] + + cmd = base_opts + [self.current_cask] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def _get_brew_version(self): + if self.brew_version: + return self.brew_version + + cmd = [self.brew_path, '--version'] + + rc, out, err = self.module.run_command(cmd, check_rc=True) + + # get version 
string from first line of "brew --version" output + version = out.split('\n')[0].split(' ')[1] + self.brew_version = version + return self.brew_version + + def _brew_cask_command_is_deprecated(self): + # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/) + return LooseVersion(self._get_brew_version()) >= LooseVersion('2.6.0') + # /checks ------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.upgrade_all: + return self._upgrade_all() + + if self.casks: + if self.state == 'installed': + return self._install_casks() + elif self.state == 'upgraded': + return self._upgrade_casks() + elif self.state == 'absent': + return self._uninstall_casks() + + self.failed = True + self.message = "You must select a cask to install." + raise HomebrewCaskException(self.message) + + # sudo_password fix ---------------------- {{{ + def _run_command_with_sudo_password(self, cmd): + rc, out, err = '', '', '' + + with tempfile.NamedTemporaryFile() as sudo_askpass_file: + sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password)) + os.chmod(sudo_askpass_file.name, 0o700) + sudo_askpass_file.file.close() + + rc, out, err = self.module.run_command( + cmd, + environ_update={'SUDO_ASKPASS': sudo_askpass_file.name} + ) + + self.module.add_cleanup_file(sudo_askpass_file.name) + + return (rc, out, err) + # /sudo_password fix --------------------- }}} + + # updated -------------------------------- {{{ + def _update_homebrew(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, string_types): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /updated ------------------------------- }}} + + # _upgrade_all --------------------------- {{{ + def _upgrade_all(self): + if self.module.check_mode: + self.changed = True + self.message = 'Casks would be upgraded.' + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + cmd = [self.brew_path, 'upgrade', '--cask'] + else: + cmd = [self.brew_path, 'cask', 'upgrade'] + + if self.greedy: + cmd = cmd + ['--greedy'] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): + self.message = 'Homebrew casks already upgraded.' + + else: + self.changed = True + self.message = 'Homebrew casks upgraded.' 
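+                # brew prints '==> No Casks to upgrade' on a no-op; any
+                # other output with a zero exit status counts as changed.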
+ + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /_upgrade_all -------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if '--force' not in self.install_options and self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already installed: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be installed: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, 'install', '--cask'] + else: + base_opts = [self.brew_path, 'cask', 'install'] + + opts = base_opts + [self.current_cask] + self.install_options + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask installed: {0}'.format(self.current_cask) + return True + elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err): + self.unchanged_count += 1 + self.message = 'Cask already installed: {0}'.format( + self.current_cask, + ) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _install_casks(self): + for cask in self.casks: + self.current_cask = cask + self._install_current_cask() + + return True + # /installed ----------------------------- }}} + + # upgraded ------------------------------- {{{ + def _upgrade_current_cask(self): + command = 'upgrade' + + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if not self._current_cask_is_installed(): + command = 'install' + + if self._current_cask_is_installed() and not self._current_cask_is_outdated(): + self.message = 'Cask is already upgraded: {0}'.format( + self.current_cask, + ) + self.unchanged_count += 1 + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be upgraded: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, command, '--cask'] + else: + base_opts = [self.brew_path, 'cask', command] + + opts = base_opts + self.install_options + [self.current_cask] + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if self._current_cask_is_installed() and not self._current_cask_is_outdated(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask upgraded: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _upgrade_casks(self): + for cask in self.casks: + self.current_cask = cask + self._upgrade_current_cask() + + return True + # /upgraded 
------------------------------ }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if not self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already uninstalled: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be uninstalled: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + if self._brew_cask_command_is_deprecated(): + base_opts = [self.brew_path, 'uninstall', '--cask'] + else: + base_opts = [self.brew_path, 'cask', 'uninstall'] + + opts = base_opts + [self.current_cask] + self.install_options + + cmd = [opt for opt in opts if opt] + + rc, out, err = '', '', '' + + if self.sudo_password: + rc, out, err = self._run_command_with_sudo_password(cmd) + else: + rc, out, err = self.module.run_command(cmd) + + if not self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask uninstalled: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _uninstall_casks(self): + for cask in self.casks: + self.current_cask = cask + self._uninstall_current_cask() + + return True + # /uninstalled --------------------------- }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["pkg", "package", "cask"], + required=False, + type='list', + elements='str', + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin", + required=False, + type='path', + ), + state=dict( + default="present", + choices=[ + "present", "installed", + "latest", "upgraded", + "absent", "removed", "uninstalled", + ], + ), + sudo_password=dict( + type="str", + required=False, + no_log=True, + ), + update_homebrew=dict( + default=False, + type='bool', + ), + install_options=dict( + default=None, + aliases=['options'], + type='list', + elements='str', + ), + accept_external_apps=dict( + default=False, + type='bool', + ), + upgrade_all=dict( + default=False, + aliases=["upgrade"], + type='bool', + ), + greedy=dict( + default=False, + type='bool', + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + p = module.params + + if p['name']: + casks = p['name'] + else: + casks = None + + path = p['path'] + if path: + path = path.split(':') + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('latest', 'upgraded'): + state = 'upgraded' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + sudo_password = p['sudo_password'] + + update_homebrew = p['update_homebrew'] + upgrade_all = p['upgrade_all'] + greedy = p['greedy'] + p['install_options'] = p['install_options'] or [] + install_options = ['--{0}'.format(install_option) + for install_option in p['install_options']] + + accept_external_apps = p['accept_external_apps'] + + brew_cask = HomebrewCask(module=module, path=path, casks=casks, + state=state, sudo_password=sudo_password, + update_homebrew=update_homebrew, + install_options=install_options, + accept_external_apps=accept_external_apps, + upgrade_all=upgrade_all, + 
greedy=greedy, + ) + (failed, changed, message) = brew_cask.run() + if failed: + module.fail_json(msg=message) + else: + module.exit_json(changed=changed, msg=message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/homebrew_tap.py b/ansible_collections/community/general/plugins/modules/homebrew_tap.py new file mode 100644 index 000000000..b230dbb34 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/homebrew_tap.py @@ -0,0 +1,279 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2016, Indrajit Raychaudhuri +# +# Based on homebrew (Andrew Dunham ) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: homebrew_tap +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" +short_description: Tap a Homebrew repository +description: + - Tap external Homebrew repositories. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The GitHub user/organization repository to tap. + required: true + aliases: ['tap'] + type: list + elements: str + url: + description: + - The optional git URL of the repository to tap. The URL is not + assumed to be on GitHub, and the protocol doesn't have to be HTTP. + Any location and protocol that git can handle is fine. + - I(name) option may not be a list of multiple taps (but a single + tap instead) when this option is provided. + required: false + type: str + state: + description: + - state of the repository. + choices: [ 'present', 'absent' ] + required: false + default: 'present' + type: str + path: + description: + - "A C(:) separated list of paths to search for C(brew) executable." 
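+    # Note: this default mirrors the search path used by the homebrew module.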
+ default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + version_added: '2.1.0' +requirements: [ homebrew ] +''' + +EXAMPLES = r''' +- name: Tap a Homebrew repository, state present + community.general.homebrew_tap: + name: homebrew/dupes + +- name: Tap a Homebrew repository, state absent + community.general.homebrew_tap: + name: homebrew/dupes + state: absent + +- name: Tap a Homebrew repository, state present + community.general.homebrew_tap: + name: homebrew/dupes,homebrew/science + state: present + +- name: Tap a Homebrew repository using url, state present + community.general.homebrew_tap: + name: telemachus/brew + url: 'https://bitbucket.org/telemachus/brew' +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def a_valid_tap(tap): + '''Returns True if the tap is valid.''' + regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') + return regex.match(tap) + + +def already_tapped(module, brew_path, tap): + '''Returns True if already tapped.''' + + rc, out, err = module.run_command([ + brew_path, + 'tap', + ]) + + taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] + tap_name = re.sub('homebrew-', '', tap.lower()) + + return tap_name in taps + + +def add_tap(module, brew_path, tap, url=None): + '''Adds a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif not already_tapped(module, brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'tap', + tap, + url, + ]) + if rc == 0: + changed = True + msg = 'successfully tapped: %s' % tap + else: + failed = True + msg = 'failed to tap: %s due to %s' % (tap, err) + + else: + msg = 'already tapped: %s' % tap + + return (failed, changed, msg) + + +def add_taps(module, brew_path, taps): + '''Adds one or more taps.''' + failed, changed, unchanged, added, msg = False, False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = add_tap(module, brew_path, tap) + if failed: + break + if changed: + added += 1 + else: + unchanged += 1 + + if failed: + msg = 'added: %d, unchanged: %d, error: ' + msg + msg = msg % (added, unchanged) + elif added: + changed = True + msg = 'added: %d, unchanged: %d' % (added, unchanged) + else: + msg = 'added: %d, unchanged: %d' % (added, unchanged) + + return (failed, changed, msg) + + +def remove_tap(module, brew_path, tap): + '''Removes a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif already_tapped(module, brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'untap', + tap, + ]) + if not already_tapped(module, brew_path, tap): + changed = True + msg = 'successfully untapped: %s' % tap + else: + failed = True + msg = 'failed to untap: %s due to %s' % (tap, err) + + else: + msg = 'already untapped: %s' % tap + + return (failed, changed, msg) + + +def remove_taps(module, brew_path, taps): + '''Removes one or more taps.''' + failed, changed, unchanged, removed, msg = False, False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = remove_tap(module, brew_path, tap) + if failed: + break + if changed: + removed += 1 + else: + unchanged += 1 + + if failed: + msg = 'removed: %d, unchanged: %d, error: ' + msg + msg = msg % (removed, unchanged) + elif removed: + changed = True + msg = 'removed: %d, unchanged: 
%d' % (removed, unchanged) + else: + msg = 'removed: %d, unchanged: %d' % (removed, unchanged) + + return (failed, changed, msg) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['tap'], type='list', required=True, elements='str'), + url=dict(default=None, required=False), + state=dict(default='present', choices=['present', 'absent']), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", + required=False, + type='path', + ), + ), + supports_check_mode=True, + ) + + path = module.params['path'] + if path: + path = path.split(':') + + brew_path = module.get_bin_path( + 'brew', + required=True, + opt_dirs=path, + ) + + taps = module.params['name'] + url = module.params['url'] + + if module.params['state'] == 'present': + if url is None: + # No tap URL provided explicitly, continue with bulk addition + # of all the taps. + failed, changed, msg = add_taps(module, brew_path, taps) + else: + # When an tap URL is provided explicitly, we allow adding + # *single* tap only. Validate and proceed to add single tap. + if len(taps) > 1: + msg = "List of multiple taps may not be provided with 'url' option." + module.fail_json(msg=msg) + else: + failed, changed, msg = add_tap(module, brew_path, taps[0], url) + + if failed: + module.fail_json(msg=msg) + else: + module.exit_json(changed=changed, msg=msg) + + elif module.params['state'] == 'absent': + failed, changed, msg = remove_taps(module, brew_path, taps) + + if failed: + module.fail_json(msg=msg) + else: + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/homectl.py b/ansible_collections/community/general/plugins/modules/homectl.py new file mode 100644 index 000000000..301e388d3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/homectl.py @@ -0,0 +1,658 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, James Livulpi +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: homectl +author: + - "James Livulpi (@jameslivulpi)" +short_description: Manage user accounts with systemd-homed +version_added: 4.4.0 +description: + - Manages a user's home directory managed by systemd-homed. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The user name to create, remove, or update. + required: true + aliases: [ 'user', 'username' ] + type: str + password: + description: + - Set the user's password to this. + - Homed requires this value to be in cleartext on user creation and updating a user. + - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using crypt. + - See U(https://systemd.io/USER_RECORD/). + - This is required for I(state=present). When an existing user is updated this is checked against the stored hash in homed. + type: str + state: + description: + - The operation to take on the user. + choices: [ 'absent', 'present' ] + default: present + type: str + storage: + description: + - Indicates the storage mechanism for the user's home directory. 
+ - If the storage type is not specified, ``homed.conf(5)`` defines which default storage to use. + - Only used when a user is first created. + choices: [ 'classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs' ] + type: str + disksize: + description: + - The intended home directory disk space. + - Human readable value such as C(10G), C(10M), or C(10B). + type: str + resize: + description: + - When used with I(disksize) this will attempt to resize the home directory immediately. + default: false + type: bool + realname: + description: + - The user's real ('human') name. + - This can also be used to add a comment to maintain compatibility with C(useradd). + aliases: [ 'comment' ] + type: str + realm: + description: + - The 'realm' a user is defined in. + type: str + email: + description: + - The email address of the user. + type: str + location: + description: + - A free-form location string describing the location of the user. + type: str + iconname: + description: + - The name of an icon picked by the user, for example for the purpose of an avatar. + - Should follow the semantics defined in the Icon Naming Specification. + - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics. + type: str + homedir: + description: + - Path to use as home directory for the user. + - This is the directory the user's home directory is mounted to while the user is logged in. + - This is not where the user's data is actually stored, see I(imagepath) for that. + - Only used when a user is first created. + type: path + imagepath: + description: + - Path to place the user's home directory. + - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information. + - Only used when a user is first created. + type: path + uid: + description: + - Sets the UID of the user. + - If using I(gid) homed requires the value to be the same. + - Only used when a user is first created. + type: int + gid: + description: + - Sets the gid of the user. + - If using I(uid) homed requires the value to be the same. + - Only used when a user is first created. + type: int + mountopts: + description: + - String separated by comma each indicating mount options for a users home directory. + - Valid options are C(nosuid), C(nodev) or C(noexec). + - Homed by default uses C(nodev) and C(nosuid) while C(noexec) is off. + type: str + umask: + description: + - Sets the umask for the user's login sessions + - Value from C(0000) to C(0777). + type: int + memberof: + description: + - String separated by comma each indicating a UNIX group this user shall be a member of. + - Groups the user should be a member of should be supplied as comma separated list. + aliases: [ 'groups' ] + type: str + skeleton: + description: + - The absolute path to the skeleton directory to populate a new home directory from. + - This is only used when a home directory is first created. + - If not specified homed by default uses C(/etc/skel). + aliases: [ 'skel' ] + type: path + shell: + description: + - Shell binary to use for terminal logins of given user. + - If not specified homed by default uses C(/bin/bash). + type: str + environment: + description: + - String separated by comma each containing an environment variable and its value to + set for the user's login session, in a format compatible with ``putenv()``. + - Any environment variable listed here is automatically set by pam_systemd for all + login sessions of the user. 
+ aliases: [ 'setenv' ] + type: str + timezone: + description: + - Preferred timezone to use for the user. + - Should be a tzdata compatible location string such as C(America/New_York). + type: str + locked: + description: + - Whether the user account should be locked or not. + type: bool + language: + description: + - The preferred language/locale for the user. + - This should be in a format compatible with the C($LANG) environment variable. + type: str + passwordhint: + description: + - Password hint for the given user. + type: str + sshkeys: + description: + - String separated by comma each listing a SSH public key that is authorized to access the account. + - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file. + type: str + notbefore: + description: + - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in. + type: int + notafter: + description: + - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in. + type: int +''' + +EXAMPLES = ''' +- name: Add the user 'james' + community.general.homectl: + name: johnd + password: myreallysecurepassword1! + state: present + +- name: Add the user 'alice' with a zsh shell, uid of 1000, and gid of 2000 + community.general.homectl: + name: alice + password: myreallysecurepassword1! + state: present + shell: /bin/zsh + uid: 1000 + gid: 1000 + +- name: Modify an existing user 'frank' to have 10G of diskspace and resize usage now + community.general.homectl: + name: frank + password: myreallysecurepassword1! + state: present + disksize: 10G + resize: true + +- name: Remove an existing user 'janet' + community.general.homectl: + name: janet + state: absent +''' + +RETURN = ''' +data: + description: A json dictionary returned from C(homectl inspect -j). + returned: success + type: dict + sample: { + "data": { + "binding": { + "e9ed2a5b0033427286b228e97c1e8343": { + "fileSystemType": "btrfs", + "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b", + "gid": 60268, + "imagePath": "/home/james.home", + "luksCipher": "aes", + "luksCipherMode": "xts-plain64", + "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81", + "luksVolumeKeySize": 32, + "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f", + "storage": "luks", + "uid": 60268 + } + }, + "diskSize": 3221225472, + "disposition": "regular", + "lastChangeUSec": 1641941238208691, + "lastPasswordChangeUSec": 1641941238208691, + "privileged": { + "hashedPassword": [ + "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV." 
+        ]
+      },
+      "signature": [
+        {
+          "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==",
+          "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n"
+        }
+      ],
+      "status": {
+        "e9ed2a5b0033427286b228e97c1e8343": {
+          "diskCeiling": 21845405696,
+          "diskFloor": 268435456,
+          "diskSize": 3221225472,
+          "service": "io.systemd.Home",
+          "signedLocally": true,
+          "state": "inactive"
+        }
+      },
+      "userName": "james"
+    }
+  }
+'''

+import crypt
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import jsonify
+from ansible.module_utils.common.text.formatters import human_to_bytes
+
+
+class Homectl(object):
+    '''Manage systemd-homed users through the homectl command line tool.'''
+
+    def __init__(self, module):
+        self.module = module
+        self.state = module.params['state']
+        self.name = module.params['name']
+        self.password = module.params['password']
+        self.storage = module.params['storage']
+        self.disksize = module.params['disksize']
+        self.resize = module.params['resize']
+        self.realname = module.params['realname']
+        self.realm = module.params['realm']
+        self.email = module.params['email']
+        self.location = module.params['location']
+        self.iconname = module.params['iconname']
+        self.homedir = module.params['homedir']
+        self.imagepath = module.params['imagepath']
+        self.uid = module.params['uid']
+        self.gid = module.params['gid']
+        self.umask = module.params['umask']
+        self.memberof = module.params['memberof']
+        self.skeleton = module.params['skeleton']
+        self.shell = module.params['shell']
+        self.environment = module.params['environment']
+        self.timezone = module.params['timezone']
+        self.locked = module.params['locked']
+        self.passwordhint = module.params['passwordhint']
+        self.sshkeys = module.params['sshkeys']
+        self.language = module.params['language']
+        self.notbefore = module.params['notbefore']
+        self.notafter = module.params['notafter']
+        self.mountopts = module.params['mountopts']
+
+        self.result = {}
+
+    # Cannot run homectl commands if the systemd-homed service is not active.
+    def homed_service_active(self):
+        is_active = True
+        cmd = ['systemctl', 'show', 'systemd-homed.service', '-p', 'ActiveState']
+        rc, show_service_stdout, stderr = self.module.run_command(cmd)
+        if rc == 0:
+            state = show_service_stdout.rsplit('=')[1]
+            if state.strip() != 'active':
+                is_active = False
+        return is_active
+
+    def user_exists(self):
+        exists = False
+        valid_pw = False
+        # Get the user's properties (as JSON) if they exist.
+        rc, stdout, stderr = self.get_user_metadata()
+        if rc == 0:
+            exists = True
+            # The user exists; compare the given password with the hashed password stored in the user metadata.
+            if self.state != 'absent':  # No need to check the password when removing a user.
+                stored_pwhash = json.loads(stdout)['privileged']['hashedPassword'][0]
+                if self._check_password(stored_pwhash):
+                    valid_pw = True
+        return exists, valid_pw
+
+    def create_user(self):
+        record = self.create_json_record(create=True)
+        cmd = [self.module.get_bin_path('homectl', True)]
+        cmd.append('create')
+        cmd.append('--identity=-')  # Read the user record from standard input.
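+        # The record built by create_json_record() is a systemd JSON user record
+        # (see https://systemd.io/USER_RECORD/); with --identity=- homectl reads
+        # it from stdin, which run_command() supplies via its data= argument.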
+        return self.module.run_command(cmd, data=record)
+
+    def _hash_password(self, password):
+        method = crypt.METHOD_SHA512
+        salt = crypt.mksalt(method, rounds=10000)
+        pw_hash = crypt.crypt(password, salt)
+        return pw_hash
+
+    def _check_password(self, pwhash):
+        hash = crypt.crypt(self.password, pwhash)
+        return pwhash == hash
+
+    def remove_user(self):
+        cmd = [self.module.get_bin_path('homectl', True)]
+        cmd.append('remove')
+        cmd.append(self.name)
+        return self.module.run_command(cmd)
+
+    def prepare_modify_user_command(self):
+        record = self.create_json_record()
+        cmd = [self.module.get_bin_path('homectl', True)]
+        cmd.append('update')
+        cmd.append(self.name)
+        cmd.append('--identity=-')  # Read the user record from standard input.
+        # Resize the home directory immediately when resize=true.
+        # This is not valid in the user record (JSON) and has to be passed on the command line.
+        if self.disksize and self.resize:
+            cmd.append('--and-resize')
+            cmd.append('true')
+            self.result['changed'] = True
+        return cmd, record
+
+    def get_user_metadata(self):
+        cmd = [self.module.get_bin_path('homectl', True)]
+        cmd.append('inspect')
+        cmd.append(self.name)
+        cmd.append('-j')
+        cmd.append('--no-pager')
+        rc, stdout, stderr = self.module.run_command(cmd)
+        return rc, stdout, stderr
+
+    # Build up a dictionary to jsonify for homectl commands.
+    def create_json_record(self, create=False):
+        record = {}
+        user_metadata = {}
+        self.result['changed'] = False
+        # Get the current user record if not creating a new user record.
+        if not create:
+            rc, user_metadata, stderr = self.get_user_metadata()
+            user_metadata = json.loads(user_metadata)
+            # Remove elements that are not meant to be updated from the record.
+            # These are always part of the record when a user exists.
+            user_metadata.pop('signature', None)
+            user_metadata.pop('binding', None)
+            user_metadata.pop('status', None)
+            # Let lastChangeUSec be updated by homed when the command runs.
+            user_metadata.pop('lastChangeUSec', None)
+            # Only change the fields that were given, leaving what is currently in the record intact.
+            record = user_metadata
+
+        record['userName'] = self.name
+        record['secret'] = {'password': [self.password]}
+
+        if create:
+            password_hash = self._hash_password(self.password)
+            record['privileged'] = {'hashedPassword': [password_hash]}
+            self.result['changed'] = True
+
+        if self.uid and self.gid and create:
+            record['uid'] = self.uid
+            record['gid'] = self.gid
+            self.result['changed'] = True
+
+        if self.memberof:
+            member_list = list(self.memberof.split(','))
+            if member_list != record.get('memberOf', [None]):
+                record['memberOf'] = member_list
+                self.result['changed'] = True
+
+        if self.realname:
+            if self.realname != record.get('realName'):
+                record['realName'] = self.realname
+                self.result['changed'] = True
+
+        # Cannot update storage unless we are creating a new user.
+        # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
+        if self.storage and create:
+            record['storage'] = self.storage
+            self.result['changed'] = True
+
+        # Cannot update homedir unless we are creating a new user.
+        # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
+        if self.homedir and create:
+            record['homeDirectory'] = self.homedir
+            self.result['changed'] = True
+
+        # Cannot update imagepath unless we are creating a new user.
+        # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
+        if self.imagepath and create:
+            record['imagePath'] = self.imagepath
+            self.result['changed'] = True
+
+        if self.disksize:
+            # Convert the human readable value to bytes before comparing.
+            if human_to_bytes(self.disksize) != record.get('diskSize'):
+                record['diskSize'] = human_to_bytes(self.disksize)
+                self.result['changed'] = True
+
+        if self.realm:
+            if self.realm != record.get('realm'):
+                record['realm'] = self.realm
+                self.result['changed'] = True
+
+        if self.email:
+            if self.email != record.get('emailAddress'):
+                record['emailAddress'] = self.email
+                self.result['changed'] = True
+
+        if self.location:
+            if self.location != record.get('location'):
+                record['location'] = self.location
+                self.result['changed'] = True
+
+        if self.iconname:
+            if self.iconname != record.get('iconName'):
+                record['iconName'] = self.iconname
+                self.result['changed'] = True
+
+        if self.skeleton:
+            if self.skeleton != record.get('skeletonDirectory'):
+                record['skeletonDirectory'] = self.skeleton
+                self.result['changed'] = True
+
+        if self.shell:
+            if self.shell != record.get('shell'):
+                record['shell'] = self.shell
+                self.result['changed'] = True
+
+        if self.umask:
+            if self.umask != record.get('umask'):
+                record['umask'] = self.umask
+                self.result['changed'] = True
+
+        if self.environment:
+            # Compare the parsed list, not the raw comma separated string.
+            if list(self.environment.split(',')) != record.get('environment', [None]):
+                record['environment'] = list(self.environment.split(','))
+                self.result['changed'] = True
+
+        if self.timezone:
+            if self.timezone != record.get('timeZone'):
+                record['timeZone'] = self.timezone
+                self.result['changed'] = True
+
+        if self.locked:
+            if self.locked != record.get('locked'):
+                record['locked'] = self.locked
+                self.result['changed'] = True
+
+        if self.passwordhint:
+            if self.passwordhint != record.get('privileged', {}).get('passwordHint'):
+                record.setdefault('privileged', {})['passwordHint'] = self.passwordhint
+                self.result['changed'] = True
+
+        if self.sshkeys:
+            # Compare the parsed list, not the raw comma separated string.
+            if list(self.sshkeys.split(',')) != record.get('privileged', {}).get('sshAuthorizedKeys'):
+                record.setdefault('privileged', {})['sshAuthorizedKeys'] = list(self.sshkeys.split(','))
+                self.result['changed'] = True
+
+        if self.language:
+            if self.language != record.get('preferredLanguage'):
+                record['preferredLanguage'] = self.language
+                self.result['changed'] = True
+
+        if self.notbefore:
+            if self.notbefore != record.get('notBeforeUSec'):
+                record['notBeforeUSec'] = self.notbefore
+                self.result['changed'] = True
+
+        if self.notafter:
+            if self.notafter != record.get('notAfterUSec'):
+                record['notAfterUSec'] = self.notafter
+                self.result['changed'] = True
+
+        if self.mountopts:
+            opts = list(self.mountopts.split(','))
+            if 'nosuid' in opts:
+                if record.get('mountNoSuid') is not True:
+                    record['mountNoSuid'] = True
+                    self.result['changed'] = True
+            else:
+                if record.get('mountNoSuid') is not False:
+                    record['mountNoSuid'] = False
+                    self.result['changed'] = True
+
+            if 'nodev' in opts:
+                if record.get('mountNoDevices') is not True:
+                    record['mountNoDevices'] = True
+                    self.result['changed'] = True
+            else:
+                if record.get('mountNoDevices') is not False:
+                    record['mountNoDevices'] = False
+                    self.result['changed'] = True
+
+            if 'noexec' in opts:
+                if record.get('mountNoExecute') is not True:
+                    record['mountNoExecute'] = True
+                    self.result['changed'] = True
+            else:
+                if record.get('mountNoExecute') is not False:
+                    record['mountNoExecute'] = False
+                    self.result['changed'] = True
+
+        return jsonify(record)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(type='str', default='present',
                       choices=['absent', 'present']),
+            name=dict(type='str', required=True, aliases=['user', 'username']),
+            password=dict(type='str', no_log=True),
+            storage=dict(type='str', choices=['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs']),
+            disksize=dict(type='str'),
+            resize=dict(type='bool', default=False),
+            realname=dict(type='str', aliases=['comment']),
+            realm=dict(type='str'),
+            email=dict(type='str'),
+            location=dict(type='str'),
+            iconname=dict(type='str'),
+            homedir=dict(type='path'),
+            imagepath=dict(type='path'),
+            uid=dict(type='int'),
+            gid=dict(type='int'),
+            umask=dict(type='int'),
+            environment=dict(type='str', aliases=['setenv']),
+            timezone=dict(type='str'),
+            memberof=dict(type='str', aliases=['groups']),
+            skeleton=dict(type='path', aliases=['skel']),
+            shell=dict(type='str'),
+            locked=dict(type='bool'),
+            passwordhint=dict(type='str', no_log=True),
+            sshkeys=dict(type='str', no_log=True),
+            language=dict(type='str'),
+            notbefore=dict(type='int'),
+            notafter=dict(type='int'),
+            mountopts=dict(type='str'),
+        ),
+        supports_check_mode=True,
+
+        required_if=[
+            ('state', 'present', ['password']),
+            ('resize', True, ['disksize']),
+        ]
+    )
+
+    homectl = Homectl(module)
+    homectl.result['state'] = homectl.state
+
+    # First make sure the systemd-homed service is active.
+    if not homectl.homed_service_active():
+        module.fail_json(msg='systemd-homed.service is not active')
+
+    # Handle removing a user.
+    if homectl.state == 'absent':
+        user_exists, valid_pwhash = homectl.user_exists()
+        if user_exists:
+            if module.check_mode:
+                module.exit_json(changed=True)
+            rc, stdout, stderr = homectl.remove_user()
+            if rc != 0:
+                module.fail_json(name=homectl.name, msg=stderr, rc=rc)
+            homectl.result['changed'] = True
+            homectl.result['rc'] = rc
+            homectl.result['msg'] = 'User %s removed!' % homectl.name
+        else:
+            homectl.result['changed'] = False
+            homectl.result['msg'] = 'User does not exist!'
+
+    # Handle adding a user.
+    if homectl.state == 'present':
+        user_exists, valid_pwhash = homectl.user_exists()
+        if not user_exists:
+            if module.check_mode:
+                module.exit_json(changed=True)
+            rc, stdout, stderr = homectl.create_user()
+            if rc != 0:
+                module.fail_json(name=homectl.name, msg=stderr, rc=rc)
+            rc, user_metadata, stderr = homectl.get_user_metadata()
+            homectl.result['data'] = json.loads(user_metadata)
+            homectl.result['rc'] = rc
+            homectl.result['msg'] = 'User %s created!' % homectl.name
+        else:
+            if valid_pwhash:
+                # Build the update command first to determine whether anything
+                # would change; this also makes check_mode reporting accurate.
+                cmd, record = homectl.prepare_modify_user_command()
+            else:
+                # The user exists but the given password is wrong; fail with a message.
+                homectl.result['changed'] = False
+                homectl.result['msg'] = 'User exists but password is incorrect!'
+                module.fail_json(**homectl.result)
+
+            if module.check_mode:
+                module.exit_json(**homectl.result)
+
+            # Now actually modify the user if anything changed above.
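+            # cmd and record were prepared by prepare_modify_user_command() above;
+            # execution only reaches this point on the valid-password path, so
+            # both names are guaranteed to be defined here.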
+            if homectl.result['changed']:
+                rc, stdout, stderr = module.run_command(cmd, data=record)
+                if rc != 0:
+                    module.fail_json(name=homectl.name, msg=stderr, rc=rc, changed=False)
+                rc, user_metadata, stderr = homectl.get_user_metadata()
+                homectl.result['data'] = json.loads(user_metadata)
+                homectl.result['rc'] = rc
+                homectl.result['msg'] = 'User %s modified' % homectl.name
+
+    module.exit_json(**homectl.result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py b/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
new file mode 100644
index 000000000..820e4538e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+short_description: Notify Honeybadger.io about app deployments
+description:
+  - Notify Honeybadger.io about app deployments (see U(http://docs.honeybadger.io/article/188-deployment-tracking)).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  token:
+    type: str
+    description:
+      - API token.
+    required: true
+  environment:
+    type: str
+    description:
+      - The environment name, typically 'production', 'staging', and so on.
+    required: true
+  user:
+    type: str
+    description:
+      - The username of the person doing the deployment.
+  repo:
+    type: str
+    description:
+      - URL of the project repository.
+  revision:
+    type: str
+    description:
+      - A hash, number, tag, or other identifier showing what revision was deployed.
+  url:
+    type: str
+    description:
+      - Optional URL to submit the notification to.
+    default: "https://api.honeybadger.io/v1/deploys"
+  validate_certs:
+    description:
+      - If C(false), SSL certificates for the target URL will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    type: bool
+    default: true
+
+'''
+
+EXAMPLES = '''
+- name: Notify Honeybadger.io about an app deployment
+  community.general.honeybadger_deployment:
+    token: AAAAAA
+    environment: staging
+    user: ansible
+    revision: b6826b8
+    repo: 'git@github.com:user/repo.git'
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
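+# A hedged sketch of the request built below, based on the deployment-tracking
+# docs linked above: a form-encoded POST such as
+#   api_key=TOKEN&deploy[environment]=staging&deploy[local_username]=ansible
+# sent to https://api.honeybadger.io/v1/deploys; HTTP 201 signals success.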
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + environment=dict(required=True), + user=dict(required=False), + repo=dict(required=False), + revision=dict(required=False), + url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'), + validate_certs=dict(default=True, type='bool'), + ), + supports_check_mode=True + ) + + params = {} + + if module.params["environment"]: + params["deploy[environment]"] = module.params["environment"] + + if module.params["user"]: + params["deploy[local_username]"] = module.params["user"] + + if module.params["repo"]: + params["deploy[repository]"] = module.params["repo"] + + if module.params["revision"]: + params["deploy[revision]"] = module.params["revision"] + + params["api_key"] = module.params["token"] + + url = module.params.get('url') + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + try: + data = urlencode(params) + response, info = fetch_url(module, url, data=data) + except Exception as e: + module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc()) + else: + if info['status'] == 201: + module.exit_json(changed=True) + else: + module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hpilo_boot.py b/ansible_collections/community/general/plugins/modules/hpilo_boot.py new file mode 100644 index 000000000..ace79a493 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hpilo_boot.py @@ -0,0 +1,218 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2012 Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: hpilo_boot +author: Dag Wieers (@dagwieers) +short_description: Boot system using specific media through HP iLO interface +description: + - "This module boots a system through its HP iLO interface. The boot media + can be one of: cdrom, floppy, hdd, network or usb." + - This module requires the hpilo python module. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + host: + description: + - The HP iLO hostname/address that is linked to the physical system. + type: str + required: true + login: + description: + - The login name to authenticate to the HP iLO interface. + default: Administrator + type: str + password: + description: + - The password to authenticate to the HP iLO interface. + default: admin + type: str + media: + description: + - The boot media to boot the system from + choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ] + type: str + image: + description: + - The URL of a cdrom, floppy or usb boot media image. + protocol://username:password@hostname:port/filename + - protocol is either 'http' or 'https' + - username:password is optional + - port is optional + type: str + state: + description: + - The state of the boot media. 
+ - "no_boot: Do not boot from the device" + - "boot_once: Boot from the device once and then notthereafter" + - "boot_always: Boot from the device each time the server is rebooted" + - "connect: Connect the virtual media device and set to boot_always" + - "disconnect: Disconnects the virtual media device and set to no_boot" + - "poweroff: Power off the server" + default: boot_once + type: str + choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ] + force: + description: + - Whether to force a reboot (even when the system is already booted). + - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running. + default: false + type: bool + ssl_version: + description: + - Change the ssl_version used. + default: TLSv1 + type: str + choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] +requirements: +- python-hpilo +notes: +- To use a USB key image you need to specify floppy as boot media. +- This module ought to be run from a system that can access the HP iLO + interface directly, either by using C(local_action) or using C(delegate_to). +''' + +EXAMPLES = r''' +- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server + community.general.hpilo_boot: + host: YOUR_ILO_ADDRESS + login: YOUR_ILO_LOGIN + password: YOUR_ILO_PASSWORD + media: cdrom + image: http://some-web-server/iso/boot.iso + when: cmdb_hwmodel.startswith('HP ') + delegate_to: localhost + +- name: Power off a server + community.general.hpilo_boot: + host: YOUR_ILO_HOST + login: YOUR_ILO_LOGIN + password: YOUR_ILO_PASSWORD + state: poweroff + delegate_to: localhost +''' + +RETURN = ''' +# Default return values +''' + +import time +import traceback +import warnings + +HPILO_IMP_ERR = None +try: + import hpilo + HAS_HPILO = True +except ImportError: + HPILO_IMP_ERR = traceback.format_exc() + HAS_HPILO = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +# Suppress warnings from hpilo +warnings.simplefilter('ignore') + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + login=dict(type='str', default='Administrator'), + password=dict(type='str', default='admin', no_log=True), + media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']), + image=dict(type='str'), + state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']), + force=dict(type='bool', default=False), + ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), + ) + ) + + if not HAS_HPILO: + module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) + + host = module.params['host'] + login = module.params['login'] + password = module.params['password'] + media = module.params['media'] + image = module.params['image'] + state = module.params['state'] + force = module.params['force'] + ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) + + ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) + changed = False + status = {} + power_status = 'UNKNOWN' + + if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'): + + # Workaround for: Error communicating with iLO: Problem manipulating EV + try: + ilo.set_one_time_boot(media) + except hpilo.IloError: + time.sleep(60) + 
+            ilo.set_one_time_boot(media)
+
+        # TODO: Verify if image URL exists/works
+        if image:
+            ilo.insert_virtual_media(media, image)
+            changed = True
+
+        if media == 'cdrom':
+            ilo.set_vm_status('cdrom', state, True)
+            status = ilo.get_vm_status()
+            changed = True
+        elif media in ('floppy', 'usb'):
+            ilo.set_vf_status(state, True)
+            status = ilo.get_vf_status()
+            changed = True
+
+    # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
+    if state in ('boot_once', 'boot_always') or force:
+
+        power_status = ilo.get_host_power_status()
+
+        if not force and power_status == 'ON':
+            module.fail_json(msg='HP iLO (%s) reports that the server is already powered on!' % host)
+
+        if power_status == 'ON':
+            ilo.warm_boot_server()
+#            ilo.cold_boot_server()
+            changed = True
+        else:
+            ilo.press_pwr_btn()
+#            ilo.reset_server()
+#            ilo.set_host_power(host_power=True)
+            changed = True
+
+    elif state == 'poweroff':
+
+        power_status = ilo.get_host_power_status()
+
+        if power_status != 'OFF':
+            ilo.hold_pwr_btn()
+#            ilo.set_host_power(host_power=False)
+            changed = True
+
+    module.exit_json(changed=changed, power=power_status, **status)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/hpilo_info.py b/ansible_collections/community/general/plugins/modules/hpilo_info.py
new file mode 100644
index 000000000..cef6597e4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hpilo_info.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+  This information includes hardware and network related information useful
+  for provisioning (for example, MAC address, UUID).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+  Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+extends_documentation_fragment:
+- community.general.attributes
+- community.general.attributes.info_module
+options:
+  host:
+    description:
+    - The HP iLO hostname/address that is linked to the physical system.
+    type: str
+    required: true
+  login:
+    description:
+    - The login name to authenticate to the HP iLO interface.
+    type: str
+    default: Administrator
+  password:
+    description:
+    - The password to authenticate to the HP iLO interface.
+    type: str
+    default: admin
+  ssl_version:
+    description:
+      - Change the ssl_version used.
+    default: TLSv1
+    type: str
+    choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+  interface directly, either by using C(local_action) or using C(delegate_to).
+''' + +EXAMPLES = r''' +- name: Gather facts from a HP iLO interface only if the system is an HP server + community.general.hpilo_info: + host: YOUR_ILO_ADDRESS + login: YOUR_ILO_LOGIN + password: YOUR_ILO_PASSWORD + when: cmdb_hwmodel.startswith('HP ') + delegate_to: localhost + register: results + +- ansible.builtin.fail: + msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !' + when: cmdb_serialno != results.hw_system_serial +''' + +RETURN = r''' +# Typical output of HP iLO_info for a physical system +hw_bios_date: + description: BIOS date + returned: always + type: str + sample: 05/05/2011 + +hw_bios_version: + description: BIOS version + returned: always + type: str + sample: P68 + +hw_ethX: + description: Interface information (for each interface) + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:55 + macaddress_dash: 00-11-22-33-44-55 + +hw_eth_ilo: + description: Interface information (for the iLO network interface) + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:BA + - macaddress_dash: 00-11-22-33-44-BA + +hw_product_name: + description: Product name + returned: always + type: str + sample: ProLiant DL360 G7 + +hw_product_uuid: + description: Product UUID + returned: always + type: str + sample: ef50bac8-2845-40ff-81d9-675315501dac + +hw_system_serial: + description: System serial number + returned: always + type: str + sample: ABC12345D6 + +hw_uuid: + description: Hardware UUID + returned: always + type: str + sample: 123456ABC78901D2 + +host_power_status: + description: + - Power status of host. + - Will be one of C(ON), C(OFF) and C(UNKNOWN). + returned: always + type: str + sample: "ON" + version_added: 3.5.0 +''' + +import re +import traceback +import warnings + +HPILO_IMP_ERR = None +try: + import hpilo + HAS_HPILO = True +except ImportError: + HPILO_IMP_ERR = traceback.format_exc() + HAS_HPILO = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +# Suppress warnings from hpilo +warnings.simplefilter('ignore') + + +def parse_flat_interface(entry, non_numeric='hw_eth_ilo'): + try: + infoname = 'hw_eth' + str(int(entry['Port']) - 1) + except Exception: + infoname = non_numeric + + info = { + 'macaddress': entry['MAC'].replace('-', ':'), + 'macaddress_dash': entry['MAC'] + } + return (infoname, info) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + login=dict(type='str', default='Administrator'), + password=dict(type='str', default='admin', no_log=True), + ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), + ), + supports_check_mode=True, + ) + + if not HAS_HPILO: + module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) + + host = module.params['host'] + login = module.params['login'] + password = module.params['password'] + ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) + + ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) + + info = { + 'module_hw': True, + } + + # TODO: Count number of CPUs, DIMMs and total memory + try: + data = ilo.get_host_data() + power_state = ilo.get_host_power_status() + except hpilo.IloCommunicationError as e: + module.fail_json(msg=to_native(e)) + + for entry in data: + if 'type' not in entry: + continue + elif 
entry['type'] == 0:  # BIOS Information
+            info['hw_bios_version'] = entry['Family']
+            info['hw_bios_date'] = entry['Date']
+        elif entry['type'] == 1:  # System Information
+            info['hw_uuid'] = entry['UUID']
+            info['hw_system_serial'] = entry['Serial Number'].rstrip()
+            info['hw_product_name'] = entry['Product Name']
+            info['hw_product_uuid'] = entry['cUUID']
+        elif entry['type'] == 209:  # Embedded NIC MAC Assignment
+            if 'fields' in entry:
+                for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+                    if name.startswith('Port'):
+                        try:
+                            infoname = 'hw_eth' + str(int(value) - 1)
+                        except Exception:
+                            infoname = 'hw_eth_ilo'
+                    elif name.startswith('MAC'):
+                        info[infoname] = {
+                            'macaddress': value.replace('-', ':'),
+                            'macaddress_dash': value
+                        }
+            else:
+                (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+                info[infoname] = entry_info
+        elif entry['type'] == 209:  # HPQ NIC iSCSI MAC Info
+            # NOTE: this branch is unreachable as written, because the identical
+            # entry['type'] == 209 condition above always matches first.
+            for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+                if name.startswith('Port'):
+                    try:
+                        infoname = 'hw_iscsi' + str(int(value) - 1)
+                    except Exception:
+                        infoname = 'hw_iscsi_ilo'
+                elif name.startswith('MAC'):
+                    info[infoname] = {
+                        'macaddress': value.replace('-', ':'),
+                        'macaddress_dash': value
+                    }
+        elif entry['type'] == 233:  # Embedded NIC MAC Assignment (Alternate data format)
+            (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+            info[infoname] = entry_info
+
+    # Collect health (RAM/CPU data)
+    health = ilo.get_embedded_health()
+    info['hw_health'] = health
+
+    memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+    # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+    if memory_details_summary:
+        info['hw_memory_details_summary'] = memory_details_summary
+        info['hw_memory_total'] = 0
+        for cpu, details in memory_details_summary.items():
+            cpu_total_memory_size = details.get('total_memory_size')
+            if cpu_total_memory_size:
+                ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+                if ram:
+                    if ram.group(2) == 'GB':
+                        info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+        # reformat into a text friendly format
+        info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+    # Report host state
+    info['host_power_status'] = power_state or 'UNKNOWN'
+
+    module.exit_json(**info)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/hponcfg.py b/ansible_collections/community/general/plugins/modules/hponcfg.py
new file mode 100644
index 000000000..612a20d92
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hponcfg.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Dag Wieers
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hponcfg
+author: Dag Wieers (@dagwieers)
+short_description: Configure HP iLO interface using hponcfg
+description:
+  - This module configures the HP iLO interface using hponcfg.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  path:
+    description:
+      - The XML file as accepted by hponcfg.
+    required: true
+    aliases: ['src']
+    type: path
+  minfw:
+    description:
+      - The minimum firmware level needed.
+    required: false
+    type: str
+  executable:
+    description:
+      - Path to the hponcfg executable (defaults to C(hponcfg), which is looked up via C($PATH)).
+    default: hponcfg
+    type: str
+  verbose:
+    description:
+      - Run hponcfg in verbose mode (-v).
+    default: false
+    type: bool
+requirements:
+  - hponcfg tool
+notes:
+  - You need a working hponcfg on the target system.
+'''

+EXAMPLES = r'''
+- name: Example hponcfg configuration XML
+  ansible.builtin.copy:
+    content: |
+      <ribcl VERSION="2.0">
+        <login USER_LOGIN="user" PASSWORD="password">
+          <rib_info MODE="WRITE">
+            <mod_global_settings>
+              <session_timeout value="0"/>
+              <ssh_status value="Yes"/>
+              <ssh_port value="22"/>
+              <serial_cli_status value="3"/>
+              <serial_cli_speed value="5"/>
+            </mod_global_settings>
+          </rib_info>
+        </login>
+      </ribcl>
+    dest: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO using enable-ssh.xml
+  community.general.hponcfg:
+    src: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO on VMware ESXi hypervisor
+  community.general.hponcfg:
+    src: /tmp/enable-ssh.xml
+    executable: /opt/hp/tools/hponcfg
+'''

+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+
+class HPOnCfg(ModuleHelper):
+    module = dict(
+        argument_spec=dict(
+            src=dict(type='path', required=True, aliases=['path']),
+            minfw=dict(type='str'),
+            executable=dict(default='hponcfg', type='str'),
+            verbose=dict(default=False, type='bool'),
+        )
+    )
+    command_args_formats = dict(
+        src=cmd_runner_fmt.as_opt_val("-f"),
+        verbose=cmd_runner_fmt.as_bool("-v"),
+        minfw=cmd_runner_fmt.as_opt_val("-m"),
+    )
+
+    def __run__(self):
+        runner = CmdRunner(
+            self.module,
+            self.vars.executable,
+            self.command_args_formats,
+            check_rc=True,
+        )
+        runner(['src', 'verbose', 'minfw']).run()
+
+        # Consider every action a change (not idempotent yet!)
+        self.changed = True
+
+
+def main():
+    HPOnCfg.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/htpasswd.py b/ansible_collections/community/general/plugins/modules/htpasswd.py
new file mode 100644
index 000000000..180b02073
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/htpasswd.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Nimbis Services, Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: htpasswd
+short_description: Manage user files for basic authentication
+description:
+  - Add and remove username/password entries in a password file using htpasswd.
+  - This is used by web servers such as Apache and Nginx for basic authentication.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  path:
+    type: path
+    required: true
+    aliases: [ dest, destfile ]
+    description:
+      - Path to the file that contains the usernames and passwords.
+  name:
+    type: str
+    required: true
+    aliases: [ username ]
+    description:
+      - User name to add or remove.
+  password:
+    type: str
+    required: false
+    description:
+      - Password associated with user.
+      - Must be specified if user does not exist yet.
+  crypt_scheme:
+    type: str
+    required: false
+    default: "apr_md5_crypt"
+    description:
+      - Encryption scheme to be used. As well as the four choices listed
+        here, you can also use any other hash supported by passlib, such as
+        C(portable_apache22) and C(host_apache24); or C(md5_crypt) and C(sha256_crypt),
+        which are Linux passwd hashes.
Only some schemes in addition to + the four choices below will be compatible with Apache or Nginx, and + supported schemes depend on passlib version and its dependencies. + - See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme). + - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext).' + state: + type: str + required: false + choices: [ present, absent ] + default: "present" + description: + - Whether the user entry should be present or not + create: + required: false + type: bool + default: true + description: + - Used with I(state=present). If specified, the file will be created + if it does not already exist. If set to C(false), will fail if the + file does not exist +notes: + - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." + - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." + - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." +requirements: [ passlib>=1.6 ] +author: "Ansible Core Team" +extends_documentation_fragment: + - files + - community.general.attributes +''' + +EXAMPLES = """ +- name: Add a user to a password file and ensure permissions are set + community.general.htpasswd: + path: /etc/nginx/passwdfile + name: janedoe + password: '9s36?;fyNp' + owner: root + group: www-data + mode: 0640 + +- name: Remove a user from a password file + community.general.htpasswd: + path: /etc/apache2/passwdfile + name: foobar + state: absent + +- name: Add a user to a password file suitable for use by libpam-pwdfile + community.general.htpasswd: + path: /etc/mail/passwords + name: alex + password: oedu2eGh + crypt_scheme: md5_crypt +""" + + +import os +import tempfile +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +PASSLIB_IMP_ERR = None +try: + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext + import passlib +except ImportError: + PASSLIB_IMP_ERR = traceback.format_exc() + passlib_installed = False +else: + passlib_installed = True + +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + + +def create_missing_directories(dest): + destpath = os.path.dirname(dest) + if not os.path.exists(destpath): + os.makedirs(destpath) + + +def present(dest, username, password, crypt_scheme, create, check_mode): + """ Ensures user is present + + Returns (msg, changed) """ + if crypt_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes=[crypt_scheme] + apache_hashes) + if not os.path.exists(dest): + if not create: + raise ValueError('Destination %s does not exist' % dest) + if check_mode: + return ("Create %s" % dest, True) + create_missing_directories(dest) + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Created %s and added %s" % (dest, username), True) + else: + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False, 
default_scheme=crypt_scheme, context=context) + else: + ht = HtpasswdFile(dest, default=crypt_scheme, context=context) + + found = None + if getattr(ht, 'check_password', None): + found = ht.check_password(username, password) + else: + found = ht.verify(username, password) + + if found: + return ("%s already present" % username, False) + else: + if not check_mode: + if getattr(ht, 'set_password', None): + ht.set_password(username, password) + else: + ht.update(username, password) + ht.save() + return ("Add/update %s" % username, True) + + +def absent(dest, username, check_mode): + """ Ensures user is absent + + Returns (msg, changed) """ + if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): + ht = HtpasswdFile(dest, new=False) + else: + ht = HtpasswdFile(dest) + + if username not in ht.users(): + return ("%s not present" % username, False) + else: + if not check_mode: + ht.delete(username) + ht.save() + return ("Remove %s" % username, True) + + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms or SE linux context changed" + + return message, changed + + +def main(): + arg_spec = dict( + path=dict(type='path', required=True, aliases=["dest", "destfile"]), + name=dict(type='str', required=True, aliases=["username"]), + password=dict(type='str', required=False, default=None, no_log=True), + crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"), + state=dict(type='str', required=False, default="present", choices=["present", "absent"]), + create=dict(type='bool', default=True), + + ) + module = AnsibleModule(argument_spec=arg_spec, + add_file_common_args=True, + supports_check_mode=True) + + path = module.params['path'] + username = module.params['name'] + password = module.params['password'] + crypt_scheme = module.params['crypt_scheme'] + state = module.params['state'] + create = module.params['create'] + check_mode = module.check_mode + + if not passlib_installed: + module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR) + + # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. 
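+    # A hedged note: some passlib releases parse each htpasswd line by
+    # splitting on ':' and raise "need more than 1 value to unpack" when they
+    # hit an empty line, hence the cleanup below.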
+    # Initialize to an empty list so the cleanup below is a no-op when the
+    # file does not exist yet.
+    lines = []
+    try:
+        f = open(path, "r")
+    except IOError:
+        # No preexisting file to remove blank lines from
+        f = None
+    else:
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+
+    # Only rewrite the file if it actually contains blank lines.
+    strip = False
+    for line in lines:
+        if not line.strip():
+            strip = True
+            break
+
+    if strip:
+        # If check mode, create a temporary file
+        if check_mode:
+            temp = tempfile.NamedTemporaryFile()
+            path = temp.name
+        f = open(path, "w")
+        try:
+            for line in lines:
+                if line.strip():
+                    f.write(line)
+        finally:
+            f.close()
+
+    try:
+        if state == 'present':
+            (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+        elif state == 'absent':
+            if not os.path.exists(path):
+                module.exit_json(msg="%s not present" % username,
+                                 warnings="%s does not exist" % path, changed=False)
+            (msg, changed) = absent(path, username, check_mode)
+        else:
+            module.fail_json(msg="Invalid state: %s" % state)
+
+        check_file_attrs(module, changed, msg)
+        module.exit_json(msg=msg, changed=changed)
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py b/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
new file mode 100644
index 000000000..434db242f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
@@ -0,0 +1,2142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_ecs_instance
+description:
+    - Instance management.
+short_description: Creates a resource of Ecs/Instance in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+    - keystoneauth1 >= 3.6.0
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+    state:
+        description:
+            - Whether the given object should exist in Huawei Cloud.
+        type: str
+        choices: ['present', 'absent']
+        default: 'present'
+    timeouts:
+        description:
+            - The timeouts for each operation.
+        type: dict
+        default: {}
+        suboptions:
+            create:
+                description:
+                    - The timeout for the create operation.
+                type: str
+                default: '30m'
+            update:
+                description:
+                    - The timeout for the update operation.
+                type: str
+                default: '30m'
+            delete:
+                description:
+                    - The timeout for the delete operation.
+                type: str
+                default: '30m'
+    availability_zone:
+        description:
+            - Specifies the name of the AZ where the ECS is located.
+        type: str
+        required: true
+    flavor_name:
+        description:
+            - Specifies the name of the system flavor.
+        type: str
+        required: true
+    image_id:
+        description:
+            - Specifies the ID of the system image.
+        type: str
+        required: true
+    name:
+        description:
+            - Specifies the ECS name. Value requirements: 1 to 64
+              characters, including letters, digits, underscores C(_), hyphens
+              (-), and periods (.).
+        type: str
+        required: true
+    nics:
+        description:
+            - Specifies the NIC information of the ECS.
Constraints the + network of the NIC must belong to the VPC specified by vpc_id. A + maximum of 12 NICs can be attached to an ECS. + type: list + elements: dict + required: true + suboptions: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 + address. Its value must be an unused IP + address in the network segment of the subnet. + type: str + required: true + subnet_id: + description: + - Specifies the ID of subnet. + type: str + required: true + root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + required: true + suboptions: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - co-p1 is high I/O (performance-optimized I) disk type. + - uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 + disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + required: true + size: + description: + - Specifies the system disk size, in GB. The value range is + 1 to 1024. The system disk size must be + greater than or equal to the minimum system disk size + supported by the image (min_disk attribute of the image). + If this parameter is not specified or is set to 0, the + default system disk size is the minimum value of the + system disk in the image (min_disk attribute of the + image). + type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk + contained in the full-ECS image. + type: str + required: false + vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + required: true + admin_pass: + description: + - Specifies the initial login password of the administrator account + for logging in to an ECS using password authentication. The Linux + administrator is root, and the Windows administrator is + Administrator. Password complexity requirements, consists of 8 to + 26 characters. The password must contain at least three of the + following character types 'uppercase letters, lowercase letters, + digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password + cannot contain the username or the username in reverse. The + Windows ECS password cannot contain the username, the username in + reverse, or more than two consecutive characters in the username. + type: str + required: false + data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + elements: dict + required: false + suboptions: + volume_id: + description: + - Specifies the disk ID. + type: str + required: true + device: + description: + - Specifies the disk device name. + type: str + required: false + description: + description: + - Specifies the description of an ECS, which is a null string by + default. Can contain a maximum of 85 characters. Cannot contain + special characters, such as < and >. + type: str + required: false + eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. + Only elastic IP addresses in the DOWN state can be + assigned. + type: str + required: false + enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. + type: bool + required: false + enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS + belongs. 
+ type: str + required: false + security_groups: + description: + - Specifies the security groups of the ECS. If this + parameter is left blank, the default security group is bound to + the ECS by default. + type: list + elements: str + required: false + server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + required: false + server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS + supports up to 10 tags. + type: dict + required: false + ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + required: false + user_data: + description: + - Specifies the user data to be injected during the ECS creation + process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with + base64. The maximum size of the content to be injected (before + encoding) is 32 KB. For Linux ECSs, this parameter does not take + effect when adminPass is used. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +# create an ecs instance +- name: Create a vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create a subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: Create a eip + hwc_vpc_eip: + dedicated_bandwidth: + charge_mode: "traffic" + name: "ansible_test_dedicated_bandwidth" + size: 1 + type: "5_bgp" + register: eip +- name: Create a disk + hwc_evs_disk: + availability_zone: "cn-north-1a" + name: "ansible_evs_disk_test" + volume_type: "SATA" + size: 10 + register: disk +- name: Create an instance + community.general.hwc_ecs_instance: + data_volumes: + - volume_id: "{{ disk.id }}" + enable_auto_recovery: false + eip_id: "{{ eip.id }}" + name: "ansible_ecs_instance_test" + availability_zone: "cn-north-1a" + nics: + - subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" + - subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.34" + server_tags: + my_server: "my_server" + image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892" + flavor_name: "s3.small.1" + vpc_id: "{{ vpc.id }}" + root_volume: + volume_type: "SAS" +''' + +RETURN = ''' + availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + returned: success + flavor_name: + description: + - Specifies the name of the system flavor. + type: str + returned: success + image_id: + description: + - Specifies the ID of the system image. + type: str + returned: success + name: + description: + - Specifies the ECS name. Value requirements "Consists of 1 to 64 + characters, including letters, digits, underscores C(_), hyphens + (-), periods (.)". + type: str + returned: success + nics: + description: + - Specifies the NIC information of the ECS. The + network of the NIC must belong to the VPC specified by vpc_id. A + maximum of 12 NICs can be attached to an ECS. + type: list + returned: success + contains: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 + address. Its value must be an unused IP + address in the network segment of the subnet. + type: str + returned: success + subnet_id: + description: + - Specifies the ID of subnet. 
+ type: str + returned: success + port_id: + description: + - Specifies the port ID corresponding to the IP address. + type: str + returned: success + root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + returned: success + contains: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - co-p1 is high I/O (performance-optimized I) disk type. + - uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 + disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + returned: success + size: + description: + - Specifies the system disk size, in GB. The value range is + 1 to 1024. The system disk size must be + greater than or equal to the minimum system disk size + supported by the image (min_disk attribute of the image). + If this parameter is not specified or is set to 0, the + default system disk size is the minimum value of the + system disk in the image (min_disk attribute of the + image). + type: int + returned: success + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk + contained in the full-ECS image. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success + vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + returned: success + admin_pass: + description: + - Specifies the initial login password of the administrator account + for logging in to an ECS using password authentication. The Linux + administrator is root, and the Windows administrator is + Administrator. Password complexity requirements consists of 8 to + 26 characters. The password must contain at least three of the + following character types "uppercase letters, lowercase letters, + digits, and special characters (!@$%^-_=+[{}]:,./?)". The password + cannot contain the username or the username in reverse. The + Windows ECS password cannot contain the username, the username in + reverse, or more than two consecutive characters in the username. + type: str + returned: success + data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + returned: success + contains: + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success + description: + description: + - Specifies the description of an ECS, which is a null string by + default. Can contain a maximum of 85 characters. Cannot contain + special characters, such as < and >. + type: str + returned: success + eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. + Only elastic IP addresses in the DOWN state can be assigned. + type: str + returned: success + enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. + type: bool + returned: success + enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS + belongs. + type: str + returned: success + security_groups: + description: + - Specifies the security groups of the ECS. If this parameter is left + blank, the default security group is bound to the ECS by default. 
+ type: list + returned: success + server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + returned: success + server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS + supports up to 10 tags. + type: dict + returned: success + ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + returned: success + user_data: + description: + - Specifies the user data to be injected during the ECS creation + process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum + size of the content to be injected (before encoding) is 32 KB. For + Linux ECSs, this parameter does not take effect when adminPass is + used. + type: str + returned: success + config_drive: + description: + - Specifies the configuration driver. + type: str + returned: success + created: + description: + - Specifies the time when an ECS was created. + type: str + returned: success + disk_config_type: + description: + - Specifies the disk configuration type. MANUAL is The image + space is not expanded. AUTO is the image space of the system disk + will be expanded to be as same as the flavor. + type: str + returned: success + host_name: + description: + - Specifies the host name of the ECS. + type: str + returned: success + image_name: + description: + - Specifies the image name of the ECS. + type: str + returned: success + power_state: + description: + - Specifies the power status of the ECS. + type: int + returned: success + server_alias: + description: + - Specifies the ECS alias. + type: str + returned: success + status: + description: + - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, + REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR, + and DELETED. 
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+    Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+    get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+    return HwcModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'],
+                       type='str'),
+            timeouts=dict(type='dict', options=dict(
+                create=dict(default='30m', type='str'),
+                update=dict(default='30m', type='str'),
+                delete=dict(default='30m', type='str'),
+            ), default=dict()),
+            availability_zone=dict(type='str', required=True),
+            flavor_name=dict(type='str', required=True),
+            image_id=dict(type='str', required=True),
+            name=dict(type='str', required=True),
+            nics=dict(
+                type='list', required=True, elements='dict',
+                options=dict(
+                    ip_address=dict(type='str', required=True),
+                    subnet_id=dict(type='str', required=True)
+                ),
+            ),
+            root_volume=dict(type='dict', required=True, options=dict(
+                volume_type=dict(type='str', required=True),
+                size=dict(type='int'),
+                snapshot_id=dict(type='str')
+            )),
+            vpc_id=dict(type='str', required=True),
+            admin_pass=dict(type='str', no_log=True),
+            data_volumes=dict(type='list', elements='dict', options=dict(
+                volume_id=dict(type='str', required=True),
+                device=dict(type='str')
+            )),
+            description=dict(type='str'),
+            eip_id=dict(type='str'),
+            enable_auto_recovery=dict(type='bool'),
+            enterprise_project_id=dict(type='str'),
+            security_groups=dict(type='list', elements='str'),
+            server_metadata=dict(type='dict'),
+            server_tags=dict(type='dict'),
+            ssh_key_name=dict(type='str'),
+            user_data=dict(type='str')
+        ),
+        supports_check_mode=True,
+    )
+
+
+def main():
+    """Main function"""
+
+    module = build_module()
+    config = Config(module, "ecs")
+
+    try:
+        # _init resolves the resource id: either the user supplied it, or it
+        # is looked up via search_resource().
+        _init(config)
+        is_exist = module.params['id']
+
+        result = None
+        changed = False
+        if module.params['state'] == 'present':
+            if not is_exist:
+                if not module.check_mode:
+                    create(config)
+                changed = True
+
+            # Converge on the desired state: read the resource back, diff it
+            # against the input, update once, then verify the update took.
+            inputv = user_input_parameters(module)
+            resp, array_index = read_resource(config)
+            result = build_state(inputv, resp, array_index)
+            set_readonly_options(inputv, result)
+            if are_different_dicts(inputv, result):
+                if not module.check_mode:
+                    update(config, inputv, result)
+
+                    inputv = user_input_parameters(module)
+                    resp, array_index = read_resource(config)
+                    result = build_state(inputv, resp, array_index)
+                    set_readonly_options(inputv, result)
+                    if are_different_dicts(inputv, result):
+                        raise Exception("Update resource failed, "
+                                        "some attributes are not updated")
+
+                changed = True
+
+            result['id'] = module.params.get('id')
+        else:
+            result = dict()
+            if is_exist:
+                if not module.check_mode:
+                    delete(config)
+                changed = True
+
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+    else:
+        result['changed'] = changed
+        module.exit_json(**result)
+
+
+def _init(config):
+    module = config.module
+    if module.params['id']:
+        return
+
+    v = search_resource(config)
+    n = len(v)
+    if n > 1:
+        raise Exception("Found more than one resource(%s)" % ", ".join([
+            navigate_value(i, ["id"])
+            for i in v
+        ]))
+
+    if n == 1:
+        module.params['id'] = navigate_value(v[0], ["id"])
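+
+# Note on timeouts: the create/update/delete helpers below turn the '30m'
+# style strings from the timeouts option into seconds, for example
+# 60 * int('30m'.rstrip('m')) == 1800.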
"enable_auto_recovery": module.params.get("enable_auto_recovery"), + "enterprise_project_id": module.params.get("enterprise_project_id"), + "flavor_name": module.params.get("flavor_name"), + "image_id": module.params.get("image_id"), + "name": module.params.get("name"), + "nics": module.params.get("nics"), + "root_volume": module.params.get("root_volume"), + "security_groups": module.params.get("security_groups"), + "server_metadata": module.params.get("server_metadata"), + "server_tags": module.params.get("server_tags"), + "ssh_key_name": module.params.get("ssh_key_name"), + "user_data": module.params.get("user_data"), + "vpc_id": module.params.get("vpc_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait(config, r, client, timeout) + + sub_job_identity = { + "job_type": "createSingleServer", + } + for item in navigate_value(obj, ["entities", "sub_jobs"]): + for k, v in sub_job_identity.items(): + if item[k] != v: + break + else: + obj = item + break + else: + raise Exception("Can't find the sub job") + module.params['id'] = navigate_value(obj, ["entities", "server_id"]) + + +def update(config, expect_state, current_state): + module = config.module + expect_state["current_state"] = current_state + current_state["current_state"] = current_state + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + client = config.client(get_region(module), "ecs", "project") + + params = build_delete_nics_parameters(expect_state) + params1 = build_delete_nics_parameters(current_state) + if params and are_different_dicts(params, params1): + r = send_delete_nics_request(module, params, client) + async_wait(config, r, client, timeout) + + params = build_set_auto_recovery_parameters(expect_state) + params1 = build_set_auto_recovery_parameters(current_state) + if params and are_different_dicts(params, params1): + send_set_auto_recovery_request(module, params, client) + + params = build_attach_nics_parameters(expect_state) + params1 = build_attach_nics_parameters(current_state) + if params and are_different_dicts(params, params1): + r = send_attach_nics_request(module, params, client) + async_wait(config, r, client, timeout) + + multi_invoke_delete_volume(config, expect_state, client, timeout) + + multi_invoke_attach_data_disk(config, expect_state, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_delete_parameters(opts) + if params: + r = send_delete_request(module, params, client) + async_wait(config, r, client, timeout) + + +def read_resource(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + + res = {} + + r = send_read_request(module, client) + preprocess_read_response(r) + res["read"] = fill_read_resp_body(r) + + r = send_read_auto_recovery_request(module, client) + res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r) + + return res, None + + +def preprocess_read_response(resp): + v = resp.get("os-extended-volumes:volumes_attached") + if v and isinstance(v, list): + for i 
in range(len(v)): + if v[i].get("bootIndex") == "0": + root_volume = v[i] + + if (i + 1) != len(v): + v[i] = v[-1] + + v.pop() + + resp["root_volume"] = root_volume + break + + v = resp.get("addresses") + if v: + rv = {} + eips = [] + for val in v.values(): + for item in val: + if item["OS-EXT-IPS:type"] == "floating": + eips.append(item) + else: + rv[item["OS-EXT-IPS:port_id"]] = item + + for item in eips: + k = item["OS-EXT-IPS:port_id"] + if k in rv: + rv[k]["eip_address"] = item.get("addr", "") + else: + rv[k] = item + item["eip_address"] = item.get("addr", "") + item["addr"] = "" + + resp["address"] = rv.values() + + +def build_state(opts, response, array_index): + states = flatten_options(response, array_index) + set_unreadable_options(opts, states) + adjust_options(opts, states) + return states + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enterprise_project_id"]) + if v or v in [False, 0]: + query_params.append( + "enterprise_project_id=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["name"]) + if v or v in [False, 0]: + query_params.append( + "name=" + (str(v) if v else str(v).lower())) + + query_link = "?limit=10&offset={offset}" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "ecs", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "cloudservers/detail" + query_link + + result = [] + p = {'offset': 1} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + adjust_list_resp(identity_obj, item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['offset'] += 1 + + return result + + +def build_delete_nics_parameters(opts): + params = dict() + + v = expand_delete_nics_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + return params + + +def expand_delete_nics_nics(d, array_index): + cv = d["current_state"].get("nics") + if not cv: + return None + + val = cv + + ev = d.get("nics") + if ev: + m = [item.get("ip_address") for item in ev] + val = [item for item in cv if item.get("ip_address") not in m] + + r = [] + for item in val: + transformed = dict() + + v = item.get("port_id") + if not is_empty_value(v): + transformed["id"] = v + + if transformed: + r.append(transformed) + + return r + + +def send_delete_nics_request(module, params, client): + url = build_path(module, "cloudservers/{id}/nics/delete") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete_nics), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_set_auto_recovery_parameters(opts): + params = dict() + + v = expand_set_auto_recovery_support_auto_recovery(opts, None) + if v is not None: + params["support_auto_recovery"] = v + + return params + + +def expand_set_auto_recovery_support_auto_recovery(d, array_index): + v = navigate_value(d, ["enable_auto_recovery"], None) + return None if v is None else str(v).lower() + + +def send_set_auto_recovery_request(module, params, client): + url = build_path(module, "cloudservers/{id}/autorecovery") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = 
("module(hwc_ecs_instance): error running " + "api(set_auto_recovery), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["admin_pass"], None) + if not is_empty_value(v): + params["adminPass"] = v + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = expand_create_extendparam(opts, None) + if not is_empty_value(v): + params["extendparam"] = v + + v = navigate_value(opts, ["flavor_name"], None) + if not is_empty_value(v): + params["flavorRef"] = v + + v = navigate_value(opts, ["image_id"], None) + if not is_empty_value(v): + params["imageRef"] = v + + v = navigate_value(opts, ["ssh_key_name"], None) + if not is_empty_value(v): + params["key_name"] = v + + v = navigate_value(opts, ["server_metadata"], None) + if not is_empty_value(v): + params["metadata"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + v = expand_create_publicip(opts, None) + if not is_empty_value(v): + params["publicip"] = v + + v = expand_create_root_volume(opts, None) + if not is_empty_value(v): + params["root_volume"] = v + + v = expand_create_security_groups(opts, None) + if not is_empty_value(v): + params["security_groups"] = v + + v = expand_create_server_tags(opts, None) + if not is_empty_value(v): + params["server_tags"] = v + + v = navigate_value(opts, ["user_data"], None) + if not is_empty_value(v): + params["user_data"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpcid"] = v + + if not params: + return params + + params = {"server": params} + + return params + + +def expand_create_extendparam(d, array_index): + r = dict() + + r["chargingMode"] = 0 + + v = navigate_value(d, ["enterprise_project_id"], array_index) + if not is_empty_value(v): + r["enterprise_project_id"] = v + + v = navigate_value(d, ["enable_auto_recovery"], array_index) + if not is_empty_value(v): + r["support_auto_recovery"] = v + + return r + + +def expand_create_nics(d, array_index): + new_ai = dict() + if array_index: + new_ai.update(array_index) + + req = [] + + v = navigate_value( + d, ["nics"], new_ai) + + if not v: + return req + n = len(v) + for i in range(n): + new_ai["nics"] = i + transformed = dict() + + v = navigate_value(d, ["nics", "ip_address"], new_ai) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["nics", "subnet_id"], new_ai) + if not is_empty_value(v): + transformed["subnet_id"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_publicip(d, array_index): + r = dict() + + v = navigate_value(d, ["eip_id"], array_index) + if not is_empty_value(v): + r["id"] = v + + return r + + +def expand_create_root_volume(d, array_index): + r = dict() + + v = expand_create_root_volume_extendparam(d, array_index) + if not is_empty_value(v): + r["extendparam"] = v + + v = navigate_value(d, ["root_volume", "size"], array_index) + if not is_empty_value(v): + r["size"] = v + + v = navigate_value(d, ["root_volume", "volume_type"], array_index) + if not is_empty_value(v): + r["volumetype"] = v + + return r + + +def expand_create_root_volume_extendparam(d, array_index): + r = dict() + + v = navigate_value(d, 
["root_volume", "snapshot_id"], array_index) + if not is_empty_value(v): + r["snapshotId"] = v + + return r + + +def expand_create_security_groups(d, array_index): + v = d.get("security_groups") + if not v: + return None + + return [{"id": i} for i in v] + + +def expand_create_server_tags(d, array_index): + v = d.get("server_tags") + if not v: + return None + + return [{"key": k, "value": v1} for k, v1 in v.items()] + + +def send_create_request(module, params, client): + url = "cloudservers" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_attach_nics_parameters(opts): + params = dict() + + v = expand_attach_nics_nics(opts, None) + if not is_empty_value(v): + params["nics"] = v + + return params + + +def expand_attach_nics_nics(d, array_index): + ev = d.get("nics") + if not ev: + return None + + val = ev + + cv = d["current_state"].get("nics") + if cv: + m = [item.get("ip_address") for item in cv] + val = [item for item in ev if item.get("ip_address") not in m] + + r = [] + for item in val: + transformed = dict() + + v = item.get("ip_address") + if not is_empty_value(v): + transformed["ip_address"] = v + + v = item.get("subnet_id") + if not is_empty_value(v): + transformed["subnet_id"] = v + + if transformed: + r.append(transformed) + + return r + + +def send_attach_nics_request(module, params, client): + url = build_path(module, "cloudservers/{id}/nics") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(attach_nics), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_volume_request(module, params, client, info): + path_parameters = { + "volume_id": ["volume_id"], + } + data = dict((key, navigate_value(info, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete_volume), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_attach_data_disk_parameters(opts, array_index): + params = dict() + + v = expand_attach_data_disk_volume_attachment(opts, array_index) + if not is_empty_value(v): + params["volumeAttachment"] = v + + return params + + +def expand_attach_data_disk_volume_attachment(d, array_index): + r = dict() + + v = navigate_value(d, ["data_volumes", "device"], array_index) + if not is_empty_value(v): + r["device"] = v + + v = navigate_value(d, ["data_volumes", "volume_id"], array_index) + if not is_empty_value(v): + r["volumeId"] = v + + return r + + +def send_attach_data_disk_request(module, params, client): + url = build_path(module, "cloudservers/{id}/attachvolume") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(attach_data_disk), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_delete_parameters(opts): + params = dict() + + params["delete_publicip"] = False + + params["delete_volume"] = False + + v = expand_delete_servers(opts, None) + if not is_empty_value(v): + params["servers"] = v + + return params + + +def expand_delete_servers(d, array_index): + new_ai = dict() + if array_index: + new_ai.update(array_index) + + req = [] + + n = 1 + for i 
in range(n): + transformed = dict() + + v = expand_delete_servers_id(d, new_ai) + if not is_empty_value(v): + transformed["id"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_delete_servers_id(d, array_index): + return d["ansible_module"].params.get("id") + + +def send_delete_request(module, params, client): + url = "cloudservers/delete" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait(config, result, client, timeout): + module = config.module + + url = build_path(module, "jobs/{job_id}", result) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["SUCCESS"], + ["RUNNING", "INIT"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_ecs_instance): error " + "waiting to be done, error= %s" % str(ex)) + + +def multi_invoke_delete_volume(config, opts, client, timeout): + module = config.module + + opts1 = None + expect = opts["data_volumes"] + current = opts["current_state"]["data_volumes"] + if expect and current: + v = [i["volume_id"] for i in expect] + opts1 = { + "data_volumes": [ + i for i in current if i["volume_id"] not in v + ] + } + + loop_val = navigate_value(opts1, ["data_volumes"]) + if not loop_val: + return + + for i in range(len(loop_val)): + r = send_delete_volume_request(module, None, client, loop_val[i]) + async_wait(config, r, client, timeout) + + +def multi_invoke_attach_data_disk(config, opts, client, timeout): + module = config.module + + opts1 = opts + expect = opts["data_volumes"] + current = opts["current_state"]["data_volumes"] + if expect and current: + v = [i["volume_id"] for i in current] + opts1 = { + "data_volumes": [ + i for i in expect if i["volume_id"] not in v + ] + } + + loop_val = navigate_value(opts1, ["data_volumes"]) + if not loop_val: + return + + for i in range(len(loop_val)): + params = build_attach_data_disk_parameters(opts1, {"data_volumes": i}) + r = send_attach_data_disk_request(module, params, client) + async_wait(config, r, client, timeout) + + +def send_read_request(module, client): + url = build_path(module, "cloudservers/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["server"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") + + result["OS-EXT-AZ:availability_zone"] = body.get( + "OS-EXT-AZ:availability_zone") + + result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") + + result["OS-EXT-SRV-ATTR:instance_name"] = body.get( + "OS-EXT-SRV-ATTR:instance_name") + + result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") + + result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") + + v = fill_read_resp_address(body.get("address")) + result["address"] = v + + result["config_drive"] = body.get("config_drive") + + result["created"] = body.get("created") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + v = 
fill_read_resp_flavor(body.get("flavor")) + result["flavor"] = v + + result["id"] = body.get("id") + + v = fill_read_resp_image(body.get("image")) + result["image"] = v + + result["key_name"] = body.get("key_name") + + v = fill_read_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["name"] = body.get("name") + + v = fill_read_resp_os_extended_volumes_volumes_attached( + body.get("os-extended-volumes:volumes_attached")) + result["os-extended-volumes:volumes_attached"] = v + + v = fill_read_resp_root_volume(body.get("root_volume")) + result["root_volume"] = v + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + return result + + +def fill_read_resp_address(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id") + + val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type") + + val["addr"] = item.get("addr") + + result.append(val) + + return result + + +def fill_read_resp_flavor(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def fill_read_resp_image(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def fill_read_resp_metadata(value): + if not value: + return None + + result = dict() + + result["image_name"] = value.get("image_name") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_read_resp_os_extended_volumes_volumes_attached(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["bootIndex"] = item.get("bootIndex") + + val["device"] = item.get("device") + + val["id"] = item.get("id") + + result.append(val) + + return result + + +def fill_read_resp_root_volume(value): + if not value: + return None + + result = dict() + + result["device"] = value.get("device") + + result["id"] = value.get("id") + + return result + + +def send_read_auto_recovery_request(module, client): + url = build_path(module, "cloudservers/{id}/autorecovery") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(read_auto_recovery), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def fill_read_auto_recovery_resp_body(body): + result = dict() + + result["support_auto_recovery"] = body.get("support_auto_recovery") + + return result + + +def flatten_options(response, array_index): + r = dict() + + v = navigate_value( + response, ["read", "OS-EXT-AZ:availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "config_drive"], array_index) + r["config_drive"] = v + + v = navigate_value(response, ["read", "created"], array_index) + r["created"] = v + + v = flatten_data_volumes(response, array_index) + r["data_volumes"] = v + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index) + r["disk_config_type"] = v + + v = flatten_enable_auto_recovery(response, array_index) + r["enable_auto_recovery"] = v + + v = navigate_value( + response, ["read", "enterprise_project_id"], array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "flavor", "id"], array_index) + r["flavor_name"] = v + + v = navigate_value( + response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index) + r["host_name"] = v + + v = 
navigate_value(response, ["read", "image", "id"], array_index)
+    r["image_id"] = v
+
+    v = navigate_value(
+        response, ["read", "metadata", "image_name"], array_index)
+    r["image_name"] = v
+
+    v = navigate_value(response, ["read", "name"], array_index)
+    r["name"] = v
+
+    v = flatten_nics(response, array_index)
+    r["nics"] = v
+
+    v = navigate_value(
+        response, ["read", "OS-EXT-STS:power_state"], array_index)
+    r["power_state"] = v
+
+    v = flatten_root_volume(response, array_index)
+    r["root_volume"] = v
+
+    v = navigate_value(
+        response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index)
+    r["server_alias"] = v
+
+    v = flatten_server_tags(response, array_index)
+    r["server_tags"] = v
+
+    v = navigate_value(response, ["read", "key_name"], array_index)
+    r["ssh_key_name"] = v
+
+    v = navigate_value(response, ["read", "status"], array_index)
+    r["status"] = v
+
+    v = navigate_value(
+        response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index)
+    r["user_data"] = v
+
+    v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index)
+    r["vpc_id"] = v
+
+    return r
+
+
+def flatten_data_volumes(d, array_index):
+    v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"],
+                       array_index)
+    if not v:
+        return None
+    n = len(v)
+    result = []
+
+    new_ai = dict()
+    if array_index:
+        new_ai.update(array_index)
+
+    for i in range(n):
+        new_ai["read.os-extended-volumes:volumes_attached"] = i
+
+        val = dict()
+
+        v = navigate_value(
+            d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai)
+        val["device"] = v
+
+        v = navigate_value(
+            d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai)
+        val["volume_id"] = v
+
+        for v in val.values():
+            if v is not None:
+                result.append(val)
+                break
+
+    return result if result else None
+
+
+def flatten_enable_auto_recovery(d, array_index):
+    v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"],
+                       array_index)
+    return v == "true"
+
+
+def flatten_nics(d, array_index):
+    v = navigate_value(d, ["read", "address"],
+                       array_index)
+    if not v:
+        return None
+    n = len(v)
+    result = []
+
+    new_ai = dict()
+    if array_index:
+        new_ai.update(array_index)
+
+    for i in range(n):
+        new_ai["read.address"] = i
+
+        val = dict()
+
+        v = navigate_value(d, ["read", "address", "addr"], new_ai)
+        val["ip_address"] = v
+
+        v = navigate_value(
+            d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai)
+        val["port_id"] = v
+
+        for v in val.values():
+            if v is not None:
+                result.append(val)
+                break
+
+    return result if result else None
+
+
+def flatten_root_volume(d, array_index):
+    result = dict()
+
+    v = navigate_value(d, ["read", "root_volume", "device"], array_index)
+    result["device"] = v
+
+    v = navigate_value(d, ["read", "root_volume", "id"], array_index)
+    result["volume_id"] = v
+
+    for v in result.values():
+        if v is not None:
+            return result
+    return None
+
+
+def flatten_server_tags(d, array_index):
+    v = navigate_value(d, ["read", "tags"], array_index)
+    if not v:
+        return None
+
+    # Tags come back as "key=value" strings; split on the first '=' only and
+    # skip malformed entries, so a tag without '=' cannot raise an IndexError.
+    r = dict()
+    for item in v:
+        v1 = item.split("=", 1)
+        if len(v1) == 2:
+            r[v1[0]] = v1[1]
+    return r
+
+
+def adjust_options(opts, states):
+    adjust_data_volumes(opts, states)
+
+    adjust_nics(opts, states)
+
+
+def adjust_data_volumes(parent_input, parent_cur):
+    iv = parent_input.get("data_volumes")
+    if not (iv and isinstance(iv, list)):
+        return
+
+    cv = parent_cur.get("data_volumes")
+    if not (cv and isinstance(cv, list)):
+        return
+
+    # Reorder the current list so its entries line up with the input order,
+    # matching on volume_id.
+    lcv = len(cv)
+    result = []
+    q = []
+    for iiv in iv:
+        if len(q) == lcv:
+            break
+
+        icv = None
+        for
j in range(lcv): + if j in q: + continue + + icv = cv[j] + + if iiv["volume_id"] != icv["volume_id"]: + continue + + result.append(icv) + q.append(j) + break + else: + break + + if len(q) != lcv: + for i in range(lcv): + if i not in q: + result.append(cv[i]) + + if len(result) != lcv: + raise Exception("adjust property(data_volumes) failed, " + "the array number is not equal") + + parent_cur["data_volumes"] = result + + +def adjust_nics(parent_input, parent_cur): + iv = parent_input.get("nics") + if not (iv and isinstance(iv, list)): + return + + cv = parent_cur.get("nics") + if not (cv and isinstance(cv, list)): + return + + lcv = len(cv) + result = [] + q = [] + for iiv in iv: + if len(q) == lcv: + break + + icv = None + for j in range(lcv): + if j in q: + continue + + icv = cv[j] + + if iiv["ip_address"] != icv["ip_address"]: + continue + + result.append(icv) + q.append(j) + break + else: + break + + if len(q) != lcv: + for i in range(lcv): + if i not in q: + result.append(cv[i]) + + if len(result) != lcv: + raise Exception("adjust property(nics) failed, " + "the array number is not equal") + + parent_cur["nics"] = result + + +def set_unreadable_options(opts, states): + states["admin_pass"] = opts.get("admin_pass") + + states["eip_id"] = opts.get("eip_id") + + set_unread_nics( + opts.get("nics"), states.get("nics")) + + set_unread_root_volume( + opts.get("root_volume"), states.get("root_volume")) + + states["security_groups"] = opts.get("security_groups") + + states["server_metadata"] = opts.get("server_metadata") + + +def set_unread_nics(inputv, curv): + if not (inputv and isinstance(inputv, list)): + return + + if not (curv and isinstance(curv, list)): + return + + lcv = len(curv) + q = [] + for iv in inputv: + if len(q) == lcv: + break + + cv = None + for j in range(lcv): + if j in q: + continue + + cv = curv[j] + + if iv["ip_address"] != cv["ip_address"]: + continue + + q.append(j) + break + else: + continue + + cv["subnet_id"] = iv.get("subnet_id") + + +def set_unread_root_volume(inputv, curv): + if not (inputv and isinstance(inputv, dict)): + return + + if not (curv and isinstance(curv, dict)): + return + + curv["size"] = inputv.get("size") + + curv["snapshot_id"] = inputv.get("snapshot_id") + + curv["volume_type"] = inputv.get("volume_type") + + +def set_readonly_options(opts, states): + opts["config_drive"] = states.get("config_drive") + + opts["created"] = states.get("created") + + opts["disk_config_type"] = states.get("disk_config_type") + + opts["host_name"] = states.get("host_name") + + opts["image_name"] = states.get("image_name") + + set_readonly_nics( + opts.get("nics"), states.get("nics")) + + opts["power_state"] = states.get("power_state") + + set_readonly_root_volume( + opts.get("root_volume"), states.get("root_volume")) + + opts["server_alias"] = states.get("server_alias") + + opts["status"] = states.get("status") + + +def set_readonly_nics(inputv, curv): + if not (curv and isinstance(curv, list)): + return + + if not (inputv and isinstance(inputv, list)): + return + + lcv = len(curv) + q = [] + for iv in inputv: + if len(q) == lcv: + break + + cv = None + for j in range(lcv): + if j in q: + continue + + cv = curv[j] + + if iv["ip_address"] != cv["ip_address"]: + continue + + q.append(j) + break + else: + continue + + iv["port_id"] = cv.get("port_id") + + +def set_readonly_root_volume(inputv, curv): + if not (inputv and isinstance(inputv, dict)): + return + + if not (curv and isinstance(curv, dict)): + return + + inputv["device"] = curv.get("device") + + 
inputv["volume_id"] = curv.get("volume_id") + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_ecs_instance): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["servers"], None) + + +def _build_identity_object(all_opts): + result = dict() + + result["OS-DCF:diskConfig"] = None + + v = navigate_value(all_opts, ["availability_zone"], None) + result["OS-EXT-AZ:availability_zone"] = v + + result["OS-EXT-SRV-ATTR:hostname"] = None + + result["OS-EXT-SRV-ATTR:instance_name"] = None + + v = navigate_value(all_opts, ["user_data"], None) + result["OS-EXT-SRV-ATTR:user_data"] = v + + result["OS-EXT-STS:power_state"] = None + + result["config_drive"] = None + + result["created"] = None + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + v = navigate_value(all_opts, ["enterprise_project_id"], None) + result["enterprise_project_id"] = v + + v = expand_list_flavor(all_opts, None) + result["flavor"] = v + + result["id"] = None + + v = expand_list_image(all_opts, None) + result["image"] = v + + v = navigate_value(all_opts, ["ssh_key_name"], None) + result["key_name"] = v + + v = expand_list_metadata(all_opts, None) + result["metadata"] = v + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + result["status"] = None + + v = expand_list_tags(all_opts, None) + result["tags"] = v + + return result + + +def expand_list_flavor(d, array_index): + r = dict() + + v = navigate_value(d, ["flavor_name"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_image(d, array_index): + r = dict() + + v = navigate_value(d, ["image_id"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_tags(d, array_index): + v = d.get("server_tags") + if not v: + return None + + return [k + "=" + v1 for k, v1 in v.items()] + + +def fill_list_resp_body(body): + result = dict() + + result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig") + + result["OS-EXT-AZ:availability_zone"] = body.get( + "OS-EXT-AZ:availability_zone") + + result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname") + + result["OS-EXT-SRV-ATTR:instance_name"] = body.get( + "OS-EXT-SRV-ATTR:instance_name") + + result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data") + + result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state") + + result["config_drive"] = body.get("config_drive") + + result["created"] = body.get("created") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + v = fill_list_resp_flavor(body.get("flavor")) + result["flavor"] = v + + result["id"] = body.get("id") + + v = fill_list_resp_image(body.get("image")) + result["image"] = v + + result["key_name"] = body.get("key_name") + + v = fill_list_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["name"] = body.get("name") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + return result + + +def fill_list_resp_flavor(value): + if not value: + return None + + result = dict() + + result["id"] = 
value.get("id") + + return result + + +def fill_list_resp_image(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def fill_list_resp_metadata(value): + if not value: + return None + + result = dict() + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def adjust_list_resp(opts, resp): + adjust_list_api_tags(opts, resp) + + +def adjust_list_api_tags(parent_input, parent_cur): + iv = parent_input.get("tags") + if not (iv and isinstance(iv, list)): + return + + cv = parent_cur.get("tags") + if not (cv and isinstance(cv, list)): + return + + result = [] + for iiv in iv: + if iiv not in cv: + break + + result.append(iiv) + + j = cv.index(iiv) + cv[j] = cv[-1] + cv.pop() + + if cv: + result.extend(cv) + parent_cur["tags"] = result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py b/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py new file mode 100644 index 000000000..7d445ddd2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py @@ -0,0 +1,1217 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_evs_disk +description: + - block storage management. +short_description: Creates a resource of Evs/Disk in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huaweicloud Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: + description: + - The timeouts for create operation. + type: str + default: '30m' + update: + description: + - The timeouts for update operation. + type: str + default: '30m' + delete: + description: + - The timeouts for delete operation. + type: str + default: '30m' + availability_zone: + description: + - Specifies the AZ where you want to create the disk. + type: str + required: true + name: + description: + - Specifies the disk name. The value can contain a maximum of 255 + bytes. + type: str + required: true + volume_type: + description: + - Specifies the disk type. Currently, the value can be SSD, SAS, or + SATA. + - SSD specifies the ultra-high I/O disk type. + - SAS specifies the high I/O disk type. + - SATA specifies the common I/O disk type. + - If the specified disk type is not available in the AZ, the + disk will fail to create. If the EVS disk is created from a + snapshot, the volume_type field must be the same as that of the + snapshot's source disk. + type: str + required: true + backup_id: + description: + - Specifies the ID of the backup that can be used to create a disk. + This parameter is mandatory when you use a backup to create the + disk. 
+        type: str
+        required: false
+    description:
+        description:
+            - Specifies the disk description. The value can contain a maximum
+              of 255 bytes.
+        type: str
+        required: false
+    enable_full_clone:
+        description:
+            - If the disk is created from a snapshot and linked cloning needs
+              to be used, set this parameter to True.
+        type: bool
+        required: false
+    enable_scsi:
+        description:
+            - If this parameter is set to True, the disk device type will be
+              SCSI, which allows ECS OSs to directly access underlying storage
+              media. SCSI reservation commands are supported. If this parameter
+              is set to False, the disk device type will be VBD, which supports
+              only simple SCSI read/write commands.
+            - If the parameter enable_share is set to True and this parameter
+              is not specified, shared SCSI disks are created. SCSI EVS disks
+              cannot be created from backups, which means that this parameter
+              cannot be True if backup_id has been specified.
+        type: bool
+        required: false
+    enable_share:
+        description:
+            - Specifies whether the disk is shareable. The default value is
+              False.
+        type: bool
+        required: false
+    encryption_id:
+        description:
+            - Specifies the encryption ID. Its length is fixed at 36 bytes.
+        type: str
+        required: false
+    enterprise_project_id:
+        description:
+            - Specifies the enterprise project ID. This ID is associated with
+              the disk during the disk creation. If it is not specified, the
+              disk is bound to the default enterprise project.
+        type: str
+        required: false
+    image_id:
+        description:
+            - Specifies the image ID. If this parameter is specified, the disk
+              is created from an image. BMS system disks cannot be
+              created from BMS images.
+        type: str
+        required: false
+    size:
+        description:
+            - Specifies the disk size, in GB. Its value ranges are as follows:
+              system disk 1 GB to 1024 GB, data disk 10 GB to 32768 GB. This
+              parameter is mandatory when you create an empty disk or use an
+              image or a snapshot to create a disk. If you use an image or a
+              snapshot to create a disk, the disk size must be greater than or
+              equal to the image or snapshot size. This parameter is optional
+              when you use a backup to create a disk. If this parameter is not
+              specified, the disk size is equal to the backup size.
+        type: int
+        required: false
+    snapshot_id:
+        description:
+            - Specifies the snapshot ID. If this parameter is specified, the
+              disk is created from a snapshot.
+        type: str
+        required: false
+extends_documentation_fragment:
+    - community.general.hwc
+    - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# test create disk
+- name: Create a disk
+  community.general.hwc_evs_disk:
+    availability_zone: "cn-north-1a"
+    name: "ansible_evs_disk_test"
+    volume_type: "SATA"
+    size: 10
+'''
+
+RETURN = '''
+    availability_zone:
+        description:
+            - Specifies the AZ where you want to create the disk.
+        type: str
+        returned: success
+    name:
+        description:
+            - Specifies the disk name. The value can contain a maximum of 255
+              bytes.
+        type: str
+        returned: success
+    volume_type:
+        description:
+            - Specifies the disk type. Currently, the value can be SSD, SAS, or
+              SATA.
+            - SSD specifies the ultra-high I/O disk type.
+            - SAS specifies the high I/O disk type.
+            - SATA specifies the common I/O disk type.
+            - If the specified disk type is not available in the AZ, the
+              disk will fail to be created. If the EVS disk is created from a
+              snapshot, the volume_type field must be the same as that of the
+              snapshot's source disk.
+        type: str
+        returned: success
+    backup_id:
+        description:
+            - Specifies the ID of the backup that can be used to create a disk.
+              This parameter is mandatory when you use a backup to create the
+              disk.
+        type: str
+        returned: success
+    description:
+        description:
+            - Specifies the disk description. The value can contain a maximum
+              of 255 bytes.
+        type: str
+        returned: success
+    enable_full_clone:
+        description:
+            - If the disk is created from a snapshot and linked cloning needs
+              to be used, set this parameter to True.
+        type: bool
+        returned: success
+    enable_scsi:
+        description:
+            - If this parameter is set to True, the disk device type will be
+              SCSI, which allows ECS OSs to directly access underlying storage
+              media. SCSI reservation commands are supported. If this parameter
+              is set to False, the disk device type will be VBD, which supports
+              only simple SCSI read/write commands.
+            - If the parameter enable_share is set to True and this parameter
+              is not specified, shared SCSI disks are created. SCSI EVS disks
+              cannot be created from backups, which means that this parameter
+              cannot be True if backup_id has been specified.
+        type: bool
+        returned: success
+    enable_share:
+        description:
+            - Specifies whether the disk is shareable. The default value is
+              False.
+        type: bool
+        returned: success
+    encryption_id:
+        description:
+            - Specifies the encryption ID. Its length is fixed at 36 bytes.
+        type: str
+        returned: success
+    enterprise_project_id:
+        description:
+            - Specifies the enterprise project ID. This ID is associated with
+              the disk during the disk creation. If it is not specified, the
+              disk is bound to the default enterprise project.
+        type: str
+        returned: success
+    image_id:
+        description:
+            - Specifies the image ID. If this parameter is specified, the disk
+              is created from an image. BMS system disks cannot be
+              created from BMS images.
+        type: str
+        returned: success
+    size:
+        description:
+            - Specifies the disk size, in GB. Its value ranges are as follows:
+              system disk 1 GB to 1024 GB, data disk 10 GB to 32768 GB. This
+              parameter is mandatory when you create an empty disk or use an
+              image or a snapshot to create a disk. If you use an image or a
+              snapshot to create a disk, the disk size must be greater than or
+              equal to the image or snapshot size. This parameter is optional
+              when you use a backup to create a disk. If this parameter is not
+              specified, the disk size is equal to the backup size.
+        type: int
+        returned: success
+    snapshot_id:
+        description:
+            - Specifies the snapshot ID. If this parameter is specified, the
+              disk is created from a snapshot.
+        type: str
+        returned: success
+    attachments:
+        description:
+            - Specifies the disk attachment information.
+        type: complex
+        returned: success
+        contains:
+            attached_at:
+                description:
+                    - Specifies the time when the disk was attached. Time
+                      format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+                type: str
+                returned: success
+            attachment_id:
+                description:
+                    - Specifies the ID of the attachment information.
+                type: str
+                returned: success
+            device:
+                description:
+                    - Specifies the device name.
+                type: str
+                returned: success
+            server_id:
+                description:
+                    - Specifies the ID of the server to which the disk is
+                      attached.
+                type: str
+                returned: success
+    backup_policy_id:
+        description:
+            - Specifies the backup policy ID.
+        type: str
+        returned: success
+    created_at:
+        description:
+            - Specifies the time when the disk was created. Time format is 'UTC
+              YYYY-MM-DDTHH:MM:SS'.
+        type: str
+        returned: success
+    is_bootable:
+        description:
+            - Specifies whether the disk is bootable.
+        type: bool
+        returned: success
+    is_readonly:
+        description:
+            - Specifies whether the disk is read-only or read/write. True
+              indicates that the disk is read-only. False indicates that the
+              disk is read/write.
+        type: bool
+        returned: success
+    source_volume_id:
+        description:
+            - Specifies the source disk ID. This parameter has a value if the
+              disk is created from a source disk.
+        type: str
+        returned: success
+    status:
+        description:
+            - Specifies the disk status.
+        type: str
+        returned: success
+    tags:
+        description:
+            - Specifies the disk tags.
+        type: dict
+        returned: success
+'''
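+
+# A hypothetical follow-up to the EXAMPLES task above (values illustrative):
+# re-running the module with only a larger size yields an os-extend action
+# built by build_extend_disk_parameters() below, so the disk is resized in
+# place rather than recreated.
+#
+#   - community.general.hwc_evs_disk:
+#       availability_zone: "cn-north-1a"
+#       name: "ansible_evs_disk_test"
+#       volume_type: "SATA"
+#       size: 20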
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+    Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+    get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+    return HwcModule(
+        argument_spec=dict(
+            state=dict(default='present', choices=['present', 'absent'],
+                       type='str'),
+            timeouts=dict(type='dict', options=dict(
+                create=dict(default='30m', type='str'),
+                update=dict(default='30m', type='str'),
+                delete=dict(default='30m', type='str'),
+            ), default=dict()),
+            availability_zone=dict(type='str', required=True),
+            name=dict(type='str', required=True),
+            volume_type=dict(type='str', required=True),
+            backup_id=dict(type='str'),
+            description=dict(type='str'),
+            enable_full_clone=dict(type='bool'),
+            enable_scsi=dict(type='bool'),
+            enable_share=dict(type='bool'),
+            encryption_id=dict(type='str'),
+            enterprise_project_id=dict(type='str'),
+            image_id=dict(type='str'),
+            size=dict(type='int'),
+            snapshot_id=dict(type='str')
+        ),
+        supports_check_mode=True,
+    )
+
+
+def main():
+    """Main function"""
+
+    module = build_module()
+    config = Config(module, "evs")
+
+    try:
+        _init(config)
+        is_exist = module.params.get('id')
+
+        result = None
+        changed = False
+        if module.params['state'] == 'present':
+            if not is_exist:
+                if not module.check_mode:
+                    create(config)
+                changed = True
+
+            # Converge: read the disk back, diff it against the input,
+            # update once, then verify the update took.
+            inputv = user_input_parameters(module)
+            resp, array_index = read_resource(config)
+            result = build_state(inputv, resp, array_index)
+            set_readonly_options(inputv, result)
+            if are_different_dicts(inputv, result):
+                if not module.check_mode:
+                    update(config, inputv, result)
+
+                    inputv = user_input_parameters(module)
+                    resp, array_index = read_resource(config)
+                    result = build_state(inputv, resp, array_index)
+                    set_readonly_options(inputv, result)
+                    if are_different_dicts(inputv, result):
+                        raise Exception("Update resource failed, "
+                                        "some attributes are not updated")
+
+                changed = True
+
+            result['id'] = module.params.get('id')
+        else:
+            result = dict()
+            if is_exist:
+                if not module.check_mode:
+                    delete(config)
+                changed = True
+
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+    else:
+        result['changed'] = changed
+        module.exit_json(**result)
+
+
+def _init(config):
+    module = config.module
+    if module.params.get('id'):
+        return
+
+    v = search_resource(config)
+    n = len(v)
+    if n > 1:
+        raise Exception("Found more than one resource(%s)" % ", ".join([
+            navigate_value(i, ["id"])
+            for i in v
+        ]))
+
+    if n == 1:
+        module.params['id'] = navigate_value(v[0], ["id"])
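+
+# Note: search_resource() further down matches candidates on the disk name
+# alone, so a unique name per project is what makes this module idempotent
+# when no id is supplied; _init above raises if several disks share the name.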
module.params.get("enable_full_clone"), + "enable_scsi": module.params.get("enable_scsi"), + "enable_share": module.params.get("enable_share"), + "encryption_id": module.params.get("encryption_id"), + "enterprise_project_id": module.params.get("enterprise_project_id"), + "image_id": module.params.get("image_id"), + "name": module.params.get("name"), + "size": module.params.get("size"), + "snapshot_id": module.params.get("snapshot_id"), + "volume_type": module.params.get("volume_type"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "volumev3", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + opts["ansible_module"] = module + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + + client1 = config.client(get_region(module), "volume", "project") + client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") + obj = async_wait(config, r, client1, timeout) + module.params['id'] = navigate_value(obj, ["entities", "volume_id"]) + + +def update(config, expect_state, current_state): + module = config.module + expect_state["current_state"] = current_state + current_state["current_state"] = current_state + client = config.client(get_region(module), "evs", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + + params = build_update_parameters(expect_state) + params1 = build_update_parameters(current_state) + if params and are_different_dicts(params, params1): + send_update_request(module, params, client) + + params = build_extend_disk_parameters(expect_state) + params1 = build_extend_disk_parameters(current_state) + if params and are_different_dicts(params, params1): + client1 = config.client(get_region(module), "evsv2.1", "project") + r = send_extend_disk_request(module, params, client1) + + client1 = config.client(get_region(module), "volume", "project") + client1.endpoint = client1.endpoint.replace("/v2/", "/v1/") + async_wait(config, r, client1, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "evs", "project") + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + + r = send_delete_request(module, None, client) + + client = config.client(get_region(module), "volume", "project") + client.endpoint = client.endpoint.replace("/v2/", "/v1/") + async_wait(config, r, client, timeout) + + +def read_resource(config): + module = config.module + client = config.client(get_region(module), "volumev3", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return res, None + + +def build_state(opts, response, array_index): + states = flatten_options(response, array_index) + set_unreadable_options(opts, states) + return states + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enable_share"]) + if v or v in [False, 0]: + query_params.append( + "multiattach=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["name"]) + if v or v in [False, 0]: + query_params.append( + "name=" + (str(v) if v else str(v).lower())) + + v = navigate_value(opts, ["availability_zone"]) + if v or v in [False, 0]: + query_params.append( + "availability_zone=" + (str(v) if v else str(v).lower())) + + query_link = "?limit=10&offset={start}" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = 
config.module + client = config.client(get_region(module), "volumev3", "project") + opts = user_input_parameters(module) + name = module.params.get("name") + query_link = _build_query_link(opts) + link = "os-vendor-volumes/detail" + query_link + + result = [] + p = {'start': 0} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + if name == item.get("name"): + result.append(item) + + if len(result) > 1: + break + + p['start'] += len(r) + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["backup_id"], None) + if not is_empty_value(v): + params["backup_id"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = navigate_value(opts, ["image_id"], None) + if not is_empty_value(v): + params["imageRef"] = v + + v = expand_create_metadata(opts, None) + if not is_empty_value(v): + params["metadata"] = v + + v = navigate_value(opts, ["enable_share"], None) + if not is_empty_value(v): + params["multiattach"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["size"], None) + if not is_empty_value(v): + params["size"] = v + + v = navigate_value(opts, ["snapshot_id"], None) + if not is_empty_value(v): + params["snapshot_id"] = v + + v = navigate_value(opts, ["volume_type"], None) + if not is_empty_value(v): + params["volume_type"] = v + + if not params: + return params + + params = {"volume": params} + + return params + + +def expand_create_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["encryption_id"], array_index) + if not is_empty_value(v): + r["__system__cmkid"] = v + + v = expand_create_metadata_system_encrypted(d, array_index) + if not is_empty_value(v): + r["__system__encrypted"] = v + + v = expand_create_metadata_full_clone(d, array_index) + if not is_empty_value(v): + r["full_clone"] = v + + v = expand_create_metadata_hw_passthrough(d, array_index) + if not is_empty_value(v): + r["hw:passthrough"] = v + + return r + + +def expand_create_metadata_system_encrypted(d, array_index): + v = navigate_value(d, ["encryption_id"], array_index) + return "1" if v else "" + + +def expand_create_metadata_full_clone(d, array_index): + v = navigate_value(d, ["enable_full_clone"], array_index) + return "0" if v else "" + + +def expand_create_metadata_hw_passthrough(d, array_index): + v = navigate_value(d, ["enable_scsi"], array_index) + if v is None: + return v + return "true" if v else "false" + + +def send_create_request(module, params, client): + url = "cloudvolumes" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if v is not None: + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"volume": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, 
"cloudvolumes/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "cloudvolumes/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def build_extend_disk_parameters(opts): + params = dict() + + v = expand_extend_disk_os_extend(opts, None) + if not is_empty_value(v): + params["os-extend"] = v + + return params + + +def expand_extend_disk_os_extend(d, array_index): + r = dict() + + v = navigate_value(d, ["size"], array_index) + if not is_empty_value(v): + r["new_size"] = v + + return r + + +def send_extend_disk_request(module, params, client): + url = build_path(module, "cloudvolumes/{id}/action") + + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(extend_disk), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait(config, result, client, timeout): + module = config.module + + path_parameters = { + "job_id": ["job_id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "jobs/{job_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["SUCCESS"], + ["RUNNING", "INIT"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_evs_disk): error " + "waiting to be done, error= %s" % str(ex)) + + +def send_read_request(module, client): + url = build_path(module, "os-vendor-volumes/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["volume"], None) + + +def fill_read_resp_body(body): + result = dict() + + v = fill_read_resp_attachments(body.get("attachments")) + result["attachments"] = v + + result["availability_zone"] = body.get("availability_zone") + + result["bootable"] = body.get("bootable") + + result["created_at"] = body.get("created_at") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + v = fill_read_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["multiattach"] = body.get("multiattach") + + result["name"] = body.get("name") + + result["size"] = body.get("size") + + result["snapshot_id"] = body.get("snapshot_id") + + result["source_volid"] = body.get("source_volid") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata")) + result["volume_image_metadata"] = v + + result["volume_type"] = body.get("volume_type") + + return result + + +def fill_read_resp_attachments(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["attached_at"] = item.get("attached_at") + + val["attachment_id"] = item.get("attachment_id") + + 
val["device"] = item.get("device") + + val["server_id"] = item.get("server_id") + + result.append(val) + + return result + + +def fill_read_resp_metadata(value): + if not value: + return None + + result = dict() + + result["__system__cmkid"] = value.get("__system__cmkid") + + result["attached_mode"] = value.get("attached_mode") + + result["full_clone"] = value.get("full_clone") + + result["hw:passthrough"] = value.get("hw:passthrough") + + result["policy"] = value.get("policy") + + result["readonly"] = value.get("readonly") + + return result + + +def fill_read_resp_volume_image_metadata(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +def flatten_options(response, array_index): + r = dict() + + v = flatten_attachments(response, array_index) + r["attachments"] = v + + v = navigate_value(response, ["read", "availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "metadata", "policy"], array_index) + r["backup_policy_id"] = v + + v = navigate_value(response, ["read", "created_at"], array_index) + r["created_at"] = v + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = flatten_enable_full_clone(response, array_index) + r["enable_full_clone"] = v + + v = flatten_enable_scsi(response, array_index) + r["enable_scsi"] = v + + v = navigate_value(response, ["read", "multiattach"], array_index) + r["enable_share"] = v + + v = navigate_value( + response, ["read", "metadata", "__system__cmkid"], array_index) + r["encryption_id"] = v + + v = navigate_value( + response, ["read", "enterprise_project_id"], array_index) + r["enterprise_project_id"] = v + + v = navigate_value( + response, ["read", "volume_image_metadata", "id"], array_index) + r["image_id"] = v + + v = flatten_is_bootable(response, array_index) + r["is_bootable"] = v + + v = flatten_is_readonly(response, array_index) + r["is_readonly"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "size"], array_index) + r["size"] = v + + v = navigate_value(response, ["read", "snapshot_id"], array_index) + r["snapshot_id"] = v + + v = navigate_value(response, ["read", "source_volid"], array_index) + r["source_volume_id"] = v + + v = navigate_value(response, ["read", "status"], array_index) + r["status"] = v + + v = navigate_value(response, ["read", "tags"], array_index) + r["tags"] = v + + v = navigate_value(response, ["read", "volume_type"], array_index) + r["volume_type"] = v + + return r + + +def flatten_attachments(d, array_index): + v = navigate_value(d, ["read", "attachments"], + array_index) + if not v: + return None + n = len(v) + result = [] + + new_ai = dict() + if array_index: + new_ai.update(array_index) + + for i in range(n): + new_ai["read.attachments"] = i + + val = dict() + + v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai) + val["attached_at"] = v + + v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai) + val["attachment_id"] = v + + v = navigate_value(d, ["read", "attachments", "device"], new_ai) + val["device"] = v + + v = navigate_value(d, ["read", "attachments", "server_id"], new_ai) + val["server_id"] = v + + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if result else None + + +def flatten_enable_full_clone(d, array_index): + v = navigate_value(d, ["read", "metadata", "full_clone"], + array_index) + if v is None: 
+ return v + return True if v == "0" else False + + +def flatten_enable_scsi(d, array_index): + v = navigate_value(d, ["read", "metadata", "hw:passthrough"], + array_index) + if v is None: + return v + return True if v in ["true", "True"] else False + + +def flatten_is_bootable(d, array_index): + v = navigate_value(d, ["read", "bootable"], array_index) + if v is None: + return v + return True if v in ["true", "True"] else False + + +def flatten_is_readonly(d, array_index): + v = navigate_value(d, ["read", "metadata", "readonly"], + array_index) + if v is None: + return v + return True if v in ["true", "True"] else False + + +def set_unreadable_options(opts, states): + states["backup_id"] = opts.get("backup_id") + + +def set_readonly_options(opts, states): + opts["attachments"] = states.get("attachments") + + opts["backup_policy_id"] = states.get("backup_policy_id") + + opts["created_at"] = states.get("created_at") + + opts["is_bootable"] = states.get("is_bootable") + + opts["is_readonly"] = states.get("is_readonly") + + opts["source_volume_id"] = states.get("source_volume_id") + + opts["status"] = states.get("status") + + opts["tags"] = states.get("tags") + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_evs_disk): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["volumes"], None) + + +def expand_list_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["encryption_id"], array_index) + r["__system__cmkid"] = v + + r["attached_mode"] = None + + v = navigate_value(d, ["enable_full_clone"], array_index) + r["full_clone"] = v + + v = navigate_value(d, ["enable_scsi"], array_index) + r["hw:passthrough"] = v + + r["policy"] = None + + r["readonly"] = None + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_volume_image_metadata(d, array_index): + r = dict() + + v = navigate_value(d, ["image_id"], array_index) + r["id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def fill_list_resp_body(body): + result = dict() + + v = fill_list_resp_attachments(body.get("attachments")) + result["attachments"] = v + + result["availability_zone"] = body.get("availability_zone") + + result["bootable"] = body.get("bootable") + + result["created_at"] = body.get("created_at") + + result["description"] = body.get("description") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + v = fill_list_resp_metadata(body.get("metadata")) + result["metadata"] = v + + result["multiattach"] = body.get("multiattach") + + result["name"] = body.get("name") + + result["size"] = body.get("size") + + result["snapshot_id"] = body.get("snapshot_id") + + result["source_volid"] = body.get("source_volid") + + result["status"] = body.get("status") + + result["tags"] = body.get("tags") + + v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata")) + result["volume_image_metadata"] = v + + result["volume_type"] = body.get("volume_type") + + return result + + +def fill_list_resp_attachments(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["attached_at"] = item.get("attached_at") + + val["attachment_id"] = item.get("attachment_id") + + val["device"] = item.get("device") + + val["server_id"] = item.get("server_id") + + result.append(val) + + return result + + +def 
fill_list_resp_metadata(value): + if not value: + return None + + result = dict() + + result["__system__cmkid"] = value.get("__system__cmkid") + + result["attached_mode"] = value.get("attached_mode") + + result["full_clone"] = value.get("full_clone") + + result["hw:passthrough"] = value.get("hw:passthrough") + + result["policy"] = value.get("policy") + + result["readonly"] = value.get("readonly") + + return result + + +def fill_list_resp_volume_image_metadata(value): + if not value: + return None + + result = dict() + + result["id"] = value.get("id") + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py b/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py new file mode 100644 index 000000000..357fd5520 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py @@ -0,0 +1,500 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2018 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_network_vpc +description: + - Represents a VPC resource. +short_description: Creates a Huawei Cloud VPC +author: Huawei Inc. (@huaweicloud) +requirements: + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operation. + type: dict + default: {} + suboptions: + create: + description: + - The timeout for the create operation. + type: str + default: '15m' + update: + description: + - The timeout for the update operation. + type: str + default: '15m' + delete: + description: + - The timeout for the delete operation. + type: str + default: '15m' + name: + description: + - The name of the VPC. + type: str + required: true + cidr: + description: + - The range of available subnets in the VPC. + type: str + required: true +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Create a VPC + community.general.hwc_network_vpc: + identity_endpoint: "{{ identity_endpoint }}" + user: "{{ user }}" + password: "{{ password }}" + domain: "{{ domain }}" + project: "{{ project }}" + region: "{{ region }}" + name: "vpc_1" + cidr: "192.168.100.0/24" + state: present +''' + +RETURN = ''' + id: + description: + - the ID of the VPC. + type: str + returned: success + name: + description: + - the name of the VPC. + type: str + returned: success + cidr: + description: + - the range of available subnets in the VPC. + type: str + returned: success + status: + description: + - the status of the VPC. + type: str + returned: success + routes: + description: + - the route information. + type: complex + returned: success + contains: + destination: + description: + - the destination network segment of a route. + type: str + returned: success + next_hop: + description: + - the next hop of a route.
If the route type is peering, + it will provide VPC peering connection ID. + type: str + returned: success + enable_shared_snat: + description: + - show whether the shared snat is enabled. + type: bool + returned: success +''' + +############################################################################### +# Imports +############################################################################### + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, + HwcClientException404, HwcModule, + are_different_dicts, is_empty_value, + wait_to_finish, get_region, + build_path, navigate_value) +import re + +############################################################################### +# Main +############################################################################### + + +def main(): + """Main function""" + + module = HwcModule( + argument_spec=dict( + state=dict( + default='present', choices=['present', 'absent'], type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + update=dict(default='15m', type='str'), + delete=dict(default='15m', type='str'), + ), default=dict()), + name=dict(required=True, type='str'), + cidr=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + config = Config(module, 'vpc') + + state = module.params['state'] + + if (not module.params.get("id")) and module.params.get("name"): + module.params['id'] = get_id_by_name(config) + + fetch = None + link = self_link(module) + # the link will include Nones if required format parameters are missed + if not re.search('/None/|/None$', link): + client = config.client(get_region(module), "vpc", "project") + fetch = fetch_resource(module, client, link) + if fetch: + fetch = fetch.get('vpc') + changed = False + + if fetch: + if state == 'present': + expect = _get_editable_properties(module) + current_state = response_to_hash(module, fetch) + current = {"cidr": current_state["cidr"]} + if are_different_dicts(expect, current): + if not module.check_mode: + fetch = update(config, self_link(module)) + fetch = response_to_hash(module, fetch.get('vpc')) + changed = True + else: + fetch = current_state + else: + if not module.check_mode: + delete(config, self_link(module)) + fetch = {} + changed = True + else: + if state == 'present': + if not module.check_mode: + fetch = create(config, "vpcs") + fetch = response_to_hash(module, fetch.get('vpc')) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + r = None + try: + r = client.post(link, resource_to_create(module)) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error creating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + wait_done = wait_for_operation(config, 'create', r) + v = "" + try: + v = navigate_value(wait_done, ['vpc', 'id']) + except Exception as ex: + module.fail_json(msg=str(ex)) + + url = build_path(module, 'vpcs/{op_id}', {'op_id': v}) + return fetch_resource(module, client, url) + + +def update(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + r = None + try: + r = client.put(link, resource_to_update(module)) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error updating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + 
wait_for_operation(config, 'update', r) + + return fetch_resource(module, client, link) + + +def delete(config, link): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + try: + client.delete(link) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error deleting " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + wait_for_delete(module, client, link) + + +def fetch_resource(module, client, link): + try: + return client.get(link) + except HwcClientException as ex: + msg = ("module(hwc_network_vpc): error fetching " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def get_id_by_name(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + name = module.params.get("name") + link = "vpcs" + query_link = "?marker={marker}&limit=10" + link += query_link + not_format_keys = re.findall("={marker}", link) + none_values = re.findall("=None", link) + + if not (not_format_keys or none_values): + r = None + try: + r = client.get(link) + except Exception: + pass + if r is None: + return None + r = r.get('vpcs', []) + ids = [ + i.get('id') for i in r if i.get('name', '') == name + ] + if not ids: + return None + elif len(ids) == 1: + return ids[0] + else: + module.fail_json( + msg="Multiple resources with same name are found.") + elif none_values: + module.fail_json( + msg="Can not find id by name because url includes None.") + else: + p = {'marker': ''} + ids = set() + while True: + r = None + try: + r = client.get(link.format(**p)) + except Exception: + pass + if r is None: + break + r = r.get('vpcs', []) + if r == []: + break + for i in r: + if i.get('name') == name: + ids.add(i.get('id')) + if len(ids) >= 2: + module.fail_json( + msg="Multiple resources with same name are found.") + + p['marker'] = r[-1].get('id') + + return ids.pop() if ids else None + + +def self_link(module): + return build_path(module, "vpcs/{id}") + + +def resource_to_create(module): + params = dict() + + v = module.params.get('cidr') + if not is_empty_value(v): + params["cidr"] = v + + v = module.params.get('name') + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"vpc": params} + + return params + + +def resource_to_update(module): + params = dict() + + v = module.params.get('cidr') + if not is_empty_value(v): + params["cidr"] = v + + if not params: + return params + + params = {"vpc": params} + + return params + + +def _get_editable_properties(module): + return { + "cidr": module.params.get("cidr"), + } + + +def response_to_hash(module, response): + """ Remove unnecessary properties from the response. + This is for doing comparisons with Ansible's current parameters. 
+ """ + return { + u'id': response.get(u'id'), + u'name': response.get(u'name'), + u'cidr': response.get(u'cidr'), + u'status': response.get(u'status'), + u'routes': VpcRoutesArray( + response.get(u'routes', []), module).from_response(), + u'enable_shared_snat': response.get(u'enable_shared_snat') + } + + +def wait_for_operation(config, op_type, op_result): + module = config.module + op_id = "" + try: + op_id = navigate_value(op_result, ['vpc', 'id']) + except Exception as ex: + module.fail_json(msg=str(ex)) + + url = build_path(module, "vpcs/{op_id}", {'op_id': op_id}) + timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m')) + states = { + 'create': { + 'allowed': ['CREATING', 'DONW', 'OK'], + 'complete': ['OK'], + }, + 'update': { + 'allowed': ['PENDING_UPDATE', 'DONW', 'OK'], + 'complete': ['OK'], + } + } + + return wait_for_completion(url, timeout, states[op_type]['allowed'], + states[op_type]['complete'], config) + + +def wait_for_completion(op_uri, timeout, allowed_states, + complete_states, config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + def _refresh_status(): + r = None + try: + r = fetch_resource(module, client, op_uri) + except Exception: + return None, "" + + status = "" + try: + status = navigate_value(r, ['vpc', 'status']) + except Exception: + return None, "" + + return r, status + + try: + return wait_to_finish(complete_states, allowed_states, + _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def wait_for_delete(module, client, link): + + def _refresh_status(): + try: + client.get(link) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m')) + try: + return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +class VpcRoutesArray(object): + def __init__(self, request, module): + self.module = module + if request: + self.request = request + else: + self.request = [] + + def to_request(self): + items = [] + for item in self.request: + items.append(self._request_for_item(item)) + return items + + def from_response(self): + items = [] + for item in self.request: + items.append(self._response_from_item(item)) + return items + + def _request_for_item(self, item): + return { + u'destination': item.get('destination'), + u'nexthop': item.get('next_hop') + } + + def _response_from_item(self, item): + return { + u'destination': item.get(u'destination'), + u'next_hop': item.get(u'nexthop') + } + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py new file mode 100644 index 000000000..88207d3f9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py @@ -0,0 +1,344 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: 
diff --git a/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py new file mode 100644 index 000000000..88207d3f9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py @@ -0,0 +1,344 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_smn_topic +description: + - Represents an SMN notification topic resource. +short_description: Creates a resource of SMNTopic in Huawei Cloud +author: Huawei Inc. (@huaweicloud) +requirements: + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + display_name: + description: + - Topic display name, which is presented as the name of the email + sender in an email message. The topic display name contains a + maximum of 192 bytes. + type: str + required: false + name: + description: + - Name of the topic to be created. The topic name is a string of 1 + to 256 characters. It must contain upper- or lower-case letters, + digits, hyphens (-), and underscores C(_), and must start with a + letter or digit. + type: str + required: true +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Create an SMN topic + community.general.hwc_smn_topic: + identity_endpoint: "{{ identity_endpoint }}" + user_name: "{{ user_name }}" + password: "{{ password }}" + domain_name: "{{ domain_name }}" + project_name: "{{ project_name }}" + region: "{{ region }}" + name: "ansible_smn_topic_test" + state: present +''' + +RETURN = ''' +create_time: + description: + - Time when the topic was created. + returned: success + type: str +display_name: + description: + - Topic display name, which is presented as the name of the email + sender in an email message. The topic display name contains a + maximum of 192 bytes. + returned: success + type: str +name: + description: + - Name of the topic to be created. The topic name is a string of 1 + to 256 characters. It must contain upper- or lower-case letters, + digits, hyphens (-), and underscores C(_), and must start with a + letter or digit. + returned: success + type: str +push_policy: + description: + - Message pushing policy. 0 indicates that the message sending + fails and the message is cached in the queue. 1 indicates that + the failed message is discarded. + returned: success + type: int +topic_urn: + description: + - Resource identifier of a topic, which is unique. + returned: success + type: str +update_time: + description: + - Time when the topic was updated.
+ returned: success + type: str +''' + +############################################################################### +# Imports +############################################################################### + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException, + HwcModule, navigate_value, + are_different_dicts, is_empty_value, + build_path, get_region) +import re + +############################################################################### +# Main +############################################################################### + + +def main(): + """Main function""" + + module = HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + display_name=dict(type='str'), + name=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + + config = Config(module, "smn") + + state = module.params['state'] + + if not module.params.get("id"): + module.params['id'] = get_resource_id(config) + + fetch = None + link = self_link(module) + # the link will include Nones if required format parameters are missed + if not re.search('/None/|/None$', link): + client = config.client(get_region(module), "smn", "project") + fetch = fetch_resource(module, client, link) + changed = False + + if fetch: + if state == 'present': + expect = _get_resource_editable_properties(module) + current_state = response_to_hash(module, fetch) + current = {'display_name': current_state['display_name']} + if are_different_dicts(expect, current): + if not module.check_mode: + fetch = update(config) + fetch = response_to_hash(module, fetch) + changed = True + else: + fetch = current_state + else: + if not module.check_mode: + delete(config) + fetch = {} + changed = True + else: + if state == 'present': + if not module.check_mode: + fetch = create(config) + fetch = response_to_hash(module, fetch) + changed = True + else: + fetch = {} + + fetch.update({'changed': changed}) + + module.exit_json(**fetch) + + +def create(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = "notifications/topics" + r = None + try: + r = client.post(link, create_resource_opts(module)) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error creating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + return get_resource(config, r) + + +def update(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = self_link(module) + try: + client.put(link, update_resource_opts(module)) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error updating " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + return fetch_resource(module, client, link) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = self_link(module) + try: + client.delete(link) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error deleting " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def fetch_resource(module, client, link): + try: + return client.get(link) + except HwcClientException as ex: + msg = ("module(hwc_smn_topic): error fetching " + "resource, error: %s" % str(ex)) + module.fail_json(msg=msg) + + +def get_resource(config, result): + module = config.module + client = config.client(get_region(module), "smn", "project") + + v = "" + try: + v = navigate_value(result, ['topic_urn']) 
+ except Exception as ex: + module.fail_json(msg=str(ex)) + + d = {'topic_urn': v} + url = build_path(module, 'notifications/topics/{topic_urn}', d) + + return fetch_resource(module, client, url) + + +def get_resource_id(config): + module = config.module + client = config.client(get_region(module), "smn", "project") + + link = "notifications/topics" + query_link = "?offset={offset}&limit=10" + link += query_link + + p = {'offset': 0} + v = module.params.get('name') + ids = set() + while True: + r = None + try: + r = client.get(link.format(**p)) + except Exception: + pass + if r is None: + break + r = r.get('topics', []) + if r == []: + break + for i in r: + if i.get('name') == v: + ids.add(i.get('topic_urn')) + if len(ids) >= 2: + module.fail_json(msg="Multiple resources are found") + + p['offset'] += 1 + + return ids.pop() if ids else None + + +def self_link(module): + return build_path(module, "notifications/topics/{id}") + + +def create_resource_opts(module): + params = dict() + + v = module.params.get('display_name') + if not is_empty_value(v): + params["display_name"] = v + + v = module.params.get('name') + if not is_empty_value(v): + params["name"] = v + + return params + + +def update_resource_opts(module): + params = dict() + + v = module.params.get('display_name') + if not is_empty_value(v): + params["display_name"] = v + + return params + + +def _get_resource_editable_properties(module): + return { + "display_name": module.params.get("display_name"), + } + + +def response_to_hash(module, response): + """Remove unnecessary properties from the response. + This is for doing comparisons with Ansible's current parameters. + """ + return { + u'create_time': response.get(u'create_time'), + u'display_name': response.get(u'display_name'), + u'name': response.get(u'name'), + u'push_policy': _push_policy_convert_from_response( + response.get('push_policy')), + u'topic_urn': response.get(u'topic_urn'), + u'update_time': response.get(u'update_time') + } + + +def _push_policy_convert_from_response(value): + return { + 0: "the message sending fails and is cached in the queue", + 1: "the failed message is discarded", + }.get(int(value)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py new file mode 100644 index 000000000..9fc0361b3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py @@ -0,0 +1,884 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_vpc_eip +description: + - elastic ip management. +short_description: Creates a resource of Vpc/EIP in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. 
+ type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operation. + type: dict + default: {} + suboptions: + create: + description: + - The timeout for the create operation. + type: str + default: '5m' + update: + description: + - The timeout for the update operation. + type: str + default: '5m' + type: + description: + - Specifies the EIP type. + type: str + required: true + dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + required: false + suboptions: + charge_mode: + description: + - Specifies whether the bandwidth is billed by traffic or + by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is a null character + string, the default value bandwidth is used. For IPv6 + addresses, the default parameter value is bandwidth + outside China and is traffic in China. + type: str + required: true + name: + description: + - Specifies the bandwidth name. The value is a string of 1 + to 64 characters that can contain letters, digits, + underscores C(_), hyphens (-), and periods (.). + type: str + required: true + size: + description: + - Specifies the bandwidth size. The value ranges from 1 + Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You + can see the bandwidth range of each region on the + management console.) The minimum unit for bandwidth + adjustment varies depending on the bandwidth range. The + details are as follows. + - The minimum unit is 1 Mbit/s if the allowed bandwidth + size ranges from 0 to 300 Mbit/s (with 300 Mbit/s + included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth + size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s + included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth + size is greater than 1000 Mbit/s. + type: int + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + required: false + ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this + parameter is left blank, an IPv4 address will be assigned. + type: int + required: false + ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns + an EIP if you do not specify it. + type: str + required: false + port_id: + description: + - Specifies the port ID. This parameter is returned only when a + private IP address is bound with the EIP. + type: str + required: false + shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +# create an eip and bind it to a port +- name: Create a VPC + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create a subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: Create a port + hwc_vpc_port: + subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" + register: port +- name: Create an eip and bind it to a port + community.general.hwc_vpc_eip: + type: "5_bgp" + dedicated_bandwidth: + charge_mode: "traffic" + name: "ansible_test_dedicated_bandwidth" + size: 1 + port_id: "{{ port.id }}"
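+ +# The task below is an added sketch, not part of the original examples: it +# requests an EIP on an existing shared bandwidth instead of a dedicated one +# (the module accepts exactly one of the two). The shared_bandwidth_id value +# is hypothetical. +- name: Create an eip on an existing shared bandwidth + community.general.hwc_vpc_eip: + type: "5_bgp" + shared_bandwidth_id: "{{ shared_bandwidth_id }}"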
+''' + +RETURN = ''' + type: + description: + - Specifies the EIP type. + type: str + returned: success + dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + returned: success + contains: + charge_mode: + description: + - Specifies whether the bandwidth is billed by traffic or + by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is a null character + string, the default value bandwidth is used. For IPv6 + addresses, the default parameter value is bandwidth + outside China and is traffic in China. + type: str + returned: success + name: + description: + - Specifies the bandwidth name. The value is a string of 1 + to 64 characters that can contain letters, digits, + underscores C(_), hyphens (-), and periods (.). + type: str + returned: success + size: + description: + - Specifies the bandwidth size. The value ranges from 1 + Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You + can see the bandwidth range of each region on the + management console.) The minimum unit for bandwidth + adjustment varies depending on the bandwidth range. The + details are as follows: + - The minimum unit is 1 Mbit/s if the allowed bandwidth + size ranges from 0 to 300 Mbit/s (with 300 Mbit/s + included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth + size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s + included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth + size is greater than 1000 Mbit/s. + type: int + returned: success + id: + description: + - Specifies the ID of dedicated bandwidth. + type: str + returned: success + enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + returned: success + ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this + parameter is left blank, an IPv4 address will be assigned. + type: int + returned: success + ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns + an EIP if you do not specify it. + type: str + returned: success + port_id: + description: + - Specifies the port ID. This parameter is returned only when a + private IP address is bound with the EIP. + type: str + returned: success + shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + returned: success + create_time: + description: + - Specifies the time (UTC time) when the EIP was assigned. + type: str + returned: success + ipv6_address: + description: + - Specifies the obtained IPv6 EIP. + type: str + returned: success + private_ip_address: + description: + - Specifies the private IP address bound with the EIP. This + parameter is returned only when a private IP address is bound + with the EIP.
+ type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='5m', type='str'), + update=dict(default='5m', type='str'), + ), default=dict()), + type=dict(type='str', required=True), + dedicated_bandwidth=dict(type='dict', options=dict( + charge_mode=dict(type='str', required=True), + name=dict(type='str', required=True), + size=dict(type='int', required=True) + )), + enterprise_project_id=dict(type='str'), + ip_version=dict(type='int'), + ipv4_address=dict(type='str'), + port_id=dict(type='str'), + shared_bandwidth_id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "dedicated_bandwidth": module.params.get("dedicated_bandwidth"), + "enterprise_project_id": module.params.get("enterprise_project_id"), + "ip_version": module.params.get("ip_version"), + "ipv4_address": module.params.get("ipv4_address"), + "port_id": module.params.get("port_id"), + "shared_bandwidth_id": module.params.get("shared_bandwidth_id"), + "type": module.params.get("type"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["publicip", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + r = send_update_request(module, params, client) + async_wait_update(config, r, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + if module.params["port_id"]: + module.params["port_id"] = "" + update(config) + + 
send_delete_request(module, None, client) + + url = build_path(module, "publicips/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["ip_version"]) + if v: + query_params.append("ip_version=" + str(v)) + + v = navigate_value(opts, ["enterprise_project_id"]) + if v: + query_params.append("enterprise_project_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "publicips" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = expand_create_bandwidth(opts, None) + if not is_empty_value(v): + params["bandwidth"] = v + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = expand_create_publicip(opts, None) + if not is_empty_value(v): + params["publicip"] = v + + return params + + +def expand_create_bandwidth(d, array_index): + v = navigate_value(d, ["dedicated_bandwidth"], array_index) + sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) + if v and sbwid: + raise Exception("don't input shared_bandwidth_id and " + "dedicated_bandwidth at same time") + + if not (v or sbwid): + raise Exception("must input shared_bandwidth_id or " + "dedicated_bandwidth") + + if sbwid: + return { + "id": sbwid, + "share_type": "WHOLE"} + + return { + "charge_mode": v["charge_mode"], + "name": v["name"], + "share_type": "PER", + "size": v["size"]} + + +def expand_create_publicip(d, array_index): + r = dict() + + v = navigate_value(d, ["ipv4_address"], array_index) + if not is_empty_value(v): + r["ip_address"] = v + + v = navigate_value(d, ["ip_version"], array_index) + if not is_empty_value(v): + r["ip_version"] = v + + v = navigate_value(d, ["type"], array_index) + if not is_empty_value(v): + r["type"] = v + + return r + + +def send_create_request(module, params, client): + url = "publicips" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, 
result, client, timeout): + module = config.module + + path_parameters = { + "publicip_id": ["publicip", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "publicips/{publicip_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["publicip", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + None, + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["ip_version"], None) + if not is_empty_value(v): + params["ip_version"] = v + + v = navigate_value(opts, ["port_id"], None) + if v is not None: + params["port_id"] = v + + if not params: + return params + + params = {"publicip": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, "publicips/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_update(config, result, client, timeout): + module = config.module + + url = build_path(module, "publicips/{id}") + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["publicip", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + None, + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_eip): error " + "waiting for api(update) to " + "be done, error= %s" % str(ex)) + + +def send_delete_request(module, params, client): + url = build_path(module, "publicips/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "publicips/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["publicip"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["bandwidth_id"] = body.get("bandwidth_id") + + result["bandwidth_name"] = body.get("bandwidth_name") + + result["bandwidth_share_type"] = body.get("bandwidth_share_type") + + result["bandwidth_size"] = body.get("bandwidth_size") + + result["create_time"] = body.get("create_time") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + result["ip_version"] = body.get("ip_version") + + result["port_id"] = body.get("port_id") + + result["private_ip_address"] = body.get("private_ip_address") + + result["public_ip_address"] = body.get("public_ip_address") + + result["public_ipv6_address"] = body.get("public_ipv6_address") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + result["type"] = body.get("type") + + return result + + +def update_properties(module, 
response, array_index, exclude_output=False): + r = user_input_parameters(module) + + if not exclude_output: + v = navigate_value(response, ["read", "create_time"], array_index) + r["create_time"] = v + + v = r.get("dedicated_bandwidth") + v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output) + r["dedicated_bandwidth"] = v + + v = navigate_value(response, ["read", "enterprise_project_id"], + array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "ip_version"], array_index) + r["ip_version"] = v + + v = navigate_value(response, ["read", "public_ip_address"], array_index) + r["ipv4_address"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "public_ipv6_address"], + array_index) + r["ipv6_address"] = v + + v = navigate_value(response, ["read", "port_id"], array_index) + r["port_id"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "private_ip_address"], + array_index) + r["private_ip_address"] = v + + v = r.get("shared_bandwidth_id") + v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output) + r["shared_bandwidth_id"] = v + + v = navigate_value(response, ["read", "type"], array_index) + r["type"] = v + + return r + + +def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output): + v = navigate_value(d, ["read", "bandwidth_share_type"], array_index) + if not (v and v == "PER"): + return current_value + + result = current_value + if not result: + result = dict() + + if not exclude_output: + v = navigate_value(d, ["read", "bandwidth_id"], array_index) + if v is not None: + result["id"] = v + + v = navigate_value(d, ["read", "bandwidth_name"], array_index) + if v is not None: + result["name"] = v + + v = navigate_value(d, ["read", "bandwidth_size"], array_index) + if v is not None: + result["size"] = v + + return result if result else current_value + + +def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output): + v = navigate_value(d, ["read", "bandwidth_id"], array_index) + + v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index) + + return v if (v1 and v1 == "WHOLE") else current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_eip): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["publicips"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = expand_list_bandwidth_id(all_opts, None) + result["bandwidth_id"] = v + + v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None) + result["bandwidth_name"] = v + + result["bandwidth_share_type"] = None + + v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None) + result["bandwidth_size"] = v + + result["create_time"] = None + + v = navigate_value(all_opts, ["enterprise_project_id"], None) + result["enterprise_project_id"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["ip_version"], None) + result["ip_version"] = v + + v = navigate_value(all_opts, ["port_id"], None) + result["port_id"] = v + + result["private_ip_address"] = None + + v = navigate_value(all_opts, ["ipv4_address"], None) + result["public_ip_address"] = v + + result["public_ipv6_address"] = None + + result["status"] = None + + result["tenant_id"] = None + + v = navigate_value(all_opts, ["type"], None) + result["type"] = v + + return result + + +def expand_list_bandwidth_id(d, array_index): + v = 
navigate_value(d, ["dedicated_bandwidth"], array_index) + sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index) + if v and sbwid: + raise Exception("don't input shared_bandwidth_id and " + "dedicated_bandwidth at same time") + + return sbwid + + +def fill_list_resp_body(body): + result = dict() + + result["bandwidth_id"] = body.get("bandwidth_id") + + result["bandwidth_name"] = body.get("bandwidth_name") + + result["bandwidth_share_type"] = body.get("bandwidth_share_type") + + result["bandwidth_size"] = body.get("bandwidth_size") + + result["create_time"] = body.get("create_time") + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + result["ip_version"] = body.get("ip_version") + + result["port_id"] = body.get("port_id") + + result["private_ip_address"] = body.get("private_ip_address") + + result["public_ip_address"] = body.get("public_ip_address") + + result["public_ipv6_address"] = body.get("public_ipv6_address") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + result["type"] = body.get("type") + + return result + + +if __name__ == '__main__': + main()
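+ + +# A worked sketch of how the create request body above is assembled (the +# option values are hypothetical and this block is never executed). With +# user_input_parameters() yielding type='5_bgp' and a dedicated_bandwidth of +# {'charge_mode': 'traffic', 'name': 'bw1', 'size': 1}, every other option +# None, build_create_parameters() returns: +# +# { +# "bandwidth": {"charge_mode": "traffic", "name": "bw1", +# "share_type": "PER", "size": 1}, +# "publicip": {"type": "5_bgp"} +# } +# +# send_create_request() posts that body to "publicips", and +# async_wait_create() then polls "publicips/{publicip_id}" until the status +# becomes ACTIVE or DOWN.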
 diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py new file mode 100644 index 000000000..2d6832ce5 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py @@ -0,0 +1,698 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_vpc_peering_connect +description: + - VPC peering management. +short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operation. + type: dict + default: {} + suboptions: + create: + description: + - The timeout for the create operation. + type: str + default: '15m' + local_vpc_id: + description: + - Specifies the ID of the local VPC. + type: str + required: true + name: + description: + - Specifies the name of the VPC peering connection. The value can + contain 1 to 64 characters. + type: str + required: true + peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + required: true + suboptions: + vpc_id: + description: + - Specifies the ID of the peering VPC. + type: str + required: true + project_id: + description: + - Specifies the ID of the project to which the peering VPC + belongs. + type: str + required: false + description: + description: + - The description of the VPC peering connection. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +# create a peering connection +- name: Create a local VPC + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_local" + register: vpc1 +- name: Create a peering VPC + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_peering" + register: vpc2 +- name: Create a peering connection + community.general.hwc_vpc_peering_connect: + local_vpc_id: "{{ vpc1.id }}" + name: "ansible_network_peering_test" + peering_vpc: + vpc_id: "{{ vpc2.id }}" +''' + +RETURN = ''' + local_vpc_id: + description: + - Specifies the ID of the local VPC. + type: str + returned: success + name: + description: + - Specifies the name of the VPC peering connection. The value can + contain 1 to 64 characters. + type: str + returned: success + peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + returned: success + contains: + vpc_id: + description: + - Specifies the ID of the peering VPC. + type: str + returned: success + project_id: + description: + - Specifies the ID of the project to which the peering VPC + belongs. + type: str + returned: success + description: + description: + - The description of the VPC peering connection. + type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + ), default=dict()), + local_vpc_id=dict(type='str', required=True), + name=dict(type='str', required=True), + peering_vpc=dict(type='dict', required=True, options=dict( + vpc_id=dict(type='str', required=True), + project_id=dict(type='str') + )), + description=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "description": module.params.get("description"), + "local_vpc_id": module.params.get("local_vpc_id"), + "name": module.params.get("name"), + "peering_vpc": module.params.get("peering_vpc"), + } + + +def create(config): + module = config.module + client =
config.client(get_region(module), "network", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["peering", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + send_update_request(module, params, client) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "network", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "v2.0/vpc/peerings/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_peering_connect): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "network", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["local_vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + v = navigate_value(opts, ["name"]) + if v: + query_params.append("name=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "v2.0/vpc/peerings" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = expand_create_accept_vpc_info(opts, None) + if not is_empty_value(v): + params["accept_vpc_info"] = v + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_request_vpc_info(opts, None) + if not is_empty_value(v): + params["request_vpc_info"] = v + + if not params: + return params + + params = {"peering": params} + + return params + + +def expand_create_accept_vpc_info(d, array_index): + r = dict() + + v = navigate_value(d, ["peering_vpc", "project_id"], array_index) + if not is_empty_value(v): + r["tenant_id"] = v + + v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) + if not is_empty_value(v): + r["vpc_id"] = v + + return r + + +def 
expand_create_request_vpc_info(d, array_index): + r = dict() + + r["tenant_id"] = "" + + v = navigate_value(d, ["local_vpc_id"], array_index) + if not is_empty_value(v): + r["vpc_id"] = v + + return r + + +def send_create_request(module, params, client): + url = "v2.0/vpc/peerings" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "peering_id": ["peering", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["peering", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["PENDING_ACCEPTANCE"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_peering_connect): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + if not params: + return params + + params = {"peering": params} + + return params + + +def send_update_request(module, params, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "v2.0/vpc/peerings/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["peering"], None) + + +def fill_read_resp_body(body): + result = dict() + + v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info")) + result["accept_vpc_info"] = v + + result["description"] = body.get("description") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + v = fill_read_resp_request_vpc_info(body.get("request_vpc_info")) + result["request_vpc_info"] = v + + result["status"] = body.get("status") + + return result + + +def fill_read_resp_accept_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_read_resp_request_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, 
exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"], + array_index) + r["local_vpc_id"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = r.get("peering_vpc") + v = flatten_peering_vpc(response, array_index, v, exclude_output) + r["peering_vpc"] = v + + return r + + +def flatten_peering_vpc(d, array_index, current_value, exclude_output): + result = current_value + has_init_value = True + if not result: + result = dict() + has_init_value = False + + v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"], + array_index) + result["project_id"] = v + + v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index) + result["vpc_id"] = v + + if has_init_value: + return result + + for v in result.values(): + if v is not None: + return result + return current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_peering_connect): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["peerings"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = expand_list_accept_vpc_info(all_opts, None) + result["accept_vpc_info"] = v + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + v = expand_list_request_vpc_info(all_opts, None) + result["request_vpc_info"] = v + + result["status"] = None + + return result + + +def expand_list_accept_vpc_info(d, array_index): + r = dict() + + v = navigate_value(d, ["peering_vpc", "project_id"], array_index) + r["tenant_id"] = v + + v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def expand_list_request_vpc_info(d, array_index): + r = dict() + + r["tenant_id"] = None + + v = navigate_value(d, ["local_vpc_id"], array_index) + r["vpc_id"] = v + + for v in r.values(): + if v is not None: + return r + return None + + +def fill_list_resp_body(body): + result = dict() + + v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info")) + result["accept_vpc_info"] = v + + result["description"] = body.get("description") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + v = fill_list_resp_request_vpc_info(body.get("request_vpc_info")) + result["request_vpc_info"] = v + + result["status"] = body.get("status") + + return result + + +def fill_list_resp_accept_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +def fill_list_resp_request_vpc_info(value): + if not value: + return None + + result = dict() + + result["tenant_id"] = value.get("tenant_id") + + result["vpc_id"] = value.get("vpc_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py new file mode 100644 index 000000000..2d830493d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py @@ -0,0 +1,1167 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_vpc_port +description: + - VPC port management. +short_description: Creates a resource of Vpc/Port in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operation. + type: dict + default: {} + suboptions: + create: + description: + - The timeout for the create operation. + type: str + default: '15m' + subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + required: true + admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + required: false + allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + required: false + type: list + elements: dict + suboptions: + ip_address: + description: + - Specifies the IP address. It cannot be set to 0.0.0.0. + Configure an independent security group for the port if a + large CIDR block (subnet mask less than 24) is configured + for the parameter allowed_address_pairs. + type: str + required: false + mac_address: + description: + - Specifies the MAC address. + type: str + required: false + extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + elements: dict + required: false + suboptions: + name: + description: + - Specifies the option name. + type: str + required: false + value: + description: + - Specifies the option value. + type: str + required: false + ip_address: + description: + - Specifies the port IP address. + type: str + required: false + name: + description: + - Specifies the port name. The value can contain no more than 255 + characters. + type: str + required: false + security_groups: + description: + - Specifies the IDs of the security groups. + type: list + elements: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +# create a port +- name: Create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: Create a port + community.general.hwc_vpc_port: + subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" +''' +
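+# Note on request mapping (a sketch; see build_create_parameters() and the +# expand_create_* helpers below): the flat module options are translated into +# the nested body expected by the API, for example +# +#     subnet_id                    -> port.network_id +#     ip_address                   -> port.fixed_ips[0].ip_address +#     extra_dhcp_opts[].name/value -> opt_name/opt_value +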
+RETURN = ''' + subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + returned: success + admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + returned: success + allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + type: list + returned: success + contains: + ip_address: + description: + - Specifies the IP address. It cannot be set to 0.0.0.0. + Configure an independent security group for the port if a + large CIDR block (subnet mask less than 24) is configured + for the parameter allowed_address_pairs. + type: str + returned: success + mac_address: + description: + - Specifies the MAC address. + type: str + returned: success + extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + returned: success + contains: + name: + description: + - Specifies the option name. + type: str + returned: success + value: + description: + - Specifies the option value. + type: str + returned: success + ip_address: + description: + - Specifies the port IP address. + type: str + returned: success + name: + description: + - Specifies the port name. The value can contain no more than 255 + characters. + type: str + returned: success + security_groups: + description: + - Specifies the IDs of the security groups. + type: list + returned: success + mac_address: + description: + - Specifies the port MAC address. + type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + ), default=dict()), + subnet_id=dict(type='str', required=True), + admin_state_up=dict(type='bool'), + allowed_address_pairs=dict( + type='list', elements='dict', + options=dict( + ip_address=dict(type='str'), + mac_address=dict(type='str') + ), + ), + extra_dhcp_opts=dict(type='list', elements='dict', options=dict( + name=dict(type='str'), + value=dict(type='str') + )), + ip_address=dict(type='str'), + name=dict(type='str'), + security_groups=dict(type='list', elements='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "admin_state_up": module.params.get("admin_state_up"), + "allowed_address_pairs": module.params.get("allowed_address_pairs"), + "extra_dhcp_opts": module.params.get("extra_dhcp_opts"), + "ip_address": module.params.get("ip_address"), + "name":
module.params.get("name"), + "security_groups": module.params.get("security_groups"), + "subnet_id": module.params.get("subnet_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["port", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + send_update_request(module, params, client) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "ports/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, "Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_port): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + array_index = { + "read.fixed_ips": 0, + } + + return update_properties(module, res, array_index, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["subnet_id"]) + if v: + query_params.append("network_id=" + str(v)) + + v = navigate_value(opts, ["name"]) + if v: + query_params.append("name=" + str(v)) + + v = navigate_value(opts, ["admin_state_up"]) + if v: + query_params.append("admin_state_up=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "ports" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["admin_state_up"], None) + if not is_empty_value(v): + params["admin_state_up"] = v + + v = expand_create_allowed_address_pairs(opts, None) + if not is_empty_value(v): + params["allowed_address_pairs"] = v + + v = expand_create_extra_dhcp_opts(opts, None) + if not is_empty_value(v): + params["extra_dhcp_opts"] = v + + v = expand_create_fixed_ips(opts, None) + if not is_empty_value(v): + params["fixed_ips"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = 
navigate_value(opts, ["subnet_id"], None) + if not is_empty_value(v): + params["network_id"] = v + + v = navigate_value(opts, ["security_groups"], None) + if not is_empty_value(v): + params["security_groups"] = v + + if not params: + return params + + params = {"port": params} + + return params + + +def expand_create_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["allowed_address_pairs"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + if not is_empty_value(v): + transformed["mac_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + if not is_empty_value(v): + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + if not is_empty_value(v): + transformed["opt_value"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_create_fixed_ips(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + n = 1 + for i in range(n): + transformed = dict() + + v = navigate_value(d, ["ip_address"], new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def send_create_request(module, params, client): + url = "ports" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "port_id": ["port", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "ports/{port_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["port", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE", "DOWN"], + ["BUILD"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_port): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = expand_update_allowed_address_pairs(opts, None) + if v is not None: + params["allowed_address_pairs"] = v + + v = expand_update_extra_dhcp_opts(opts, None) + if v is not None: + params["extra_dhcp_opts"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["security_groups"], None) + if not is_empty_value(v): + 
params["security_groups"] = v + + if not params: + return params + + params = {"port": params} + + return params + + +def expand_update_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["allowed_address_pairs"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + if not is_empty_value(v): + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + if not is_empty_value(v): + transformed["mac_address"] = v + + if transformed: + req.append(transformed) + + return req + + +def expand_update_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + if not v: + return req + n = len(v) + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + if not is_empty_value(v): + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + if not is_empty_value(v): + transformed["opt_value"] = v + + if transformed: + req.append(transformed) + + return req + + +def send_update_request(module, params, client): + url = build_path(module, "ports/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "ports/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "ports/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["port"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["admin_state_up"] = body.get("admin_state_up") + + v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs")) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = body.get("binding_host_id") + + result["binding_vnic_type"] = body.get("binding_vnic_type") + + result["device_id"] = body.get("device_id") + + result["device_owner"] = body.get("device_owner") + + result["dns_name"] = body.get("dns_name") + + v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) + result["extra_dhcp_opts"] = v + + v = fill_read_resp_fixed_ips(body.get("fixed_ips")) + result["fixed_ips"] = v + + result["id"] = body.get("id") + + result["mac_address"] = body.get("mac_address") + + result["name"] = body.get("name") + + result["network_id"] = body.get("network_id") + + result["security_groups"] = body.get("security_groups") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + return result + + +def fill_read_resp_allowed_address_pairs(value): + if not value: + return None + + result = [] + for item in value: + 
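+ # keep only the documented ip_address/mac_address fields; other API fields are dropped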
val = dict() + + val["ip_address"] = item.get("ip_address") + + val["mac_address"] = item.get("mac_address") + + result.append(val) + + return result + + +def fill_read_resp_extra_dhcp_opts(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["opt_name"] = item.get("opt_name") + + val["opt_value"] = item.get("opt_value") + + result.append(val) + + return result + + +def fill_read_resp_fixed_ips(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + result.append(val) + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "admin_state_up"], array_index) + r["admin_state_up"] = v + + v = r.get("allowed_address_pairs") + v = flatten_allowed_address_pairs(response, array_index, v, exclude_output) + r["allowed_address_pairs"] = v + + v = r.get("extra_dhcp_opts") + v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output) + r["extra_dhcp_opts"] = v + + v = navigate_value(response, ["read", "fixed_ips", "ip_address"], + array_index) + r["ip_address"] = v + + if not exclude_output: + v = navigate_value(response, ["read", "mac_address"], array_index) + r["mac_address"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "security_groups"], array_index) + r["security_groups"] = v + + v = navigate_value(response, ["read", "network_id"], array_index) + r["subnet_id"] = v + + return r + + +def flatten_allowed_address_pairs(d, array_index, + current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "allowed_address_pairs"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.allowed_address_pairs"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"], + new_array_index) + val["ip_address"] = v + + v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"], + new_array_index) + val["mac_address"] = v + + if len(result) >= (i + 1): + result[i] = val + else: + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if (has_init_value or result) else current_value + + +def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "extra_dhcp_opts"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.extra_dhcp_opts"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"], + new_array_index) + val["name"] = v + + v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"], + new_array_index) + val["value"] = v + + if len(result) >= (i + 1): + result[i] = val + else: + for v in val.values(): + if v is not None: + result.append(val) + 
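+ # a single non-None field is enough to keep this entry; stop checking the rest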
break + + return result if (has_init_value or result) else current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_port): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["ports"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["admin_state_up"], None) + result["admin_state_up"] = v + + v = expand_list_allowed_address_pairs(all_opts, None) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = None + + result["binding_vnic_type"] = None + + result["device_id"] = None + + result["device_owner"] = None + + result["dns_name"] = None + + v = expand_list_extra_dhcp_opts(all_opts, None) + result["extra_dhcp_opts"] = v + + v = expand_list_fixed_ips(all_opts, None) + result["fixed_ips"] = v + + result["id"] = None + + result["mac_address"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + v = navigate_value(all_opts, ["subnet_id"], None) + result["network_id"] = v + + v = navigate_value(all_opts, ["security_groups"], None) + result["security_groups"] = v + + result["status"] = None + + result["tenant_id"] = None + + return result + + +def expand_list_allowed_address_pairs(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["allowed_address_pairs"], + new_array_index) + + n = len(v) if v else 1 + for i in range(n): + new_array_index["allowed_address_pairs"] = i + transformed = dict() + + v = navigate_value(d, ["allowed_address_pairs", "ip_address"], + new_array_index) + transformed["ip_address"] = v + + v = navigate_value(d, ["allowed_address_pairs", "mac_address"], + new_array_index) + transformed["mac_address"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def expand_list_extra_dhcp_opts(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + v = navigate_value(d, ["extra_dhcp_opts"], + new_array_index) + + n = len(v) if v else 1 + for i in range(n): + new_array_index["extra_dhcp_opts"] = i + transformed = dict() + + v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index) + transformed["opt_name"] = v + + v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index) + transformed["opt_value"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def expand_list_fixed_ips(d, array_index): + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + req = [] + + n = 1 + for i in range(n): + transformed = dict() + + v = navigate_value(d, ["ip_address"], new_array_index) + transformed["ip_address"] = v + + for v in transformed.values(): + if v is not None: + req.append(transformed) + break + + return req if req else None + + +def fill_list_resp_body(body): + result = dict() + + result["admin_state_up"] = body.get("admin_state_up") + + v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs")) + result["allowed_address_pairs"] = v + + result["binding_host_id"] = body.get("binding_host_id") + + result["binding_vnic_type"] = body.get("binding_vnic_type") + + result["device_id"] = body.get("device_id") + + result["device_owner"] = body.get("device_owner") + + 
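+ # these copied keys mirror _build_identity_object(), so listed ports can be compared with are_different_dicts()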
result["dns_name"] = body.get("dns_name") + + v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts")) + result["extra_dhcp_opts"] = v + + v = fill_list_resp_fixed_ips(body.get("fixed_ips")) + result["fixed_ips"] = v + + result["id"] = body.get("id") + + result["mac_address"] = body.get("mac_address") + + result["name"] = body.get("name") + + result["network_id"] = body.get("network_id") + + result["security_groups"] = body.get("security_groups") + + result["status"] = body.get("status") + + result["tenant_id"] = body.get("tenant_id") + + return result + + +def fill_list_resp_allowed_address_pairs(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + val["mac_address"] = item.get("mac_address") + + result.append(val) + + return result + + +def fill_list_resp_extra_dhcp_opts(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["opt_name"] = item.get("opt_name") + + val["opt_value"] = item.get("opt_value") + + result.append(val) + + return result + + +def fill_list_resp_fixed_ips(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["ip_address"] = item.get("ip_address") + + result.append(val) + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py new file mode 100644 index 000000000..c57ddc670 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py @@ -0,0 +1,360 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_vpc_private_ip +description: + - VPC private IP management. +short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud +notes: + - If the I(id) option is provided, it takes precedence over I(subnet_id) and I(ip_address) for private IP selection. + - I(subnet_id) and I(ip_address) are used for private IP selection. If more than one private IP matching these options exists, execution is aborted. + - No parameter supports updating. If an option of an existing private IP is changed, the module fails instead of updating the resource. +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are + assigned. Cannot be changed after creating the private IP. + type: str + required: true + ip_address: + description: + - Specifies the target IP address. The value can be an available IP + address in the subnet. If it is not specified, the system + automatically assigns an IP address. Cannot be changed after + creating the private IP. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' +
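+# Note: this module has no update path; when an existing private IP is found +# whose options differ from the task arguments, main() below fails with +# "Cannot change option ..." rather than recreating the resource. +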
+EXAMPLES = ''' +# create a private IP +- name: Create vpc + hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create subnet + hwc_vpc_subnet: + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + register: subnet +- name: Create a private ip + community.general.hwc_vpc_private_ip: + subnet_id: "{{ subnet.id }}" + ip_address: "192.168.100.33" +''' + +RETURN = ''' + subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are + assigned. + type: str + returned: success + ip_address: + description: + - Specifies the target IP address. The value can be an available IP + address in the subnet. If it is not specified, the system + automatically assigns an IP address. + type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + subnet_id=dict(type='str', required=True), + ip_address=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + raise Exception( + "Cannot change option from (%s) to (%s) of an" + " existing resource. (%s)" % (current, expect, module.params.get('id'))) + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "ip_address": module.params.get("ip_address"), + "subnet_id": module.params.get("subnet_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["privateips", "id"], + {"privateips": 0}) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res,
None, exclude_output) + + +def _build_query_link(opts): + query_link = "?marker={marker}&limit=10" + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = build_path(module, "subnets/{subnet_id}/privateips") + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["ip_address"], None) + if not is_empty_value(v): + params["ip_address"] = v + + v = navigate_value(opts, ["subnet_id"], None) + if not is_empty_value(v): + params["subnet_id"] = v + + if not params: + return params + + params = {"privateips": [params]} + + return params + + +def send_create_request(module, params, client): + url = "privateips" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "privateips/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "privateips/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["privateip"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["id"] = body.get("id") + + result["ip_address"] = body.get("ip_address") + + result["subnet_id"] = body.get("subnet_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "ip_address"], array_index) + r["ip_address"] = v + + v = navigate_value(response, ["read", "subnet_id"], array_index) + r["subnet_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_private_ip): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["privateips"], None) + + +def _build_identity_object(all_opts): + result = dict() + + result["id"] = None + + v = navigate_value(all_opts, ["ip_address"], None) + result["ip_address"] = v + + v = navigate_value(all_opts, ["subnet_id"], None) + result["subnet_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["id"] = body.get("id") + + result["ip_address"] = body.get("ip_address") + + result["subnet_id"] = body.get("subnet_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py 
b/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py new file mode 100644 index 000000000..1612cac50 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py @@ -0,0 +1,443 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_vpc_route +description: + - VPC route management. +short_description: Creates a resource of Vpc/Route in Huawei Cloud +notes: + - If the I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection. + - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route matching these options exists, execution is aborted. + - No parameter supports updating. If an option of an existing route is changed, the module fails instead of updating the resource. +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + required: true + next_hop: + description: + - Specifies the next hop. The value is a VPC peering connection ID. + type: str + required: true + vpc_id: + description: + - Specifies the VPC ID to which the route is added. + type: str + required: true + type: + description: + - Specifies the type of the route. + type: str + required: false + default: 'peering' +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +# create a route +- name: Create a local vpc + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_local" + register: vpc1 +- name: Create a peering vpc + hwc_network_vpc: + cidr: "192.168.0.0/16" + name: "ansible_network_vpc_test_peering" + register: vpc2 +- name: Create a peering connect + hwc_vpc_peering_connect: + local_vpc_id: "{{ vpc1.id }}" + name: "ansible_network_peering_test" + filters: + - "name" + peering_vpc: + vpc_id: "{{ vpc2.id }}" + register: connect +- name: Create a route + community.general.hwc_vpc_route: + vpc_id: "{{ vpc1.id }}" + destination: "192.168.0.0/16" + next_hop: "{{ connect.id }}" +''' + +RETURN = ''' + id: + description: + - UUID of the route. + type: str + returned: success + destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + returned: success + next_hop: + description: + - Specifies the next hop. The value is a VPC peering connection ID. + type: str + returned: success + vpc_id: + description: + - Specifies the VPC ID to which the route is added. + type: str + returned: success + type: + description: + - Specifies the type of the route. + type: str + returned: success +''' +
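+# Selection sketch (see main() below): an explicit I(id) takes precedence and +# any drift in the other options is rejected; otherwise search_resource() +# filters routes by destination, vpc_id and type and compares the remaining +# fields (including next_hop), aborting when more than one route matches. +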
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + destination=dict(type='str', required=True), + next_hop=dict(type='str', required=True), + vpc_id=dict(type='str', required=True), + type=dict(type='str', default='peering'), + id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get("id"): + resource = get_resource_by_id(config) + if module.params['state'] == 'present': + opts = user_input_parameters(module) + if are_different_dicts(resource, opts): + raise Exception( + "Cannot change option from (%s) to (%s) for an" + " existing route. (%s)" % (resource, opts, + config.module.params.get( + 'id'))) + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = update_properties(module, {"read": v[0]}, None) + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + resource = create(config) + changed = True + + result = resource + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "destination": module.params.get("destination"), + "next_hop": module.params.get("next_hop"), + "type": module.params.get("type"), + "vpc_id": module.params.get("vpc_id"), + "id": module.params.get("id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["route", "id"]) + + result = update_properties(module, {"read": fill_resp_body(r)}, None) + return result + + +def delete(config): + module = config.module + client = config.client(get_region(module), "network", "project") + + send_delete_request(module, None, client) + + +def get_resource_by_id(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "network", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_resp_body(r) + + result = update_properties(module, res, None, exclude_output) + return result + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["type"]) + if v: + query_params.append("type=" + str(v)) + + v = navigate_value(opts, ["destination"]) + if v: + query_params.append("destination=" + str(v)) + + v = navigate_value(opts, ["vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "network", "project") + opts =
user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "v2.0/vpc/routes" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["destination"], None) + if not is_empty_value(v): + params["destination"] = v + + v = navigate_value(opts, ["next_hop"], None) + if not is_empty_value(v): + params["nexthop"] = v + + v = navigate_value(opts, ["type"], None) + if not is_empty_value(v): + params["type"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"route": params} + + return params + + +def send_create_request(module, params, client): + url = "v2.0/vpc/routes" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "v2.0/vpc/routes/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "v2.0/vpc/routes/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["route"], None) + + +def fill_resp_body(body): + result = dict() + + result["destination"] = body.get("destination") + + result["id"] = body.get("id") + + result["nexthop"] = body.get("nexthop") + + result["type"] = body.get("type") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "destination"], array_index) + r["destination"] = v + + v = navigate_value(response, ["read", "nexthop"], array_index) + r["next_hop"] = v + + v = navigate_value(response, ["read", "type"], array_index) + r["type"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + v = navigate_value(response, ["read", "id"], array_index) + r["id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_route): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["routes"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["destination"], None) + result["destination"] = v + + v = navigate_value(all_opts, ["id"], None) + result["id"] = v + + v = navigate_value(all_opts, ["next_hop"], None) + result["nexthop"] = v + + v = navigate_value(all_opts, ["type"], None) + result["type"] = v + + v = navigate_value(all_opts, ["vpc_id"], None) + result["vpc_id"] = v + + return result + + +def 
fill_list_resp_body(body): + result = dict() + + result["destination"] = body.get("destination") + + result["id"] = body.get("id") + + result["nexthop"] = body.get("nexthop") + + result["type"] = body.get("type") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py new file mode 100644 index 000000000..c210b912d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py @@ -0,0 +1,650 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_vpc_security_group +description: + - VPC security group management. +short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud +notes: + - If the I(id) option is provided, it takes precedence over I(name), + I(enterprise_project_id) and I(vpc_id) for security group selection. + - I(name), I(enterprise_project_id) and I(vpc_id) are used for security + group selection. If more than one security group matching these options + exists, execution is aborted. + - No parameter supports updating. If an option of an existing security + group is changed, the module fails instead of updating the resource. +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Specifies the security group name. The value is a string of 1 to + 64 characters that can contain letters, digits, underscores (_), + hyphens (-), and periods (.). + type: str + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. When creating a security + group, associate the enterprise project ID with the security + group. + type: str + required: false + vpc_id: + description: + - Specifies the resource ID of the VPC to which the security group + belongs. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +# create a security group +- name: Create a security group + community.general.hwc_vpc_security_group: + name: "ansible_network_security_group_test" +''' + +RETURN = ''' + name: + description: + - Specifies the security group name. The value is a string of 1 to + 64 characters that can contain letters, digits, underscores (_), + hyphens (-), and periods (.). + type: str + returned: success + enterprise_project_id: + description: + - Specifies the enterprise project ID. When creating a security + group, associate the enterprise project ID with the security + group. + type: str + returned: success + vpc_id: + description: + - Specifies the resource ID of the VPC to which the security group + belongs.
+ type: str + returned: success + rules: + description: + - Specifies the security group rule, which ensures that resources + in the security group can communicate with one another. + type: complex + returned: success + contains: + description: + description: + - Provides supplementary information about the security + group rule. + type: str + returned: success + direction: + description: + - Specifies the direction of access control. The value can + be egress or ingress. + type: str + returned: success + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 + or IPv6. + type: str + returned: success + id: + description: + - Specifies the security group rule ID. + type: str + returned: success + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to + 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value + indicates all ports. + type: int + returned: success + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 + to 65535. The value cannot be greater than the + port_range_max value. An empty value indicates all ports. + type: int + returned: success + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, + udp, or others. If the parameter is left blank, the + security group supports all protocols. + type: str + returned: success + remote_address_group_id: + description: + - Specifies the ID of remote IP address group. + type: str + returned: success + remote_group_id: + description: + - Specifies the ID of the peer security group. + type: str + returned: success + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control + direction is set to egress, the parameter specifies the + source IP address. If the access control direction is set + to ingress, the parameter specifies the destination IP + address. 
+ type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + name=dict(type='str', required=True), + enterprise_project_id=dict(type='str'), + vpc_id=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get("id"): + resource = read_resource(config) + if module.params['state'] == 'present': + check_resource_option(resource, module) + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = update_properties(module, {"read": v[0]}, None) + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + resource = create(config) + changed = True + + result = resource + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "enterprise_project_id": module.params.get("enterprise_project_id"), + "name": module.params.get("name"), + "vpc_id": module.params.get("vpc_id"), + "id": module.params.get("id"), + } + + +def check_resource_option(resource, module): + opts = user_input_parameters(module) + + resource = { + "enterprise_project_id": resource.get("enterprise_project_id"), + "name": resource.get("name"), + "vpc_id": resource.get("vpc_id"), + "id": resource.get("id"), + } + + if are_different_dicts(resource, opts): + raise Exception( + "Cannot change option from (%s) to (%s) for an" + " existing security group(%s)." 
% (resource, opts, + module.params.get('id'))) + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["security_group", "id"]) + + result = update_properties(module, {"read": fill_read_resp_body(r)}, None) + return result + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_params = [] + + v = navigate_value(opts, ["enterprise_project_id"]) + if v: + query_params.append("enterprise_project_id=" + str(v)) + + v = navigate_value(opts, ["vpc_id"]) + if v: + query_params.append("vpc_id=" + str(v)) + + query_link = "?marker={marker}&limit=10" + if query_params: + query_link += "&" + "&".join(query_params) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "security-groups" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["enterprise_project_id"], None) + if not is_empty_value(v): + params["enterprise_project_id"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"security_group": params} + + return params + + +def send_create_request(module, params, client): + url = "security-groups" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "security-groups/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "security-groups/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + 
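
The search_resource functions in these hwc_vpc_* modules all share the same marker-based pagination pattern: request a page, normalize each entry, compare it against the identity object, and feed the id of the page's last entry back as the marker for the next request. A minimal, self-contained sketch of that loop (FakePagedClient is a hypothetical stand-in for the real hwc_utils client, not part of the collection):

def list_all_pages(client, link):
    # Collect every page; the id of a page's last item becomes the
    # "marker" query parameter of the next request.
    results = []
    params = {'marker': ''}
    while True:
        page = client.get(link.format(**params))
        if not page:
            break
        results.extend(page)
        params['marker'] = page[-1].get('id')
    return results


class FakePagedClient(object):
    # Hypothetical stub that serves two pages and then an empty page.
    def __init__(self):
        self._pages = [[{'id': 'a'}, {'id': 'b'}], [{'id': 'c'}], []]

    def get(self, url):
        return self._pages.pop(0)


print(list_all_pages(FakePagedClient(), "security-groups?marker={marker}&limit=10"))
# -> [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}]
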
result["name"] = body.get("name") + + v = fill_read_resp_security_group_rules(body.get("security_group_rules")) + result["security_group_rules"] = v + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def fill_read_resp_security_group_rules(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["description"] = item.get("description") + + val["direction"] = item.get("direction") + + val["ethertype"] = item.get("ethertype") + + val["id"] = item.get("id") + + val["port_range_max"] = item.get("port_range_max") + + val["port_range_min"] = item.get("port_range_min") + + val["protocol"] = item.get("protocol") + + val["remote_address_group_id"] = item.get("remote_address_group_id") + + val["remote_group_id"] = item.get("remote_group_id") + + val["remote_ip_prefix"] = item.get("remote_ip_prefix") + + val["security_group_id"] = item.get("security_group_id") + + result.append(val) + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "enterprise_project_id"], + array_index) + r["enterprise_project_id"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + if not exclude_output: + v = r.get("rules") + v = flatten_rules(response, array_index, v, exclude_output) + r["rules"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + v = navigate_value(response, ["read", "id"], array_index) + r["id"] = v + + return r + + +def flatten_rules(d, array_index, current_value, exclude_output): + n = 0 + result = current_value + has_init_value = True + if result: + n = len(result) + else: + has_init_value = False + result = [] + v = navigate_value(d, ["read", "security_group_rules"], + array_index) + if not v: + return current_value + n = len(v) + + new_array_index = dict() + if array_index: + new_array_index.update(array_index) + + for i in range(n): + new_array_index["read.security_group_rules"] = i + + val = dict() + if len(result) >= (i + 1) and result[i]: + val = result[i] + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "description"], + new_array_index) + val["description"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "direction"], + new_array_index) + val["direction"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "ethertype"], + new_array_index) + val["ethertype"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "id"], + new_array_index) + val["id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "port_range_max"], + new_array_index) + val["port_range_max"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "port_range_min"], + new_array_index) + val["port_range_min"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "protocol"], + new_array_index) + val["protocol"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"], + new_array_index) + val["remote_address_group_id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"], + new_array_index) + val["remote_group_id"] = v + + if not exclude_output: + v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"], + 
new_array_index) + val["remote_ip_prefix"] = v + + if len(result) >= (i + 1): + result[i] = val + else: + for v in val.values(): + if v is not None: + result.append(val) + break + + return result if (has_init_value or result) else current_value + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_groups"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["enterprise_project_id"], None) + result["enterprise_project_id"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + result["security_group_rules"] = None + + v = navigate_value(all_opts, ["vpc_id"], None) + result["vpc_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["enterprise_project_id"] = body.get("enterprise_project_id") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + v = fill_list_resp_security_group_rules(body.get("security_group_rules")) + result["security_group_rules"] = v + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def fill_list_resp_security_group_rules(value): + if not value: + return None + + result = [] + for item in value: + val = dict() + + val["description"] = item.get("description") + + val["direction"] = item.get("direction") + + val["ethertype"] = item.get("ethertype") + + val["id"] = item.get("id") + + val["port_range_max"] = item.get("port_range_max") + + val["port_range_min"] = item.get("port_range_min") + + val["protocol"] = item.get("protocol") + + val["remote_address_group_id"] = item.get("remote_address_group_id") + + val["remote_group_id"] = item.get("remote_group_id") + + val["remote_ip_prefix"] = item.get("remote_ip_prefix") + + val["security_group_id"] = item.get("security_group_id") + + result.append(val) + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py new file mode 100644 index 000000000..bfb5d6a61 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py @@ -0,0 +1,576 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_vpc_security_group_rule +description: + - VPC security group rule management. +short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud +notes: + - If I(id) option is provided, it takes precedence over + I(security_group_id) for security group rule selection. + - I(security_group_id) is used for security group rule selection. If more + than one security group rule with this option exists, execution is + aborted. + - No parameter supports updating. If any option is changed, the module + will create a new resource.
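
In the listing path above, _build_identity_object turns the user's options into a template in which every unset option is None, and fill_list_resp_body normalizes each API entry to the same shape before the two are compared with are_different_dicts. As a rough mental model only (an assumed simplification, not the actual hwc_utils implementation), the comparison behaves like a matcher in which None acts as a wildcard:

def matches_identity(identity, item):
    # Assumed simplification: None in the identity template means the
    # user did not constrain that field, so any value is accepted.
    return all(v is None or item.get(k) == v
               for k, v in identity.items())


print(matches_identity({'name': 'sg-1', 'vpc_id': None},
                       {'name': 'sg-1', 'vpc_id': 'vpc-42'}))
# -> True: only user-supplied options constrain the match.
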
+version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + direction: + description: + - Specifies the direction of access control. The value can be + egress or ingress. + type: str + required: true + security_group_id: + description: + - Specifies the ID of the security group to which the rule + belongs. + type: str + required: true + description: + description: + - Provides supplementary information about the security group rule. + The value is a string of no more than 255 characters that can + contain letters and digits. + type: str + required: false + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. + If you do not set this parameter, IPv4 is used by default. + type: str + required: false + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. + If the protocol is not icmp, the value cannot be smaller than the + port_range_min value. An empty value indicates all ports. + type: int + required: false + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to + 65535. The value cannot be greater than the port_range_max value. + An empty value indicates all ports. + type: int + required: false + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. + If the parameter is left blank, the security group supports all + protocols. + type: str + required: false + remote_group_id: + description: + - Specifies the ID of the peer security group. The value is + exclusive with parameter remote_ip_prefix. + type: str + required: false + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction + is set to egress, the parameter specifies the source IP address. + If the access control direction is set to ingress, the parameter + specifies the destination IP address. The value can be in the + CIDR format or IP addresses. The parameter is exclusive with + parameter remote_group_id. + type: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +# create a security group rule +- name: Create a security group + community.general.hwc_vpc_security_group: + name: "ansible_network_security_group_test" + register: sg +- name: Create a security group rule + community.general.hwc_vpc_security_group_rule: + direction: "ingress" + protocol: "tcp" + ethertype: "IPv4" + port_range_max: 22 + security_group_id: "{{ sg.id }}" + port_range_min: 22 + remote_ip_prefix: "0.0.0.0/0" +''' + +RETURN = ''' + direction: + description: + - Specifies the direction of access control. The value can be + egress or ingress. + type: str + returned: success + security_group_id: + description: + - Specifies the ID of the security group to which the rule + belongs. + type: str + returned: success + description: + description: + - Provides supplementary information about the security group rule. + The value is a string of no more than 255 characters that can + contain letters and digits. + type: str + returned: success + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default. + type: str + returned: success + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. + If the protocol is not icmp, the value cannot be smaller than the + port_range_min value. An empty value indicates all ports. + type: int + returned: success + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to + 65535. The value cannot be greater than the port_range_max value. + An empty value indicates all ports. + type: int + returned: success + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. + If the parameter is left blank, the security group supports all + protocols. + type: str + returned: success + remote_group_id: + description: + - Specifies the ID of the peer security group. The value is + exclusive with parameter remote_ip_prefix. + type: str + returned: success + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction + is set to egress, the parameter specifies the source IP address. + If the access control direction is set to ingress, the parameter + specifies the destination IP address. The value can be in the + CIDR format or IP addresses. The parameter is exclusive with + parameter remote_group_id. + type: str + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcModule, are_different_dicts, build_path, + get_region, is_empty_value, navigate_value) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + direction=dict(type='str', required=True), + security_group_id=dict(type='str', required=True), + description=dict(type='str'), + ethertype=dict(type='str'), + port_range_max=dict(type='int'), + port_range_min=dict(type='int'), + protocol=dict(type='str'), + remote_group_id=dict(type='str'), + remote_ip_prefix=dict(type='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params['id']: + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + raise Exception( + "Cannot change option from (%s) to (%s) for an" + " existing security group rule (%s)."
% (current, expect, module.params.get('id'))) + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "description": module.params.get("description"), + "direction": module.params.get("direction"), + "ethertype": module.params.get("ethertype"), + "port_range_max": module.params.get("port_range_max"), + "port_range_min": module.params.get("port_range_min"), + "protocol": module.params.get("protocol"), + "remote_group_id": module.params.get("remote_group_id"), + "remote_ip_prefix": module.params.get("remote_ip_prefix"), + "security_group_id": module.params.get("security_group_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + module.params['id'] = navigate_value(r, ["security_group_rule", "id"]) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_link = "?marker={marker}&limit=10" + v = navigate_value(opts, ["security_group_id"]) + if v: + query_link += "&security_group_id=" + str(v) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "security-group-rules" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["description"], None) + if not is_empty_value(v): + params["description"] = v + + v = navigate_value(opts, ["direction"], None) + if not is_empty_value(v): + params["direction"] = v + + v = navigate_value(opts, ["ethertype"], None) + if not is_empty_value(v): + params["ethertype"] = v + + v = navigate_value(opts, ["port_range_max"], None) + if not is_empty_value(v): + params["port_range_max"] = v + + v = navigate_value(opts, ["port_range_min"], None) + if not is_empty_value(v): + params["port_range_min"] = v + + v = navigate_value(opts, ["protocol"], None) + if not is_empty_value(v): + params["protocol"] = v + + v = navigate_value(opts, ["remote_group_id"], None) + if not is_empty_value(v): + params["remote_group_id"] = v + + v = navigate_value(opts, ["remote_ip_prefix"], None) + if not is_empty_value(v): + params["remote_ip_prefix"] = v + + v = navigate_value(opts, ["security_group_id"], None) + if not is_empty_value(v): + params["security_group_id"] = 
v + + if not params: + return params + + params = {"security_group_rule": params} + + return params + + +def send_create_request(module, params, client): + url = "security-group-rules" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_delete_request(module, params, client): + url = build_path(module, "security-group-rules/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "security-group-rules/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group_rule"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["description"] = body.get("description") + + result["direction"] = body.get("direction") + + result["ethertype"] = body.get("ethertype") + + result["id"] = body.get("id") + + result["port_range_max"] = body.get("port_range_max") + + result["port_range_min"] = body.get("port_range_min") + + result["protocol"] = body.get("protocol") + + result["remote_address_group_id"] = body.get("remote_address_group_id") + + result["remote_group_id"] = body.get("remote_group_id") + + result["remote_ip_prefix"] = body.get("remote_ip_prefix") + + result["security_group_id"] = body.get("security_group_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "description"], array_index) + r["description"] = v + + v = navigate_value(response, ["read", "direction"], array_index) + r["direction"] = v + + v = navigate_value(response, ["read", "ethertype"], array_index) + r["ethertype"] = v + + v = navigate_value(response, ["read", "port_range_max"], array_index) + r["port_range_max"] = v + + v = navigate_value(response, ["read", "port_range_min"], array_index) + r["port_range_min"] = v + + v = navigate_value(response, ["read", "protocol"], array_index) + r["protocol"] = v + + v = navigate_value(response, ["read", "remote_group_id"], array_index) + r["remote_group_id"] = v + + v = navigate_value(response, ["read", "remote_ip_prefix"], array_index) + r["remote_ip_prefix"] = v + + v = navigate_value(response, ["read", "security_group_id"], array_index) + r["security_group_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_security_group_rule): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["security_group_rules"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["description"], None) + result["description"] = v + + v = navigate_value(all_opts, ["direction"], None) + result["direction"] = v + + v = navigate_value(all_opts, ["ethertype"], None) + result["ethertype"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["port_range_max"], None) + result["port_range_max"] = v + + v = navigate_value(all_opts, 
["port_range_min"], None) + result["port_range_min"] = v + + v = navigate_value(all_opts, ["protocol"], None) + result["protocol"] = v + + result["remote_address_group_id"] = None + + v = navigate_value(all_opts, ["remote_group_id"], None) + result["remote_group_id"] = v + + v = navigate_value(all_opts, ["remote_ip_prefix"], None) + result["remote_ip_prefix"] = v + + v = navigate_value(all_opts, ["security_group_id"], None) + result["security_group_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["description"] = body.get("description") + + result["direction"] = body.get("direction") + + result["ethertype"] = body.get("ethertype") + + result["id"] = body.get("id") + + result["port_range_max"] = body.get("port_range_max") + + result["port_range_min"] = body.get("port_range_min") + + result["protocol"] = body.get("protocol") + + result["remote_address_group_id"] = body.get("remote_address_group_id") + + result["remote_group_id"] = body.get("remote_group_id") + + result["remote_ip_prefix"] = body.get("remote_ip_prefix") + + result["security_group_id"] = body.get("security_group_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py new file mode 100644 index 000000000..7fb107f53 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py @@ -0,0 +1,741 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019 Huawei +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +############################################################################### +# Documentation +############################################################################### + +DOCUMENTATION = ''' +--- +module: hwc_vpc_subnet +description: + - Subnet management. +short_description: Creates a resource of Vpc/Subnet in Huawei Cloud +version_added: '0.2.0' +author: Huawei Inc. (@huaweicloud) +requirements: + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operation. + type: dict + default: {} + suboptions: + create: + description: + - The timeout for the create operation. + type: str + default: '15m' + update: + description: + - The timeout for the update operation. + type: str + default: '15m' + cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC + CIDR block and be in CIDR format. The subnet mask cannot be + greater than 28. Cannot be changed after creating the subnet. + type: str + required: true + gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP + address in the subnet. Cannot be changed after creating the subnet. + type: str + required: true + name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 + characters that can contain letters, digits, underscores C(_), + hyphens (-), and periods (.). + type: str + required: true + vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs.
Cannot + be changed after creating the subnet. + type: str + required: true + availability_zone: + description: + - Specifies the AZ to which the subnet belongs. Cannot be changed + after creating the subnet. + type: str + required: false + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can + be true (enabled) or false (disabled), and the default value is true. + If this parameter is set to false, newly created ECSs cannot + obtain IP addresses, and usernames and passwords cannot be + injected using Cloud-init. + type: bool + required: false + dns_address: + description: + - Specifies the DNS server addresses for the subnet. The address + at the head of the list is used first. + type: list + elements: str + required: false +extends_documentation_fragment: + - community.general.hwc + - community.general.attributes + +''' + +EXAMPLES = ''' +# create subnet +- name: Create vpc + community.general.hwc_network_vpc: + cidr: "192.168.100.0/24" + name: "ansible_network_vpc_test" + register: vpc +- name: Create subnet + community.general.hwc_vpc_subnet: + vpc_id: "{{ vpc.id }}" + cidr: "192.168.100.0/26" + gateway_ip: "192.168.100.32" + name: "ansible_network_subnet_test" + dhcp_enable: true +''' + +RETURN = ''' + cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC + CIDR block and be in CIDR format. The subnet mask cannot be + greater than 28. + type: str + returned: success + gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP + address in the subnet. + type: str + returned: success + name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 + characters that can contain letters, digits, underscores C(_), + hyphens (-), and periods (.). + type: str + returned: success + vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. + type: str + returned: success + availability_zone: + description: + - Specifies the AZ to which the subnet belongs. + type: str + returned: success + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can + be true (enabled) or false (disabled), and the default value is true. + If this parameter is set to false, newly created ECSs cannot + obtain IP addresses, and usernames and passwords cannot be + injected using Cloud-init. + type: bool + returned: success + dns_address: + description: + - Specifies the DNS server addresses for the subnet. The address + at the head of the list is used first.
+ type: list + returned: success +''' + +from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( + Config, HwcClientException, HwcClientException404, HwcModule, + are_different_dicts, build_path, get_region, is_empty_value, + navigate_value, wait_to_finish) + + +def build_module(): + return HwcModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], + type='str'), + timeouts=dict(type='dict', options=dict( + create=dict(default='15m', type='str'), + update=dict(default='15m', type='str'), + ), default=dict()), + cidr=dict(type='str', required=True), + gateway_ip=dict(type='str', required=True), + name=dict(type='str', required=True), + vpc_id=dict(type='str', required=True), + availability_zone=dict(type='str'), + dhcp_enable=dict(type='bool'), + dns_address=dict(type='list', elements='str') + ), + supports_check_mode=True, + ) + + +def main(): + """Main function""" + + module = build_module() + config = Config(module, "vpc") + + try: + resource = None + if module.params.get('id'): + resource = True + else: + v = search_resource(config) + if len(v) > 1: + raise Exception("Found more than one resource(%s)" % ", ".join([ + navigate_value(i, ["id"]) for i in v])) + + if len(v) == 1: + resource = v[0] + module.params['id'] = navigate_value(resource, ["id"]) + + result = {} + changed = False + if module.params['state'] == 'present': + if resource is None: + if not module.check_mode: + create(config) + changed = True + + current = read_resource(config, exclude_output=True) + expect = user_input_parameters(module) + if are_different_dicts(expect, current): + if not module.check_mode: + update(config) + changed = True + + result = read_resource(config) + result['id'] = module.params.get('id') + else: + if resource: + if not module.check_mode: + delete(config) + changed = True + + except Exception as ex: + module.fail_json(msg=str(ex)) + + else: + result['changed'] = changed + module.exit_json(**result) + + +def user_input_parameters(module): + return { + "availability_zone": module.params.get("availability_zone"), + "cidr": module.params.get("cidr"), + "dhcp_enable": module.params.get("dhcp_enable"), + "dns_address": module.params.get("dns_address"), + "gateway_ip": module.params.get("gateway_ip"), + "name": module.params.get("name"), + "vpc_id": module.params.get("vpc_id"), + } + + +def create(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_create_parameters(opts) + r = send_create_request(module, params, client) + obj = async_wait_create(config, r, client, timeout) + module.params['id'] = navigate_value(obj, ["subnet", "id"]) + + +def update(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + timeout = 60 * int(module.params['timeouts']['update'].rstrip('m')) + opts = user_input_parameters(module) + + params = build_update_parameters(opts) + if params: + r = send_update_request(module, params, client) + async_wait_update(config, r, client, timeout) + + +def delete(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + send_delete_request(module, None, client) + + url = build_path(module, "subnets/{id}") + + def _refresh_status(): + try: + client.get(url) + except HwcClientException404: + return True, "Done" + + except Exception: + return None, "" + + return True, 
"Pending" + + timeout = 60 * int(module.params['timeouts']['create'].rstrip('m')) + try: + wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(delete) to " + "be done, error= %s" % str(ex)) + + +def read_resource(config, exclude_output=False): + module = config.module + client = config.client(get_region(module), "vpc", "project") + + res = {} + + r = send_read_request(module, client) + res["read"] = fill_read_resp_body(r) + + return update_properties(module, res, None, exclude_output) + + +def _build_query_link(opts): + query_link = "?marker={marker}&limit=10" + v = navigate_value(opts, ["vpc_id"]) + if v: + query_link += "&vpc_id=" + str(v) + + return query_link + + +def search_resource(config): + module = config.module + client = config.client(get_region(module), "vpc", "project") + opts = user_input_parameters(module) + identity_obj = _build_identity_object(opts) + query_link = _build_query_link(opts) + link = "subnets" + query_link + + result = [] + p = {'marker': ''} + while True: + url = link.format(**p) + r = send_list_request(module, client, url) + if not r: + break + + for item in r: + item = fill_list_resp_body(item) + if not are_different_dicts(identity_obj, item): + result.append(item) + + if len(result) > 1: + break + + p['marker'] = r[-1].get('id') + + return result + + +def build_create_parameters(opts): + params = dict() + + v = navigate_value(opts, ["availability_zone"], None) + if not is_empty_value(v): + params["availability_zone"] = v + + v = navigate_value(opts, ["cidr"], None) + if not is_empty_value(v): + params["cidr"] = v + + v = navigate_value(opts, ["dhcp_enable"], None) + if v is not None: + params["dhcp_enable"] = v + + v = expand_create_dns_list(opts, None) + if not is_empty_value(v): + params["dnsList"] = v + + v = navigate_value(opts, ["gateway_ip"], None) + if not is_empty_value(v): + params["gateway_ip"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_create_primary_dns(opts, None) + if not is_empty_value(v): + params["primary_dns"] = v + + v = expand_create_secondary_dns(opts, None) + if not is_empty_value(v): + params["secondary_dns"] = v + + v = navigate_value(opts, ["vpc_id"], None) + if not is_empty_value(v): + params["vpc_id"] = v + + if not params: + return params + + params = {"subnet": params} + + return params + + +def expand_create_dns_list(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v if (v and len(v) > 2) else [] + + +def expand_create_primary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[0] if v else "" + + +def expand_create_secondary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[1] if (v and len(v) > 1) else "" + + +def send_create_request(module, params, client): + url = "subnets" + try: + r = client.post(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(create), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_create(config, result, client, timeout): + module = config.module + + path_parameters = { + "subnet_id": ["subnet", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "subnets/{subnet_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, 
timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["subnet", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["UNKNOWN"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(create) to " + "be done, error= %s" % str(ex)) + + +def build_update_parameters(opts): + params = dict() + + v = navigate_value(opts, ["dhcp_enable"], None) + if v is not None: + params["dhcp_enable"] = v + + v = expand_update_dns_list(opts, None) + if v is not None: + params["dnsList"] = v + + v = navigate_value(opts, ["name"], None) + if not is_empty_value(v): + params["name"] = v + + v = expand_update_primary_dns(opts, None) + if v is not None: + params["primary_dns"] = v + + v = expand_update_secondary_dns(opts, None) + if v is not None: + params["secondary_dns"] = v + + if not params: + return params + + params = {"subnet": params} + + return params + + +def expand_update_dns_list(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + if v: + if len(v) > 2: + return v + return None + return [] + + +def expand_update_primary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[0] if v else "" + + +def expand_update_secondary_dns(d, array_index): + v = navigate_value(d, ["dns_address"], array_index) + return v[1] if (v and len(v) > 1) else "" + + +def send_update_request(module, params, client): + url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") + + try: + r = client.put(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(update), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def async_wait_update(config, result, client, timeout): + module = config.module + + path_parameters = { + "subnet_id": ["subnet", "id"], + } + data = dict((key, navigate_value(result, path)) + for key, path in path_parameters.items()) + + url = build_path(module, "subnets/{subnet_id}", data) + + def _query_status(): + r = None + try: + r = client.get(url, timeout=timeout) + except HwcClientException: + return None, "" + + try: + s = navigate_value(r, ["subnet", "status"]) + return r, s + except Exception: + return None, "" + + try: + return wait_to_finish( + ["ACTIVE"], + ["UNKNOWN"], + _query_status, timeout) + except Exception as ex: + module.fail_json(msg="module(hwc_vpc_subnet): error " + "waiting for api(update) to " + "be done, error= %s" % str(ex)) + + +def send_delete_request(module, params, client): + url = build_path(module, "vpcs/{vpc_id}/subnets/{id}") + + try: + r = client.delete(url, params) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(delete), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return r + + +def send_read_request(module, client): + url = build_path(module, "subnets/{id}") + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(read), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["subnet"], None) + + +def fill_read_resp_body(body): + result = dict() + + result["availability_zone"] = body.get("availability_zone") + + result["cidr"] = body.get("cidr") + + result["dhcp_enable"] = body.get("dhcp_enable") + + result["dnsList"] = body.get("dnsList") + + result["gateway_ip"] = body.get("gateway_ip") + + result["id"] = body.get("id") + + 
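
Both async_wait_create and async_wait_update above hand wait_to_finish a _query_status callback that returns a (response, status) pair, then poll until the subnet reports ACTIVE or the timeout budget from the timeouts option runs out. A toy sketch of that contract (a simplification under the assumption that non-target states simply mean "keep polling"; the real hwc_utils.wait_to_finish may handle unexpected states differently):

import time


def wait_to_finish_sketch(target, pending, refresh, timeout, interval=2):
    # Poll refresh() until it reports a status in `target`, or fail once
    # `timeout` seconds have elapsed. `pending` is accepted only for
    # signature parity with the real helper; this toy version keeps
    # polling on any non-target status.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result, status = refresh()
        if status in target:
            return result
        time.sleep(interval)
    raise Exception("timed out waiting for states %s" % (target,))


statuses = iter(["UNKNOWN", "UNKNOWN", "ACTIVE"])
print(wait_to_finish_sketch(
    ["ACTIVE"], ["UNKNOWN"],
    lambda: ({"subnet": {"id": "s-1"}}, next(statuses)),
    timeout=30, interval=0))
# -> {'subnet': {'id': 's-1'}} after three polls
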
result["name"] = body.get("name") + + result["neutron_network_id"] = body.get("neutron_network_id") + + result["neutron_subnet_id"] = body.get("neutron_subnet_id") + + result["primary_dns"] = body.get("primary_dns") + + result["secondary_dns"] = body.get("secondary_dns") + + result["status"] = body.get("status") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +def update_properties(module, response, array_index, exclude_output=False): + r = user_input_parameters(module) + + v = navigate_value(response, ["read", "availability_zone"], array_index) + r["availability_zone"] = v + + v = navigate_value(response, ["read", "cidr"], array_index) + r["cidr"] = v + + v = navigate_value(response, ["read", "dhcp_enable"], array_index) + r["dhcp_enable"] = v + + v = navigate_value(response, ["read", "dnsList"], array_index) + r["dns_address"] = v + + v = navigate_value(response, ["read", "gateway_ip"], array_index) + r["gateway_ip"] = v + + v = navigate_value(response, ["read", "name"], array_index) + r["name"] = v + + v = navigate_value(response, ["read", "vpc_id"], array_index) + r["vpc_id"] = v + + return r + + +def send_list_request(module, client, url): + + r = None + try: + r = client.get(url) + except HwcClientException as ex: + msg = ("module(hwc_vpc_subnet): error running " + "api(list), error: %s" % str(ex)) + module.fail_json(msg=msg) + + return navigate_value(r, ["subnets"], None) + + +def _build_identity_object(all_opts): + result = dict() + + v = navigate_value(all_opts, ["availability_zone"], None) + result["availability_zone"] = v + + v = navigate_value(all_opts, ["cidr"], None) + result["cidr"] = v + + v = navigate_value(all_opts, ["dhcp_enable"], None) + result["dhcp_enable"] = v + + v = navigate_value(all_opts, ["dns_address"], None) + result["dnsList"] = v + + v = navigate_value(all_opts, ["gateway_ip"], None) + result["gateway_ip"] = v + + result["id"] = None + + v = navigate_value(all_opts, ["name"], None) + result["name"] = v + + result["neutron_network_id"] = None + + result["neutron_subnet_id"] = None + + result["primary_dns"] = None + + result["secondary_dns"] = None + + result["status"] = None + + v = navigate_value(all_opts, ["vpc_id"], None) + result["vpc_id"] = v + + return result + + +def fill_list_resp_body(body): + result = dict() + + result["availability_zone"] = body.get("availability_zone") + + result["cidr"] = body.get("cidr") + + result["dhcp_enable"] = body.get("dhcp_enable") + + result["dnsList"] = body.get("dnsList") + + result["gateway_ip"] = body.get("gateway_ip") + + result["id"] = body.get("id") + + result["name"] = body.get("name") + + result["neutron_network_id"] = body.get("neutron_network_id") + + result["neutron_subnet_id"] = body.get("neutron_subnet_id") + + result["primary_dns"] = body.get("primary_dns") + + result["secondary_dns"] = body.get("secondary_dns") + + result["status"] = body.get("status") + + result["vpc_id"] = body.get("vpc_id") + + return result + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py b/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py new file mode 100644 index 000000000..774f29134 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ibm_sa_domain +short_description: Manages domains on IBM Spectrum Accelerate Family storage systems + +description: + - "This module can be used to add domains to or remove them from IBM Spectrum Accelerate Family storage systems." + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + domain: + description: + - Name of the domain to be managed. + required: true + type: str + state: + description: + - The desired state of the domain. + default: "present" + choices: [ "present", "absent" ] + type: str + ldap_id: + description: + - LDAP ID to add to the domain. + required: false + type: str + size: + description: + - Size of the domain. + required: false + type: str + hard_capacity: + description: + - Hard capacity of the domain. + required: false + type: str + soft_capacity: + description: + - Soft capacity of the domain. + required: false + type: str + max_cgs: + description: + - Maximum number of cgs. + required: false + type: str + max_dms: + description: + - Maximum number of dms. + required: false + type: str + max_mirrors: + description: + - Maximum number of mirrors. + required: false + type: str + max_pools: + description: + - Maximum number of pools. + required: false + type: str + max_volumes: + description: + - Maximum number of volumes. + required: false + type: str + perf_class: + description: + - Add the domain to a performance class. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +''' + +EXAMPLES = ''' +- name: Define new domain. + community.general.ibm_sa_domain: + domain: domain_name + size: domain_size + state: present + username: admin + password: secret + endpoints: hostdev-system + +- name: Delete domain. + community.general.ibm_sa_domain: + domain: domain_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +''' +RETURN = ''' +msg: + description: Module return status. + returned: as needed + type: str + sample: "domain 'domain_name' created successfully." +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + domain=dict(required=True), + size=dict(), + max_dms=dict(), + max_cgs=dict(), + ldap_id=dict(), + max_mirrors=dict(), + max_pools=dict(), + max_volumes=dict(), + perf_class=dict(), + hard_capacity=dict(), + soft_capacity=dict() + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + domain = xcli_client.cmd.domain_list( + domain=module.params['domain']).as_single_element + state = module.params['state'] + + state_changed = False + msg = 'Domain \'{0}\''.format(module.params['domain']) + if state == 'present' and not domain: + state_changed = execute_pyxcli_command( + module, 'domain_create', xcli_client) + msg += " created successfully." + elif state == 'absent' and domain: + state_changed = execute_pyxcli_command( + module, 'domain_delete', xcli_client) + msg += " deleted successfully." + else: + msg += " state unchanged."
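
Every ibm_sa_* module in this patch reduces to the same ensure-present/ensure-absent skeleton around a pyxcli lookup: create the object when state=present and the lookup came back empty, delete it when state=absent and the lookup found one, and report changed accordingly. A generic sketch of that idempotency contract (the callables below are placeholders, not real pyxcli commands):

def ensure_state(state, exists, create, delete):
    # Return True when a change was made, False when the system is
    # already in the desired state.
    if state == 'present' and not exists():
        create()
        return True
    if state == 'absent' and exists():
        delete()
        return True
    return False


print(ensure_state('present', lambda: False, lambda: None, lambda: None))
# -> True: the object was missing, so it was created.
print(ensure_state('absent', lambda: False, lambda: None, lambda: None))
# -> False: nothing to delete, no change reported.
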
+ + module.exit_json(changed=state_changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_host.py b/ansible_collections/community/general/plugins/modules/ibm_sa_host.py new file mode 100644 index 000000000..614865ae0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ibm_sa_host.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ibm_sa_host +short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems + +description: + - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems." + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host state. + default: "present" + choices: [ "present", "absent" ] + type: str + cluster: + description: + - The name of the cluster to include the host. + required: false + type: str + domain: + description: + - The domains the host will be attached to. + To include more than one domain, + separate domain names with commas. + To include all existing domains, use an asterisk ("*"). + required: false + type: str + iscsi_chap_name: + description: + - The host's CHAP name identifier. + required: false + type: str + iscsi_chap_secret: + description: + - The password of the initiator used to + authenticate to the system when CHAP is enabled. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +''' + +EXAMPLES = ''' +- name: Define new host. + community.general.ibm_sa_host: + host: host_name + state: present + username: admin + password: secret + endpoints: hostdev-system + +- name: Delete host.
+ community.general.ibm_sa_host: + host: host_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +''' +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + host=dict(required=True), + cluster=dict(), + domain=dict(), + iscsi_chap_name=dict(), + iscsi_chap_secret=dict(no_log=True), + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + host = xcli_client.cmd.host_list( + host=module.params['host']).as_single_element + state = module.params['state'] + + state_changed = False + if state == 'present' and not host: + state_changed = execute_pyxcli_command( + module, 'host_define', xcli_client) + elif state == 'absent' and host: + state_changed = execute_pyxcli_command( + module, 'host_delete', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py b/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py new file mode 100644 index 000000000..fdb27f85a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ibm_sa_host_ports +short_description: Add host ports on IBM Spectrum Accelerate Family storage systems + +description: + - "This module adds ports to or removes them from the hosts + on IBM Spectrum Accelerate Family storage systems." + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host ports state. + default: "present" + choices: [ "present", "absent" ] + type: str + iscsi_name: + description: + - iSCSI initiator name. + required: false + type: str + fcaddress: + description: + - Fiber channel address. + required: false + type: str + num_of_visible_targets: + description: + - Number of visible targets. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +''' + +EXAMPLES = ''' +- name: Add ports for host. + community.general.ibm_sa_host_ports: + host: test_host + iscsi_name: iqn.1994-05.com*** + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Remove ports for host. 
+  community.general.ibm_sa_host_ports:
+    host: test_host
+    iscsi_name: iqn.1994-05.com***
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+    state: absent
+
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl,
+                                                                                     spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+    argument_spec = spectrum_accelerate_spec()
+    argument_spec.update(
+        dict(
+            state=dict(default='present', choices=['present', 'absent']),
+            host=dict(required=True),
+            iscsi_name=dict(),
+            fcaddress=dict(),
+            num_of_visible_targets=dict()
+        )
+    )
+
+    module = AnsibleModule(argument_spec)
+    is_pyxcli_installed(module)
+
+    xcli_client = connect_ssl(module)
+    # collect the port names currently defined for the host
+    ports = []
+    try:
+        ports = xcli_client.cmd.host_list_ports(
+            host=module.params.get('host')).as_list
+    except Exception:
+        pass
+    state = module.params['state']
+    port_exists = False
+    ports = [port.get('port_name') for port in ports]
+
+    # Wrap the single given address in a list so the membership test below is
+    # an exact match rather than a substring match on the raw string.
+    fc_ports = ([module.params.get('fcaddress')]
+                if module.params.get('fcaddress') else [])
+    iscsi_ports = ([module.params.get('iscsi_name')]
+                   if module.params.get('iscsi_name') else [])
+    for port in ports:
+        if port in iscsi_ports or port in fc_ports:
+            port_exists = True
+            break
+    state_changed = False
+    if state == 'present' and not port_exists:
+        state_changed = execute_pyxcli_command(
+            module, 'host_add_port', xcli_client)
+    if state == 'absent' and port_exists:
+        state_changed = execute_pyxcli_command(
+            module, 'host_remove_port', xcli_client)
+
+    module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py b/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
new file mode 100644
index 000000000..88065aa4e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_pool
+short_description: Handles pools on IBM Spectrum Accelerate Family storage systems
+
+description:
+    - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems."
+
+attributes:
+    check_mode:
+        support: none
+    diff_mode:
+        support: none
+
+options:
+    pool:
+        description:
+            - Pool name.
+        required: true
+        type: str
+    state:
+        description:
+            - Pool state.
+        default: "present"
+        choices: [ "present", "absent" ]
+        type: str
+    size:
+        description:
+            - Pool size in GB.
+        required: false
+        type: str
+    snapshot_size:
+        description:
+            - Pool snapshot size in GB.
+        required: false
+        type: str
+    domain:
+        description:
+            - Adds the pool to the specified domain.
+        required: false
+        type: str
+    perf_class:
+        description:
+            - Assigns a perf_class to the pool.
+        required: false
+        type: str
+
+extends_documentation_fragment:
+    - community.general.ibm_storage
+    - community.general.attributes
+
+author:
+    - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create new pool.
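+  # size and snapshot_size are passed through as strings and interpreted by
+  # the array as GB values (see the option documentation above).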
+  community.general.ibm_sa_pool:
+    pool: pool_name
+    size: 300
+    state: present
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+
+- name: Delete pool.
+  community.general.ibm_sa_pool:
+    pool: pool_name
+    state: absent
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+    connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+    argument_spec = spectrum_accelerate_spec()
+    argument_spec.update(
+        dict(
+            state=dict(default='present', choices=['present', 'absent']),
+            pool=dict(required=True),
+            size=dict(),
+            snapshot_size=dict(),
+            domain=dict(),
+            perf_class=dict()
+        )
+    )
+
+    module = AnsibleModule(argument_spec)
+
+    is_pyxcli_installed(module)
+
+    xcli_client = connect_ssl(module)
+    pool = xcli_client.cmd.pool_list(
+        pool=module.params['pool']).as_single_element
+    state = module.params['state']
+
+    state_changed = False
+    if state == 'present' and not pool:
+        state_changed = execute_pyxcli_command(
+            module, 'pool_create', xcli_client)
+    if state == 'absent' and pool:
+        state_changed = execute_pyxcli_command(
+            module, 'pool_delete', xcli_client)
+
+    module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py b/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
new file mode 100644
index 000000000..bc5f81b32
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol
+short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems
+
+description:
+    - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems."
+
+attributes:
+    check_mode:
+        support: none
+    diff_mode:
+        support: none
+
+options:
+    vol:
+        description:
+            - Volume name.
+        required: true
+        type: str
+    pool:
+        description:
+            - Volume pool.
+        required: false
+        type: str
+    state:
+        description:
+            - Volume state.
+        default: "present"
+        choices: [ "present", "absent" ]
+        type: str
+    size:
+        description:
+            - Volume size.
+        required: false
+        type: str
+
+extends_documentation_fragment:
+    - community.general.ibm_storage
+    - community.general.attributes
+
+author:
+    - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create a new volume.
+  community.general.ibm_sa_vol:
+    vol: volume_name
+    pool: pool_name
+    size: 17
+    state: present
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+
+- name: Delete an existing volume.
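+  # The module consults vol_list first, so deleting an already absent volume
+  # reports changed=false. Note that vol_delete removes the volume and its
+  # data on the array, so use the absent state with care.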
+ community.general.ibm_sa_vol: + vol: volume_name + state: absent + username: admin + password: secret + endpoints: hostdev-system +''' +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ + connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed + + +def main(): + argument_spec = spectrum_accelerate_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + vol=dict(required=True), + pool=dict(), + size=dict() + ) + ) + + module = AnsibleModule(argument_spec) + + is_pyxcli_installed(module) + + xcli_client = connect_ssl(module) + # required args + volume = xcli_client.cmd.vol_list( + vol=module.params.get('vol')).as_single_element + state = module.params['state'] + + state_changed = False + if state == 'present' and not volume: + state_changed = execute_pyxcli_command( + module, 'vol_create', xcli_client) + elif state == 'absent' and volume: + state_changed = execute_pyxcli_command( + module, 'vol_delete', xcli_client) + + module.exit_json(changed=state_changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py b/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py new file mode 100644 index 000000000..ea8b485ef --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py @@ -0,0 +1,148 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (C) 2018 IBM CORPORATION +# Author(s): Tzur Eliyahu +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ibm_sa_vol_map +short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems + +description: + - "This module maps volumes to or unmaps them from the hosts on + IBM Spectrum Accelerate Family storage systems." + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + vol: + description: + - Volume name. + required: true + type: str + state: + default: "present" + choices: [ "present", "absent" ] + description: + - When the state is present the volume is mapped. + When the state is absent, the volume is meant to be unmapped. + type: str + + cluster: + description: + - Maps the volume to a cluster. + required: false + type: str + host: + description: + - Maps the volume to a host. + required: false + type: str + lun: + description: + - The LUN identifier. + required: false + type: str + override: + description: + - Overrides the existing volume mapping. + required: false + type: str + +extends_documentation_fragment: + - community.general.ibm_storage + - community.general.attributes + +author: + - Tzur Eliyahu (@tzure) +''' + +EXAMPLES = ''' +- name: Map volume to host. + community.general.ibm_sa_vol_map: + vol: volume_name + lun: 1 + host: host_name + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Map volume to cluster. + community.general.ibm_sa_vol_map: + vol: volume_name + lun: 1 + cluster: cluster_name + username: admin + password: secret + endpoints: hostdev-system + state: present + +- name: Unmap volume. 
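+  # Unmapping checks vol_mapping_list for an existing mapping of this volume
+  # and only runs unmap_vol when one is found, keeping the task idempotent.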
+  community.general.ibm_sa_vol_map:
+    vol: volume_name
+    host: host_name
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+    state: absent
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command,
+                                                                                     connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+    argument_spec = spectrum_accelerate_spec()
+    argument_spec.update(
+        dict(
+            state=dict(default='present', choices=['present', 'absent']),
+            vol=dict(required=True),
+            lun=dict(),
+            cluster=dict(),
+            host=dict(),
+            override=dict()
+        )
+    )
+
+    module = AnsibleModule(argument_spec)
+    is_pyxcli_installed(module)
+
+    xcli_client = connect_ssl(module)
+    # look up existing mappings for the volume
+    mapping = False
+    try:
+        mapped_hosts = xcli_client.cmd.vol_mapping_list(
+            vol=module.params.get('vol')).as_list
+        for host in mapped_hosts:
+            if host['host'] == module.params.get("host", ""):
+                mapping = True
+    except Exception:
+        pass
+    state = module.params['state']
+
+    state_changed = False
+    if state == 'present' and not mapping:
+        state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client)
+    if state == 'absent' and mapping:
+        state_changed = execute_pyxcli_command(
+            module, 'unmap_vol', xcli_client)
+
+    module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/icinga2_feature.py b/ansible_collections/community/general/plugins/modules/icinga2_feature.py
new file mode 100644
index 000000000..6e6bc5416
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/icinga2_feature.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Loic Blot
+# Copyright (c) 2018, Ansible Project
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_feature
+
+short_description: Manage Icinga2 feature
+description:
+    - This module can be used to enable or disable an Icinga2 feature.
+author: "Loic Blot (@nerzhul)"
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+    name:
+        type: str
+        description:
+            - This is the feature name to enable or disable.
+        required: true
+    state:
+        type: str
+        description:
+            - If set to C(present) and feature is disabled, then feature is enabled.
+            - If set to C(present) and feature is already enabled, then nothing is changed.
+            - If set to C(absent) and feature is enabled, then feature is disabled.
+            - If set to C(absent) and feature is already disabled, then nothing is changed.
+ choices: [ "present", "absent" ] + default: present +''' + +EXAMPLES = ''' +- name: Enable ido-pgsql feature + community.general.icinga2_feature: + name: ido-pgsql + state: present + +- name: Disable api feature + community.general.icinga2_feature: + name: api + state: absent +''' + +RETURN = ''' +# +''' + +import re +from ansible.module_utils.basic import AnsibleModule + + +class Icinga2FeatureHelper: + def __init__(self, module): + self.module = module + self._icinga2 = module.get_bin_path('icinga2', True) + self.feature_name = self.module.params['name'] + self.state = self.module.params['state'] + + def _exec(self, args): + cmd = [self._icinga2, 'feature'] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return rc, out + + def manage(self): + rc, out = self._exec(["list"]) + if rc != 0: + self.module.fail_json(msg="Unable to list icinga2 features. " + "Ensure icinga2 is installed and present in binary path.") + + # If feature is already in good state, just exit + if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \ + (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"): + self.module.exit_json(changed=False) + + if self.module.check_mode: + self.module.exit_json(changed=True) + + feature_enable_str = "enable" if self.state == "present" else "disable" + + rc, out = self._exec([feature_enable_str, self.feature_name]) + + change_applied = False + if self.state == "present": + if rc != 0: + self.module.fail_json(msg="Failed to %s feature %s." + " icinga2 command returned %s" % (feature_enable_str, + self.feature_name, + out)) + + if re.search("already enabled", out) is None: + change_applied = True + else: + if rc == 0: + change_applied = True + # RC is not 0 for this already disabled feature, handle it as no change applied + elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out): + change_applied = False + else: + self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out) + + self.module.exit_json(changed=change_applied) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=["present", "absent"], default="present") + ), + supports_check_mode=True + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + Icinga2FeatureHelper(module).manage() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/icinga2_host.py b/ansible_collections/community/general/plugins/modules/icinga2_host.py new file mode 100644 index 000000000..7f25c55d9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/icinga2_host.py @@ -0,0 +1,337 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This module is proudly sponsored by CGI (www.cgi.com) and +# KPN (www.kpn.com). +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: icinga2_host +short_description: Manage a host in Icinga2 +description: + - "Add or remove a host to Icinga2 through the API." 
+ - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)" +author: "Jurgen Brand (@t794104)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + url: + type: str + description: + - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path + use_proxy: + description: + - If C(false), it will not use a proxy, even if one is defined in + an environment variable on the target hosts. + type: bool + default: true + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: true + url_username: + type: str + description: + - The username for use in HTTP basic authentication. + - This parameter can be used without C(url_password) for sites that allow empty passwords. + url_password: + type: str + description: + - The password for use in HTTP basic authentication. + - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used. + force_basic_auth: + description: + - httplib2, the library used by the uri module only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly + send a 401, logins will fail. This option forces the sending of the Basic authentication header + upon initial request. + type: bool + default: false + client_cert: + type: path + description: + - PEM formatted certificate chain file to be used for SSL client + authentication. This file can also include the key as well, and if + the key is included, C(client_key) is not required. + client_key: + type: path + description: + - PEM formatted file that contains your private key to be used for SSL + client authentication. If C(client_cert) contains both the certificate + and key, this option is not required. + state: + type: str + description: + - Apply feature state. + choices: [ "present", "absent" ] + default: present + name: + type: str + description: + - Name used to create / delete the host. This does not need to be the FQDN, but does needs to be unique. + required: true + aliases: [host] + zone: + type: str + description: + - The zone from where this host should be polled. + template: + type: str + description: + - The template used to define the host. + - Template cannot be modified after object creation. + check_command: + type: str + description: + - The command used to check if the host is alive. + default: "hostalive" + display_name: + type: str + description: + - The name used to display the host. + - If not specified, it defaults to the value of the I(name) parameter. + ip: + type: str + description: + - The IP address of the host. + required: true + variables: + type: dict + description: + - Dictionary of variables. 
+extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Add host to icinga + community.general.icinga2_host: + url: "https://icinga2.example.com" + url_username: "ansible" + url_password: "a_secret" + state: present + name: "{{ ansible_fqdn }}" + ip: "{{ ansible_default_ipv4.address }}" + variables: + foo: "bar" + delegate_to: 127.0.0.1 +''' + +RETURN = ''' +name: + description: The name used to create, modify or delete the host + type: str + returned: always +data: + description: The data structure used for create, modify or delete of the host + type: dict + returned: always +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url, url_argument_spec + + +# =========================================== +# Icinga2 API class +# +class icinga2_api: + module = None + + def __init__(self, module): + self.module = module + + def call_url(self, path, data='', method='GET'): + headers = { + 'Accept': 'application/json', + 'X-HTTP-Method-Override': method, + } + url = self.module.params.get("url") + "/" + path + rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy']) + body = '' + if rsp: + body = json.loads(rsp.read()) + if info['status'] >= 400: + body = info['body'] + return {'code': info['status'], 'data': body} + + def check_connection(self): + ret = self.call_url('v1/status') + if ret['code'] == 200: + return True + return False + + def exists(self, hostname): + data = { + "filter": "match(\"" + hostname + "\", host.name)", + } + ret = self.call_url( + path="v1/objects/hosts", + data=self.module.jsonify(data) + ) + if ret['code'] == 200: + if len(ret['data']['results']) == 1: + return True + return False + + def create(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="PUT" + ) + return ret + + def delete(self, hostname): + data = {"cascade": 1} + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="DELETE" + ) + return ret + + def modify(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + data=self.module.jsonify(data), + method="POST" + ) + return ret + + def diff(self, hostname, data): + ret = self.call_url( + path="v1/objects/hosts/" + hostname, + method="GET" + ) + changed = False + ic_data = ret['data']['results'][0] + for key in data['attrs']: + if key not in ic_data['attrs'].keys(): + changed = True + elif data['attrs'][key] != ic_data['attrs'][key]: + changed = True + return changed + + +# =========================================== +# Module execution. 
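+#
+# The icinga2_api helper above tunnels every verb through the
+# X-HTTP-Method-Override header, and diff() only flags attributes that were
+# added or changed locally; attributes removed from the playbook are not
+# detected. As a rough sketch, a create request therefore ends up as:
+#
+#   PUT v1/objects/hosts/<name>
+#   {"templates": ["<template>"],
+#    "attrs": {"address": "<ip>", "display_name": "<name>",
+#              "check_command": "hostalive", "zone": "<zone>",
+#              "vars.made_by": "ansible"}}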
+#
+def main():
+    # use the predefined argument spec for url
+    argument_spec = url_argument_spec()
+    # add our own arguments
+    argument_spec.update(
+        state=dict(default="present", choices=["absent", "present"]),
+        name=dict(required=True, aliases=['host']),
+        zone=dict(),
+        template=dict(default=None),
+        check_command=dict(default="hostalive"),
+        display_name=dict(default=None),
+        ip=dict(required=True),
+        variables=dict(type='dict', default=None),
+    )
+
+    # Define the main module
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    state = module.params["state"]
+    name = module.params["name"]
+    zone = module.params["zone"]
+    template = []
+    if module.params["template"]:
+        template = [module.params["template"]]
+    check_command = module.params["check_command"]
+    ip = module.params["ip"]
+    display_name = module.params["display_name"]
+    if not display_name:
+        display_name = name
+    variables = module.params["variables"]
+
+    try:
+        icinga = icinga2_api(module=module)
+        icinga.check_connection()
+    except Exception as e:
+        module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
+
+    data = {
+        'templates': template,
+        'attrs': {
+            'address': ip,
+            'display_name': display_name,
+            'check_command': check_command,
+            'zone': zone,
+            'vars.made_by': "ansible"
+        }
+    }
+
+    # 'variables' defaults to None, so guard before iterating over it
+    if variables:
+        for key, value in variables.items():
+            data['attrs']['vars.' + key] = value
+
+    changed = False
+    if icinga.exists(name):
+        if state == "absent":
+            if module.check_mode:
+                module.exit_json(changed=True, name=name, data=data)
+            else:
+                try:
+                    ret = icinga.delete(name)
+                    if ret['code'] == 200:
+                        changed = True
+                    else:
+                        module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data']))
+                except Exception as e:
+                    module.fail_json(msg="exception deleting host: " + str(e))
+
+        elif icinga.diff(name, data):
+            if module.check_mode:
+                # a difference was detected, so report it as a change
+                module.exit_json(changed=True, name=name, data=data)
+
+            # Template attribute is not allowed in modification
+            del data['templates']
+
+            ret = icinga.modify(name, data)
+
+            if ret['code'] == 200:
+                changed = True
+            else:
+                module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data']))
+
+    else:
+        if state == "present":
+            if module.check_mode:
+                changed = True
+            else:
+                try:
+                    ret = icinga.create(name, data)
+                    if ret['code'] == 200:
+                        changed = True
+                    else:
+                        module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data']))
+                except Exception as e:
+                    module.fail_json(msg="exception creating host: " + str(e))
+
+    module.exit_json(changed=changed, name=name, data=data)
+
+
+# import module snippets
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
new file mode 100644
index 000000000..d760a2c3a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: idrac_redfish_command +short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + perform an action. + - For use with Dell iDRAC operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on iDRAC. + type: str + command: + required: true + description: + - List of commands to execute on iDRAC. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iDRAC. + type: str + username: + description: + - Username for authenticating to iDRAC. + type: str + password: + description: + - Password for authenticating to iDRAC. + type: str + auth_token: + description: + - Security token for authenticating to iDRAC. + type: str + version_added: 2.3.0 + timeout: + description: + - Timeout in seconds for HTTP requests to iDRAC. + default: 10 + type: int + resource_id: + required: false + description: + - ID of the System, Manager or Chassis to modify. + type: str + version_added: '0.2.0' + +author: "Jose Delarosa (@jose-delarosa)" +''' + +EXAMPLES = ''' + - name: Create BIOS configuration job (schedule BIOS setting update) + community.general.idrac_redfish_command: + category: Systems + command: CreateBiosConfigJob + resource_id: System.Embedded.1 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +return_values: + description: Dictionary containing command-specific response data from the action. 
+ returned: on success + type: dict + version_added: 6.6.0 + sample: { + "job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011" + } +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils.common.text.converters import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def create_bios_config_job(self): + result = {} + key = "Bios" + jobs = "Jobs" + + # Search for 'key' entry and extract URI from it + response = self.get_request(self.root_uri + self.systems_uris[0]) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, 'msg': "Key %s not found" % key} + + bios_uri = data[key]["@odata.id"] + + # Extract proper URI + response = self.get_request(self.root_uri + bios_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][ + "@odata.id"] + + payload = {"TargetSettingsURI": set_bios_attr_uri} + response = self.post_request( + self.root_uri + self.manager_uri + "/" + jobs, payload) + if response['ret'] is False: + return response + + response_output = response['resp'].__dict__ + job_id_full = response_output["headers"]["Location"] + job_id = re.search("JID_.+", job_id_full).group() + return {'ret': True, 'msg': "Config job %s created" % job_id, 'job_id': job_id_full} + + +CATEGORY_COMMANDS_ALL = { + "Systems": ["CreateBiosConfigJob"], + "Accounts": [], + "Manager": [] +} + + +def main(): + result = {} + return_values = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict() + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + + if category == "Systems": + # execute only if we find a System resource + # NOTE: Currently overriding the usage of 'data_modification' due to + # how 'resource_id' is processed. 
In the case of CreateBiosConfigJob, + # we interact with BOTH systems and managers, so you currently cannot + # specify a single 'resource_id' to make both '_find_systems_resource' + # and '_find_managers_resource' return success. Since + # CreateBiosConfigJob doesn't use the matched 'resource_id' for a + # system regardless of what's specified, disabling the 'resource_id' + # inspection for the next call allows a specific manager to be + # specified with 'resource_id'. If we ever need to expand the input + # to inspect a specific system and manager in parallel, this will need + # updates. + rf_utils.data_modification = False + result = rf_utils._find_systems_resource() + rf_utils.data_modification = True + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "CreateBiosConfigJob": + # execute only if we find a Managers resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + result = rf_utils.create_bios_config_job() + if 'job_id' in result: + return_values['job_id'] = result['job_id'] + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + module.exit_json(changed=True, msg='Action was successful', return_values=return_values) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py new file mode 100644 index 000000000..cc47e62d2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py @@ -0,0 +1,339 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: idrac_redfish_config +short_description: Manages servers through iDRAC using Dell Redfish APIs +description: + - For use with Dell iDRAC operations that require Redfish OEM extensions + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to + set or update a configuration attribute. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + type: str + description: + - Category to execute on iDRAC. + command: + required: true + description: + - List of commands to execute on iDRAC. + - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and + I(SetSystemAttributes) are mutually exclusive commands when C(category) + is I(Manager). + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iDRAC. + type: str + username: + description: + - Username for authenticating to iDRAC. + type: str + password: + description: + - Password for authenticating to iDRAC. + type: str + auth_token: + description: + - Security token for authenticating to iDRAC. + type: str + version_added: 2.3.0 + manager_attributes: + required: false + description: + - Dictionary of iDRAC attribute name and value pairs to update. 
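+      # Attribute names follow the iDRAC Group.Index.Name convention, for
+      # example NTPConfigGroup.1.NTPEnable (see the examples below).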
+ default: {} + type: 'dict' + version_added: '0.2.0' + timeout: + description: + - Timeout in seconds for HTTP requests to iDRAC. + default: 10 + type: int + resource_id: + required: false + description: + - ID of the System, Manager or Chassis to modify. + type: str + version_added: '0.2.0' + +author: "Jose Delarosa (@jose-delarosa)" +''' + +EXAMPLES = ''' + - name: Enable NTP and set NTP server and Time zone attributes in iDRAC + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + NTPConfigGroup.1.NTPEnable: "Enabled" + NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" + Time.1.Timezone: "{{ timezone }}" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" + + - name: Enable Syslog and set Syslog servers in iDRAC + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + SysLog.1.SysLogEnable: "Enabled" + SysLog.1.Server1: "{{ syslog_server1 }}" + SysLog.1.Server2: "{{ syslog_server2 }}" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" + + - name: Configure SNMP community string, port, protocol and trap format + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + SNMP.1.AgentEnable: "Enabled" + SNMP.1.AgentCommunity: "public_community_string" + SNMP.1.TrapFormat: "SNMPv1" + SNMP.1.SNMPProtocol: "All" + SNMP.1.DiscoveryPort: 161 + SNMP.1.AlertPort: 162 + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" + + - name: Enable CSIOR + community.general.idrac_redfish_config: + category: Manager + command: SetLifecycleControllerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" + + - name: Set Power Supply Redundancy Policy to A/B Grid Redundant + community.general.idrac_redfish_config: + category: Manager + command: SetSystemAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.validation import ( + check_mutually_exclusive, + check_required_arguments +) +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils.common.text.converters import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def set_manager_attributes(self, command): + + result = {} + required_arg_spec = {'manager_attributes': {'required': True}} + + try: + check_required_arguments(required_arg_spec, self.module.params) + + except TypeError as e: + msg = to_native(e) + self.module.fail_json(msg=msg) + + key = "Attributes" + command_manager_attributes_uri_map = { + "SetManagerAttributes": self.manager_uri, + "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1", + "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1" + } + manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri) + 
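+        # Flow below: read the current Attributes resource once, bucket the
+        # requested attributes into unknown / already set / needs change, and
+        # PATCH only the ones that actually differ so the module stays
+        # idempotent.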
+ attributes = self.module.params['manager_attributes'] + + attrs_to_patch = {} + attrs_skipped = {} + attrs_bad = {} # Store attrs which were not found in the system + + # Search for key entry and extract URI from it + response = self.get_request(self.root_uri + manager_uri + "/" + key) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if key not in data: + return {'ret': False, + 'msg': "%s: Key %s not found" % (command, key), + 'warning': ""} + + for attr_name, attr_value in attributes.items(): + # Check if attribute exists + if attr_name not in data[u'Attributes']: + # Skip and proceed to next attribute if this isn't valid + attrs_bad.update({attr_name: attr_value}) + continue + + # Find out if value is already set to what we want. If yes, exclude + # those attributes + if data[u'Attributes'][attr_name] == attr_value: + attrs_skipped.update({attr_name: attr_value}) + else: + attrs_to_patch.update({attr_name: attr_value}) + + warning = "" + if attrs_bad: + warning = "Incorrect attributes %s" % (attrs_bad) + + if not attrs_to_patch: + return {'ret': True, 'changed': False, + 'msg': "No changes made. Manager attributes already set.", + 'warning': warning} + + payload = {"Attributes": attrs_to_patch} + response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch), + 'warning': warning} + + +CATEGORY_COMMANDS_ALL = { + "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes", + "SetSystemAttributes"] +} + + +# list of mutually exclusive commands for a category +CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { + "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", + "SetSystemAttributes"]] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + manager_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=10), + resource_id=dict() + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # check for mutually exclusive commands + try: + # check_mutually_exclusive accepts a single list or list of lists that + # are groups of terms that should be mutually exclusive with one another + # and checks that against a dictionary + check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category], + dict.fromkeys(command_list, True)) + + except TypeError as e: + module.fail_json(msg=to_native(e)) + + # Organize by Categories / Commands + + if category == "Manager": + # execute only if we find a Manager resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]: + result = rf_utils.set_manager_attributes(command) + + # Return data back or fail with proper message + if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py new file mode 100644 index 000000000..aece61664 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py @@ -0,0 +1,251 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: idrac_redfish_info +short_description: Gather PowerEdge server information through iDRAC using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to + get information back. + - For use with Dell EMC iDRAC operations that require Redfish OEM extensions. + - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)! +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + category: + required: true + description: + - Category to execute on iDRAC. + type: str + command: + required: true + description: + - List of commands to execute on iDRAC. + - C(GetManagerAttributes) returns the list of dicts containing iDRAC, + LifecycleController and System attributes. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iDRAC. + type: str + username: + description: + - Username for authenticating to iDRAC. + type: str + password: + description: + - Password for authenticating to iDRAC. + type: str + auth_token: + description: + - Security token for authenticating to iDRAC. + type: str + version_added: 2.3.0 + timeout: + description: + - Timeout in seconds for HTTP requests to iDRAC. 
+ default: 10 + type: int + +author: "Jose Delarosa (@jose-delarosa)" +''' + +EXAMPLES = ''' + - name: Get Manager attributes with a default of 20 seconds + community.general.idrac_redfish_info: + category: Manager + command: GetManagerAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result + + # Examples to display the value of all or a single iDRAC attribute + - name: Store iDRAC attributes as a fact variable + ansible.builtin.set_fact: + idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}" + + - name: Display all iDRAC attributes + ansible.builtin.debug: + var: idrac_attributes + + - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute + ansible.builtin.debug: + var: idrac_attributes['Syslog.1.SysLogEnable'] + + # Examples to display the value of all or a single LifecycleController attribute + - name: Store LifecycleController attributes as a fact variable + ansible.builtin.set_fact: + lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}" + + - name: Display LifecycleController attributes + ansible.builtin.debug: + var: lc_attributes + + - name: Display the value of 'CollectSystemInventoryOnRestart' attribute + ansible.builtin.debug: + var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] + + # Examples to display the value of all or a single System attribute + - name: Store System attributes as a fact variable + ansible.builtin.set_fact: + system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}" + + - name: Display System attributes + ansible.builtin.debug: + var: system_attributes + + - name: Display the value of 'PSRedPolicy' + ansible.builtin.debug: + var: system_attributes['ServerPwr.1.PSRedPolicy'] + +''' + +RETURN = ''' +msg: + description: different results depending on task + returned: always + type: dict + sample: List of Manager attributes +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils.common.text.converters import to_native + + +class IdracRedfishUtils(RedfishUtils): + + def get_manager_attributes(self): + result = {} + manager_attributes = [] + properties = ['Attributes', 'Id'] + + response = self.get_request(self.root_uri + self.manager_uri) + + if response['ret'] is False: + return response + data = response['data'] + + # Manager attributes are supported as part of iDRAC OEM extension + # Attributes are supported only on iDRAC9 + try: + for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']: + attributes_uri = members[u'@odata.id'] + + response = self.get_request(self.root_uri + attributes_uri) + if response['ret'] is False: + return response + data = response['data'] + + attributes = {} + for prop in properties: + if prop in data: + attributes[prop] = data.get(prop) + + if attributes: + manager_attributes.append(attributes) + + result['ret'] = True + + except (AttributeError, KeyError) as e: + result['ret'] = False + result['msg'] = "Failed to find attribute/key: " + str(e) + + result["entries"] = manager_attributes + return result + + +CATEGORY_COMMANDS_ALL = { + "Manager": ["GetManagerAttributes"] +} + + +def main(): + result = {} + module = 
AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True, + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + + if category == "Manager": + # execute only if we find a Manager resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "GetManagerAttributes": + result = rf_utils.get_manager_attributes() + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + module.exit_json(redfish_facts=result) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py new file mode 100644 index 000000000..0ec385e73 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ilo_redfish_command +short_description: Manages Out-Of-Band controllers using Redfish APIs +version_added: 6.6.0 +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + perform an action. +attributes: + check_mode: + support: none + diff_mode: + support: none +extends_documentation_fragment: + - community.general.attributes +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + choices: ['Systems'] + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + required: false + description: + - Username for authenticating to iLO. 
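+    # Either username/password or auth_token must be supplied; see the
+    # required_one_of constraint in main() below.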
+ type: str + password: + required: false + description: + - Password for authenticating to iLO. + type: str + auth_token: + required: false + description: + - Security token for authenticating to iLO. + type: str + timeout: + required: false + description: + - Timeout in seconds for HTTP requests to iLO. + default: 60 + type: int +author: + - Varni H P (@varini-hp) +''' + +EXAMPLES = ''' + - name: Wait for iLO Reboot Completion + community.general.ilo_redfish_command: + category: Systems + command: WaitforiLORebootCompletion + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +ilo_redfish_command: + description: Returns the status of the operation performed on the iLO. + type: dict + contains: + WaitforiLORebootCompletion: + description: Returns the output msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Return True/False based on whether the operation was performed succesfully. + type: bool + msg: + description: Status of the operation performed on the iLO. + type: str + returned: always +''' + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["WaitforiLORebootCompletion"] +} + +from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + timeout=dict(type="int", default=60), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True) + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + timeout = module.params['timeout'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native( + "Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json( + msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + if category == "Systems": + # execute only if we find a System resource + + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "WaitforiLORebootCompletion": + result[command] = rf_utils.wait_for_ilo_reboot_completion() + + # Return data back or fail with proper message + if not result[command]['ret']: + module.fail_json(msg=result) + + changed = result[command].get('changed', False) + module.exit_json(ilo_redfish_command=result, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py new file mode 100644 index 000000000..1f021895d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ilo_redfish_config +short_description: Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions +version_added: 4.2.0 +description: + - Builds Redfish URIs locally and sends them to iLO to + set or update a configuration attribute. + - For use with HPE iLO operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + type: str + description: + - Command category to execute on iLO. + choices: ['Manager'] + command: + required: true + description: + - List of commands to execute on iLO. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iLO. + type: str + username: + description: + - Username for authenticating to iLO. + type: str + password: + description: + - Password for authenticating to iLO. + type: str + auth_token: + description: + - Security token for authenticating to iLO. + type: str + timeout: + description: + - Timeout in seconds for HTTP requests to iLO. + default: 10 + type: int + attribute_name: + required: true + description: + - Name of the attribute to be configured. + type: str + attribute_value: + required: false + description: + - Value of the attribute to be configured. 
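+    # Commands such as SetTimeZone and SetNTPServers consume this value,
+    # while SetWINSReg only needs attribute_name (see the first example below).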
+ type: str +author: + - "Bhavya B (@bhavya06)" +''' + +EXAMPLES = ''' + - name: Disable WINS Registration + community.general.ilo_redfish_config: + category: Manager + command: SetWINSReg + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: WINSRegistration + + - name: Set Time Zone + community.general.ilo_redfish_config: + category: Manager + command: SetTimeZone + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: TimeZone + attribute_value: Chennai + + - name: Set NTP Servers + community.general.ilo_redfish_config: + category: Manager + command: SetNTPServers + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: StaticNTPServers + attribute_value: X.X.X.X + +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +''' + +CATEGORY_COMMANDS_ALL = { + "Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"] +} + +from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True, choices=list( + CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + attribute_name=dict(required=True), + attribute_value=dict(type='str'), + timeout=dict(type='int', default=10) + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + creds = {"user": module.params['username'], + "pswd": module.params['password'], + "token": module.params['auth_token']} + + timeout = module.params['timeout'] + + root_uri = "https://" + module.params['baseuri'] + rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) + mgr_attributes = {'mgr_attr_name': module.params['attribute_name'], + 'mgr_attr_value': module.params['attribute_value']} + changed = False + + offending = [ + cmd for cmd in command_list if cmd not in CATEGORY_COMMANDS_ALL[category]] + + if offending: + module.fail_json(msg=to_native("Invalid Command(s): '%s'. 
Allowed Commands = %s" % ( + offending, CATEGORY_COMMANDS_ALL[category]))) + + if category == "Manager": + resource = rf_utils._find_managers_resource() + if not resource['ret']: + module.fail_json(msg=to_native(resource['msg'])) + + dispatch = dict( + SetTimeZone=rf_utils.set_time_zone, + SetDNSserver=rf_utils.set_dns_server, + SetDomainName=rf_utils.set_domain_name, + SetNTPServers=rf_utils.set_ntp_server, + SetWINSReg=rf_utils.set_wins_registration + ) + + for command in command_list: + result[command] = dispatch[command](mgr_attributes) + if 'changed' in result[command]: + changed |= result[command]['changed'] + + module.exit_json(ilo_redfish_config=result, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py new file mode 100644 index 000000000..90cafb8ec --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ilo_redfish_info +short_description: Gathers server information through iLO using Redfish APIs +version_added: 4.2.0 +description: + - Builds Redfish URIs locally and sends them to iLO to + get information back. + - For use with HPE iLO operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + category: + required: true + description: + - List of categories to execute on iLO. + type: list + elements: str + command: + required: true + description: + - List of commands to execute on iLO. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of iLO. + type: str + username: + description: + - Username for authenticating to iLO. + type: str + password: + description: + - Password for authenticating to iLO. + type: str + auth_token: + description: + - Security token for authenticating to iLO. + type: str + timeout: + description: + - Timeout in seconds for HTTP requests to iLO. + default: 10 + type: int +author: + - "Bhavya B (@bhavya06)" +''' + +EXAMPLES = ''' + - name: Get iLO Sessions + community.general.ilo_redfish_info: + category: Sessions + command: GetiLOSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result_sessions +''' + +RETURN = ''' +ilo_redfish_info: + description: Returns iLO sessions. + type: dict + contains: + GetiLOSessions: + description: Returns the iLO session msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Check variable to see if the information was successfully retrieved. + type: bool + msg: + description: Information of all active iLO sessions. + type: list + elements: dict + contains: + Description: + description: Provides a description of the resource. + type: str + Id: + description: The sessionId. + type: str + Name: + description: The name of the resource. + type: str + UserName: + description: Name to use to log in to the management processor. 
+ type: str + returned: always +''' + +CATEGORY_COMMANDS_ALL = { + "Sessions": ["GetiLOSessions"] +} + +CATEGORY_COMMANDS_DEFAULT = { + "Sessions": "GetiLOSessions" +} + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils + + +def main(): + result = {} + category_list = [] + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True, type='list', elements='str'), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True + ) + + creds = {"user": module.params['username'], + "pswd": module.params['password'], + "token": module.params['auth_token']} + + timeout = module.params['timeout'] + + root_uri = "https://" + module.params['baseuri'] + rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) + + # Build Category list + if "all" in module.params['category']: + for entry in CATEGORY_COMMANDS_ALL: + category_list.append(entry) + else: + # one or more categories specified + category_list = module.params['category'] + + for category in category_list: + command_list = [] + # Build Command list for each Category + if category in CATEGORY_COMMANDS_ALL: + if not module.params['command']: + # True if we don't specify a command --> use default + command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) + elif "all" in module.params['command']: + for entry in CATEGORY_COMMANDS_ALL[category]: + command_list.append(entry) + # one or more commands + else: + command_list = module.params['command'] + # Verify that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg="Invalid Command: %s" % cmd) + else: + # Fail if even one category given is invalid + module.fail_json(msg="Invalid Category: %s" % category) + + # Organize by Categories / Commands + if category == "Sessions": + for command in command_list: + if command == "GetiLOSessions": + result[command] = rf_utils.get_ilo_sessions() + + module.exit_json(ilo_redfish_info=result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/imc_rest.py b/ansible_collections/community/general/plugins/modules/imc_rest.py new file mode 100644 index 000000000..4bbaad23a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/imc_rest.py @@ -0,0 +1,441 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: imc_rest +short_description: Manage Cisco IMC hardware through its REST API +description: + - Provides direct access to the Cisco IMC REST API. + - Perform any configuration changes and actions that the Cisco IMC supports. + - More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html). 
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- lxml
+- xmljson >= 0.1.8
+extends_documentation_fragment:
+- community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  hostname:
+    description:
+    - IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
+    required: true
+    aliases: [ host, ip ]
+    type: str
+  username:
+    description:
+    - Username used to log in to the IMC.
+    default: admin
+    aliases: [ user ]
+    type: str
+  password:
+    description:
+    - The password to use for authentication.
+    default: password
+    type: str
+  path:
+    description:
+    - Absolute path to the file that contains the body of the HTTP request being sent to the Cisco IMC REST API.
+    - Parameter C(path) is mutually exclusive with parameter C(content).
+    aliases: [ 'src', 'config_file' ]
+    type: path
+  content:
+    description:
+    - When used instead of C(path), sets the content of the API requests directly.
+    - This may be convenient to template simple requests; for anything complex use the M(ansible.builtin.template) module.
+    - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream;
+      the Cisco IMC output is subsequently merged.
+    - Parameter C(content) is mutually exclusive with parameter C(path).
+    type: str
+  protocol:
+    description:
+    - Connection protocol to use.
+    default: https
+    choices: [ http, https ]
+    type: str
+  timeout:
+    description:
+    - The socket level timeout in seconds.
+    - This is the time that every single connection (every fragment) can spend.
+      If this C(timeout) is reached, the module will fail with a
+      C(Connection failure) indicating that C(The read operation timed out).
+    default: 60
+    type: int
+  validate_certs:
+    description:
+    - If C(false), SSL certificates will not be validated.
+    - This should only be set to C(false) on personally controlled sites using self-signed certificates.
+    type: bool
+    default: true
+notes:
+- The XML fragments do not need an authentication cookie; it is injected by the module automatically.
+- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
+- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
+  from the previous configuration. As a result, this module will always report a change on subsequent runs.
+  In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
+- If you get a C(Connection failure) related to C(The read operation timed out), increase the C(timeout)
+  parameter. Some XML fragments can take longer than the default timeout.
+- More information about the IMC REST API is available from
+  U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+'''
+
+EXAMPLES = r'''
+- name: Power down server
+  community.general.imc_rest:
+    hostname: '{{ imc_hostname }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: false
+    content: |
+      <configConfMo><inConfig>
+        <computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Configure IMC using multiple XML fragments
+  community.general.imc_rest:
+    hostname: '{{ imc_hostname }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: false
+    timeout: 120
+    content: |
+      <!-- Configure Serial-on-LAN -->
+      <configConfMo><inConfig>
+        <solIf dn="sys/rack-unit-1/sol-if" adminState="enable" speed="115200" comport="com0"/>
+      </inConfig></configConfMo>
+
+      <!-- Configure Console Redirection -->
+      <configConfMo><inConfig>
+        <biosVfConsoleRedirection dn="sys/rack-unit-1/bios/bios-settings/Console-redirection"
+          vpBaudRate="115200"
+          vpConsoleRedirection="com-0"
+          vpFlowControl="none"
+          vpTerminalType="vt100"
+          vpPuttyKeyPad="LINUX"
+          vpRedirectionAfterPOST="Always Enable"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Enable PXE boot and power-cycle server
+  community.general.imc_rest:
+    hostname: '{{ imc_hostname }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: false
+    content: |
+      <!-- Configure PXE boot -->
+      <configConfMo><inConfig>
+        <lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
+      </inConfig></configConfMo>

+      <!-- Power cycle server -->
+      <configConfMo><inConfig>
+        <computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Reconfigure IMC to boot from storage
+  community.general.imc_rest:
+    hostname: '{{ imc_host }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: false
+    content: |
+      <configConfMo><inConfig>
+        <lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Add customer description to server
+  community.general.imc_rest:
+    hostname: '{{ imc_host }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: false
+    content: |
+      <configConfMo><inConfig>
+        <computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+
+- name: Disable HTTP and increase session timeout to max value 10800 secs
+  community.general.imc_rest:
+    hostname: '{{ imc_host }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: false
+    timeout: 120
+    content: |
+      <configConfMo><inConfig>
+        <commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
+      </inConfig></configConfMo>

+      <configConfMo><inConfig>
+        <commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
+      </inConfig></configConfMo>
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+aaaLogin:
+  description: Cisco IMC XML output for the login, translated to JSON using Cobra convention
+  returned: success
+  type: dict
+  sample: |
+    "attributes": {
+        "cookie": "",
+        "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
+        "outPriv": "admin",
+        "outRefreshPeriod": "600",
+        "outSessionId": "114",
+        "outVersion": "2.0(13e)",
+        "response": "yes"
+    }
+configConfMo:
+  description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention
+  returned: success
+  type: dict
+  sample: |
+elapsed:
+  description: Elapsed time in seconds
+  returned: always
+  type: int
+  sample: 31
+response:
+  description: HTTP response message, including content length
+  returned: always
+  type: str
+  sample: OK (729 bytes)
+status:
+  description: The HTTP response status code
+  returned: always
+  type: int
+  sample: 200
+error:
+  description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention
+  returned: failed
+  type: dict
+  sample: |
+    "attributes": {
+        "cookie": "",
+        "errorCode": "ERR-xml-parse-error",
+        "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
+        "invocationResult": "594",
+        "response": "yes"
+    }
+error_code:
+  description: Cisco IMC error code
+  returned: failed
+  type: str
+  sample: ERR-xml-parse-error
+error_text:
+  description: Cisco IMC error message
+  returned: failed
+  type: str
+  sample: |
+    XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. 
+input:
+  description: RAW XML input sent to the Cisco IMC, causing the error
+  returned: failed
+  type: str
+  sample: |
+    <configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
+output:
+  description: RAW XML output received from the Cisco IMC, with error details
+  returned: failed
+  type: str
+  sample: >
+    <error cookie=""
+      response="yes"
+      errorCode="ERR-xml-parse-error"
+      invocationResult="594"
+      errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed."/>
+'''
+
+import datetime
+import os
+import traceback
+
+LXML_ETREE_IMP_ERR = None
+try:
+    import lxml.etree
+    HAS_LXML_ETREE = True
+except ImportError:
+    LXML_ETREE_IMP_ERR = traceback.format_exc()
+    HAS_LXML_ETREE = False
+
+XMLJSON_COBRA_IMP_ERR = None
+try:
+    from xmljson import cobra
+    HAS_XMLJSON_COBRA = True
+except ImportError:
+    XMLJSON_COBRA_IMP_ERR = traceback.format_exc()
+    HAS_XMLJSON_COBRA = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import zip_longest
+from ansible.module_utils.urls import fetch_url
+
+
+def imc_response(module, rawoutput, rawinput=''):
+    ''' Handle IMC returned data '''
+    xmloutput = lxml.etree.fromstring(rawoutput)
+    result = cobra.data(xmloutput)
+
+    # Handle errors
+    if xmloutput.get('errorCode') and xmloutput.get('errorDescr'):
+        if rawinput:
+            result['input'] = rawinput
+        result['output'] = rawoutput
+        result['error_code'] = xmloutput.get('errorCode')
+        result['error_text'] = xmloutput.get('errorDescr')
+        module.fail_json(msg='Request failed: %(error_text)s' % result, **result)
+
+    return result
+
+
+def logout(module, url, cookie, timeout):
+    ''' Perform a logout, if needed '''
+    data = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
+    resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout)
+
+
+def merge(one, two):
+    ''' Merge two complex nested datastructures into one '''
+    if isinstance(one, dict) and isinstance(two, dict):
+        copy = dict(one)
+        # copy.update({key: merge(one.get(key, None), two[key]) for key in two})
+        copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
+        return copy
+
+    elif isinstance(one, list) and isinstance(two, list):
+        return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]
+
+    return one if two is None else two
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            hostname=dict(type='str', required=True, aliases=['host', 'ip']),
+            username=dict(type='str', default='admin', aliases=['user']),
+            password=dict(type='str', default='password', no_log=True),
+            content=dict(type='str'),
+            path=dict(type='path', aliases=['config_file', 'src']),
+            protocol=dict(type='str', default='https', choices=['http', 'https']),
+            timeout=dict(type='int', default=60),
+            validate_certs=dict(type='bool', default=True),
+        ),
+        supports_check_mode=True,
+        mutually_exclusive=[['content', 'path']],
+    )
+
+    if not HAS_LXML_ETREE:
+        module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+    if not HAS_XMLJSON_COBRA:
+        module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)
+
+    hostname = module.params['hostname']
+    username = module.params['username']
+    password = module.params['password']
+
+    content = module.params['content']
+    path = module.params['path']
+
+    protocol = module.params['protocol']
+    timeout = module.params['timeout']
+
+    result = dict(
+        failed=False,
+        changed=False,
+    )
+
+    # Report missing file
+    file_exists = False
+    if path:
+        if os.path.isfile(path):
+            file_exists = True
+        else:
+            module.fail_json(msg='Cannot find/access path:\n%s' % path)
+
+    start = datetime.datetime.utcnow()
+
+    # Perform login first
+    url = '%s://%s/nuova' % (protocol, hostname)
+    data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
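+    # A sketch of the exchange this implements (values are illustrative, taken
+    # from the documented RETURN sample, not real output): the module POSTs
+    #   <aaaLogin inName="admin" inPassword="password"/>
+    # and a successful reply carries the session cookie that must accompany
+    # every subsequent request:
+    #   <aaaLogin cookie="" outCookie="1498902428/9de6dc36-..." outPriv="admin"
+    #             outRefreshPeriod="600" response="yes"/>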
+    resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+    if resp is None or auth['status'] != 200:
+        result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+        module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
+    result.update(imc_response(module, resp.read()))
+
+    # Store cookie for future requests
+    cookie = ''
+    try:
+        cookie = result['aaaLogin']['attributes']['outCookie']
+    except Exception:
+        module.fail_json(msg='Could not find cookie in output', **result)
+
+    try:
+        # Prepare request data
+        if content:
+            rawdata = content
+        elif file_exists:
+            with open(path, 'r') as config_object:
+                rawdata = config_object.read()
+
+        # Wrap the XML documents in a <root> element
+        xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
+
+        # Handle each XML document separately in the same session
+        for xmldoc in list(xmldata):
+            if xmldoc.tag is lxml.etree.Comment:
+                continue
+            # Add cookie to XML
+            xmldoc.set('cookie', cookie)
+            data = lxml.etree.tostring(xmldoc)
+
+            # Perform actual request
+            resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+            if resp is None or info['status'] != 200:
+                result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+                module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
+
+            # Merge results with previous results
+            rawoutput = resp.read()
+            result = merge(result, imc_response(module, rawoutput, rawinput=data))
+            result['response'] = info['msg']
+            result['status'] = info['status']
+
+            # Check for any changes
+            # NOTE: Unfortunately IMC API always report status as 'modified'
+            xmloutput = lxml.etree.fromstring(rawoutput)
+            results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
+            result['changed'] = ('modified' in results)
+
+        # Report success
+        result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+        module.exit_json(**result)
+    finally:
+        logout(module, url, cookie, timeout)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/imgadm.py b/ansible_collections/community/general/plugins/modules/imgadm.py
new file mode 100644
index 000000000..6e4b81098
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/imgadm.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, 2017 Jasper Lievisse Adriaanse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: imgadm
+short_description: Manage SmartOS images
+description:
+    - Manage SmartOS virtual machine images through imgadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: none
+    diff_mode:
+        support: none
+options:
+    force:
+        required: false
+        type: bool
+        description:
+          - Force a given operation (where supported by imgadm(1M)).
+    pool:
+        required: false
+        default: zones
+        description:
+          - zpool to import to or delete images from.
+        type: str
+    source:
+        required: false
+        description:
+          - URI for the image source.
+        type: str
+    state:
+        required: true
+        choices: [ present, absent, deleted, imported, updated, vacuumed ]
+        description:
+          - State the object operated on should be in. C(imported) is an alias
+            for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
+            and C(uuid) to C(*), it will remove all unused images.
+        type: str
+
+    type:
+        required: false
+        choices: [ imgapi, docker, dsapi ]
+        default: imgapi
+        description:
+          - Type for image sources.
+        type: str
+
+    uuid:
+        required: false
+        description:
+          - Image UUID. Can either be a full UUID or C(*) for all images.
+        type: str
+
+requirements:
+    - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Import an image
+  community.general.imgadm:
+    uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+    state: imported
+
+- name: Delete an image
+  community.general.imgadm:
+    uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+    state: deleted
+
+- name: Update all images
+  community.general.imgadm:
+    uuid: '*'
+    state: updated
+
+- name: Update a single image
+  community.general.imgadm:
+    uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+    state: updated
+
+- name: Add a source
+  community.general.imgadm:
+    source: 'https://datasets.project-fifo.net'
+    state: present
+
+- name: Add a Docker source
+  community.general.imgadm:
+    source: 'https://docker.io'
+    type: docker
+    state: present
+
+- name: Remove a source
+  community.general.imgadm:
+    source: 'https://docker.io'
+    state: absent
+'''
+
+RETURN = '''
+source:
+    description: Source that is managed.
+    returned: When not managing an image.
+    type: str
+    sample: https://datasets.project-fifo.net
+uuid:
+    description: UUID for an image operated on.
+    returned: When not managing an image source.
+    type: str
+    sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
+state:
+    description: State of the target, after execution.
+    returned: success
+    type: str
+    sample: 'present'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a
+# -E option to return any errors in JSON, the generated JSON does not play well
+# with the JSON parsers of Python. The returned message contains '\n' as part of
+# the stacktrace, which breaks the parsers.
+
+
+class Imgadm(object):
+    def __init__(self, module):
+        self.module = module
+        self.params = module.params
+        self.cmd = module.get_bin_path('imgadm', required=True)
+        self.changed = False
+        self.uuid = module.params['uuid']
+
+        # Since there are a number of (natural) aliases, prevent having to look
+        # them up every time we operate on `state`.
+        if self.params['state'] in ['present', 'imported', 'updated']:
+            self.present = True
+        else:
+            self.present = False
+
+        # Perform basic UUID validation upfront.
+        if self.uuid and self.uuid != '*':
+            if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
+                module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
+
+    # Helper method to massage stderr
+    def errmsg(self, stderr):
+        match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
+        if match:
+            return match.groups()[0]
+        else:
+            return 'Unexpected failure'
+
+    def update_images(self):
+        if self.uuid == '*':
+            cmd = '{0} update'.format(self.cmd)
+        else:
+            cmd = '{0} update {1}'.format(self.cmd, self.uuid)
+
+        (rc, stdout, stderr) = self.module.run_command(cmd)
+
+        if rc != 0:
+            self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
+
+        # There is no feedback from imgadm(1M) to determine if anything
+        # was actually changed. So treat this as an 'always-changes' operation.
+        # Note that 'imgadm -v' produces unparseable JSON... 
+ self.changed = True + + def manage_sources(self): + force = self.params['force'] + source = self.params['source'] + imgtype = self.params['type'] + + cmd = '{0} sources'.format(self.cmd) + + if force: + cmd += ' -f' + + if self.present: + cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype) + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr))) + + # Check the various responses. + # Note that trying to add a source with the wrong type is handled + # above as it results in a non-zero status. + + regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source) + if re.match(regex, stdout): + self.changed = False + + regex = 'Added "%s" image source "%s"' % (imgtype, source) + if re.match(regex, stdout): + self.changed = True + else: + # Type is ignored by imgadm(1M) here + cmd += ' -d %s' % source + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr))) + + regex = 'Do not have image source "%s", no change' % source + if re.match(regex, stdout): + self.changed = False + + regex = 'Deleted ".*" image source "%s"' % source + if re.match(regex, stdout): + self.changed = True + + def manage_images(self): + pool = self.params['pool'] + state = self.params['state'] + + if state == 'vacuumed': + # Unconditionally pass '--force', otherwise we're prompted with 'y/N' + cmd = '{0} vacuum -f'.format(self.cmd) + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr))) + else: + if stdout == '': + self.changed = False + else: + self.changed = True + if self.present: + cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid) + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr))) + + regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid) + if re.match(regex, stdout): + self.changed = False + + regex = '.*ActiveImageNotFound.*' + if re.match(regex, stderr): + self.changed = False + + regex = 'Imported image {0}.*'.format(self.uuid) + if re.match(regex, stdout.splitlines()[-1]): + self.changed = True + else: + cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid) + + (rc, stdout, stderr) = self.module.run_command(cmd) + + regex = '.*ImageNotInstalled.*' + if re.match(regex, stderr): + # Even if the 'rc' was non-zero (3), we handled the situation + # in order to determine if there was a change. + self.changed = False + + regex = 'Deleted image {0}'.format(self.uuid) + if re.match(regex, stdout): + self.changed = True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + force=dict(type='bool'), + pool=dict(default='zones'), + source=dict(), + state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']), + type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']), + uuid=dict() + ), + # This module relies largely on imgadm(1M) to enforce idempotency, which does not + # provide a "noop" (or equivalent) mode to do a dry-run. + supports_check_mode=False, + ) + + imgadm = Imgadm(module) + + uuid = module.params['uuid'] + source = module.params['source'] + state = module.params['state'] + + result = {'state': state} + + # Either manage sources or images. 
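+    # Hypothetical walk-through of the dispatch below: a task that sets
+    # source=https://datasets.project-fifo.net with state=present ends up in
+    # manage_sources(), a task that sets uuid=70e3ae72-... with state=imported
+    # ends up in manage_images(), and only state=updated calls update_images().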
+    if source:
+        result['source'] = source
+        imgadm.manage_sources()
+    else:
+        result['uuid'] = uuid
+
+        if state == 'updated':
+            imgadm.update_images()
+        else:
+            # Make sure we operate on a single image for the following actions
+            if (uuid == '*') and (state != 'vacuumed'):
+                module.fail_json(msg='Can only specify uuid as "*" when updating image(s)')
+            imgadm.manage_images()
+
+    result['changed'] = imgadm.changed
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/infinity.py b/ansible_collections/community/general/plugins/modules/infinity.py
new file mode 100644
index 000000000..65aa591f4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/infinity.py
@@ -0,0 +1,575 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017,
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: infinity
+short_description: Manage Infinity IPAM using REST API
+description:
+  - Manage Infinity IPAM using REST API.
+author:
+  - Meirong Liu (@MeganLiu)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  server_ip:
+    description:
+      - IP address of the Infinity server.
+    type: str
+    required: true
+  username:
+    description:
+      - Username to access Infinity.
+      - The user must have REST API privileges.
+    type: str
+    required: true
+  password:
+    description:
+      - Infinity password.
+    type: str
+    required: true
+  action:
+    description:
+      - Action to perform.
+    type: str
+    required: true
+    choices: [ add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ]
+  network_id:
+    description:
+      - Network ID.
+    type: str
+  ip_address:
+    description:
+      - IP Address for a reservation or a release.
+    type: str
+  network_address:
+    description:
+      - Network address in CIDR format (for example, 192.168.10.0).
+    type: str
+  network_size:
+    description:
+      - Network bitmask (for example, 255.255.255.192) or CIDR format (for example, /26).
+    type: str
+  network_name:
+    description:
+      - The name of a network.
+    type: str
+  network_location:
+    description:
+      - The parent network id for a given network.
+    type: int
+    default: -1
+  network_type:
+    description:
+      - Network type defined by Infinity.
+    type: str
+    choices: [ lan, shared_lan, supernet ]
+    default: lan
+  network_family:
+    description:
+      - Network family defined by Infinity, for example IPv4, IPv6 or dual stack.
+    type: str
+    choices: [ '4', '6', dual ]
+    default: '4'
+'''
+
+EXAMPLES = r'''
+---
+- hosts: localhost
+  connection: local
+  strategy: debug
+  tasks:
+    - name: Reserve network into Infinity IPAM
+      community.general.infinity:
+        server_ip: 80.75.107.12
+        username: username
+        password: password
+        action: reserve_network
+        network_name: reserve_new_ansible_network
+        network_family: 4
+        network_type: lan
+        network_id: 1201
+        network_size: /28
+      register: infinity
+'''
+
+RETURN = r'''
+network_id:
+  description: ID for a given network.
+  returned: success
+  type: str
+  sample: '1501'
+ip_info:
+  description: When reserving the next available IP address from a network, information about the reserved IP address is returned.
+  returned: success
+  type: str
+  sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}'
+network_info:
+  description: When reserving a LAN network from an Infinity supernet by providing network_size, the information about the reserved network is returned.
+  returned: success
+  type: str
+  sample: {
+        "network_address": "192.168.10.32/28",
+        "network_family": "4",
+        "network_id": 3102,
+        "network_size": null,
+        "description": null,
+        "network_location": "3085",
+        "ranges": { "id": 0, "name": null, "first_ip": null, "type": null, "last_ip": null},
+        "network_type": "lan",
+        "network_name": "'reserve_new_ansible_network'"
+    }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, json
+from ansible.module_utils.urls import open_url
+
+
+class Infinity(object):
+    """
+    Class to manage REST API calls to Infinity.
+    """
+
+    def __init__(self, module, server_ip, username, password):
+        self.module = module
+        self.auth_user = username
+        self.auth_pass = password
+        self.base_url = "https://%s/rest/v1/" % (str(server_ip))
+
+    def _get_api_call_ansible_handler(
+            self,
+            method='get',
+            resource_url='',
+            stat_codes=None,
+            params=None,
+            payload_data=None):
+        """
+        Perform the HTTPS request by using ansible get/delete method
+        """
+        stat_codes = [200] if stat_codes is None else stat_codes
+        request_url = str(self.base_url) + str(resource_url)
+        response = None
+        headers = {'Content-Type': 'application/json'}
+        if not request_url:
+            self.module.exit_json(
+                msg="The resource URL for the REST API call is empty, please check.")
+        if payload_data and not isinstance(payload_data, str):
+            payload_data = json.dumps(payload_data)
+        response_raw = open_url(
+            str(request_url),
+            method=method,
+            timeout=20,
+            headers=headers,
+            url_username=self.auth_user,
+            url_password=self.auth_pass,
+            validate_certs=False,
+            force_basic_auth=True,
+            data=payload_data)
+
+        response = response_raw.read()
+        payload = ''
+        if response_raw.code not in stat_codes:
+            self.module.exit_json(
+                changed=False,
+                meta="open_url response code indicates an error: %r" %
+                (response_raw.code))
+        else:
+            if isinstance(response, str) and len(response) > 0:
+                payload = response
+            elif method.lower() == 'delete' and response_raw.code == 204:
+                payload = 'Delete is done.'
+        if isinstance(payload, dict) and "text" in payload:
+            # exit_json() ends module execution, so no further handling is needed here
+            self.module.exit_json(
+                changed=False,
+                meta="When calling the REST API, the returned data is not JSON.")
+        return payload
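+    # Illustrative call into the generic handler above (names are from this
+    # module, the id is hypothetical):
+    #   self._get_api_call_ansible_handler('get', 'networks/1201')
+    # issues GET https://<server_ip>/rest/v1/networks/1201 with HTTP basic
+    # authentication and returns the raw response body as a string.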
+    # ---------------------------------------------------------------------------
+    # get_network()
+    # ---------------------------------------------------------------------------
+    def get_network(self, network_id, network_name, limit=-1):
+        """
+        Search network_name inside Infinity by using rest api
+        Network id or network_name needs to be provided
+        return the details of the network with the given network_id or network_name
+        """
+        if network_name is None and network_id is None:
+            self.module.exit_json(
+                msg="You must specify one of the options 'network_name' or 'network_id'.")
+        method = "get"
+        resource_url = ''
+        params = {}
+        response = None
+        if network_id:
+            resource_url = "networks/" + str(network_id)
+            response = self._get_api_call_ansible_handler(method, resource_url)
+        if network_id is None and network_name:
+            method = "get"
+            resource_url = "search"
+            params = {"query": json.dumps(
+                {"name": network_name, "type": "network"})}
+            response = self._get_api_call_ansible_handler(
+                method, resource_url, payload_data=json.dumps(params))
+        if response and isinstance(response, str):
+            response = json.loads(response)
+        if response and isinstance(response, list) and len(
+                response) > 1 and limit == 1:
+            response = response[0]
+            response = json.dumps(response)
+        return response
+
+    # ---------------------------------------------------------------------------
+    # get_network_id()
+    # ---------------------------------------------------------------------------
+    def get_network_id(self, network_name="", network_type='lan'):
+        """
+        query network_id from Infinity via rest api based on given network_name
+        """
+        method = 'get'
+        resource_url = 'search'
+        response = None
+        if network_name is None:
+            self.module.exit_json(
+                msg="You must specify the option 'network_name'.")
+        params = {"query": json.dumps(
+            {"name": network_name, "type": "network"})}
+        response = self._get_api_call_ansible_handler(
+            method, resource_url, payload_data=json.dumps(params))
+        network_id = ""
+        if response and isinstance(response, str):
+            response = json.loads(response)
+        if response and isinstance(response, list):
+            response = response[0]
+            network_id = response['id']
+        return network_id
+
+    # ---------------------------------------------------------------------------
+    # reserve_next_available_ip()
+    # ---------------------------------------------------------------------------
+    def reserve_next_available_ip(self, network_id=""):
+        """
+        Reserve an IP address via Infinity by using the REST API
+        network_id: the id of the network to reserve the IP address from
+        return the next available ip address from that given network
+        """
+        method = "post"
+        resource_url = ''
+        response = None
+        ip_info = ''
+        if not network_id:
+            self.module.exit_json(
+                msg="You must specify the option 'network_id'.")
+        if network_id:
+            resource_url = "networks/" + str(network_id) + "/reserve_ip"
+            response = self._get_api_call_ansible_handler(method, resource_url)
+        if response and response.find(
+                "[") >= 0 and response.find("]") >= 0:
+            start_pos = response.find("{")
+            end_pos = response.find("}")
+            ip_info = response[start_pos: (end_pos + 1)]
+        return ip_info
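+    # Sketch of the slicing in reserve_next_available_ip() above, assuming a
+    # hypothetical response: the reserve_ip endpoint answers with a JSON list
+    # such as
+    #   '[{"address": "192.168.10.3", "id": 3229}]'
+    # and the find("{")/find("}") pair extracts the first object from it:
+    #   '{"address": "192.168.10.3", "id": 3229}'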
+    # -------------------------
+    # release_ip()
+    # -------------------------
+    def release_ip(self, network_id="", ip_address=""):
+        """
+        Release an IP address via Infinity by using the REST API
+        """
+        method = "get"
+        resource_url = ''
+        response = None
+        if ip_address is None or network_id is None:
+            self.module.exit_json(
+                msg="You must specify those two options: 'network_id' and 'ip_address'.")
+
+        resource_url = "networks/" + str(network_id) + "/children"
+        response = self._get_api_call_ansible_handler(method, resource_url)
+        if not response:
+            self.module.exit_json(
+                msg="There is an error in releasing IP %s from network %s." %
+                (ip_address, network_id))
+
+        ip_list = json.loads(response)
+        ip_idlist = []
+        for ip_item in ip_list:
+            ip_id = ip_item['id']
+            ip_idlist.append(ip_id)
+        deleted_ip_id = ''
+        for ip_id in ip_idlist:
+            ip_response = ''
+            resource_url = "ip_addresses/" + str(ip_id)
+            ip_response = self._get_api_call_ansible_handler(
+                method,
+                resource_url,
+                stat_codes=[200])
+            if ip_response and json.loads(
+                    ip_response)['address'] == str(ip_address):
+                deleted_ip_id = ip_id
+                break
+        if deleted_ip_id:
+            method = 'delete'
+            resource_url = "ip_addresses/" + str(deleted_ip_id)
+            response = self._get_api_call_ansible_handler(
+                method, resource_url, stat_codes=[204])
+        else:
+            self.module.exit_json(
+                msg="When releasing the IP, could not find the IP address %r in the given network %r." %
+                (ip_address, network_id))
+
+        return response
+
+    # -------------------
+    # delete_network()
+    # -------------------
+    def delete_network(self, network_id="", network_name=""):
+        """
+        delete network from Infinity by using rest api
+        """
+        method = 'delete'
+        resource_url = ''
+        response = None
+        if network_id is None and network_name is None:
+            self.module.exit_json(
+                msg="You must specify one of those options: 'network_id', 'network_name'.")
+        if network_id is None and network_name:
+            network_id = self.get_network_id(network_name=network_name)
+        if network_id:
+            resource_url = "networks/" + str(network_id)
+            response = self._get_api_call_ansible_handler(
+                method, resource_url, stat_codes=[204])
+        return response
+
+    # reserve_network()
+    # ---------------------------------------------------------------------------
+    def reserve_network(self, network_id="",
+                        reserved_network_name="", reserved_network_description="",
+                        reserved_network_size="", reserved_network_family='4',
+                        reserved_network_type='lan', reserved_network_address="",):
+        """
+        Reserves the first available network of specified size from a given supernet
+        network_name (required): Name of the network.
+        description (optional): Free description.
+        network_family (required): Address family of the network. One of '4', '6', 'IPv4', 'IPv6', 'dual'.
+        network_address (optional): Address of the new network. If not given, the first available network will be created.
+        network_size (required): Size of the new network in /<prefix> notation.
+        network_type (required): Type of network. One of 'supernet', 'lan', 'shared_lan'.
+        """
+        method = 'post'
+        resource_url = ''
+        network_info = None
+        if network_id is None or reserved_network_name is None or reserved_network_size is None:
+            self.module.exit_json(
+                msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'.")
+        if network_id:
+            resource_url = "networks/" + str(network_id) + "/reserve_network"
+        if not reserved_network_family:
+            reserved_network_family = '4'
+        if not reserved_network_type:
+            reserved_network_type = 'lan'
+        payload_data = {
+            "network_name": reserved_network_name,
+            'description': reserved_network_description,
+            'network_size': reserved_network_size,
+            'network_family': reserved_network_family,
+            'network_type': reserved_network_type,
+            'network_location': int(network_id)}
+        if reserved_network_address:
+            payload_data.update({'network_address': reserved_network_address})
+
+        network_info = self._get_api_call_ansible_handler(
+            method, resource_url, stat_codes=[200, 201], payload_data=payload_data)
+
+        return network_info
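+    # Example payload_data produced by reserve_network() above (keys as built
+    # by the method; values are hypothetical, mirroring the module EXAMPLES):
+    #   {"network_name": "reserve_new_ansible_network", "description": "",
+    #    "network_size": "/28", "network_family": "4", "network_type": "lan",
+    #    "network_location": 1201}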
+    # ---------------------------------------------------------------------------
+    # release_network()
+    # ---------------------------------------------------------------------------
+    def release_network(
+            self,
+            network_id="",
+            released_network_name="",
+            released_network_type='lan'):
+        """
+        Release the network with name 'released_network_name' from the given supernet network_id
+        """
+        method = 'get'
+        response = None
+        if network_id is None or released_network_name is None:
+            self.module.exit_json(
+                msg="You must specify those options: 'network_id' and 'released_network_name'.")
+        matched_network_id = ""
+        resource_url = "networks/" + str(network_id) + "/children"
+        response = self._get_api_call_ansible_handler(method, resource_url)
+        if not response:
+            self.module.exit_json(
+                msg="There is an error in releasing network %r from supernet %s." %
+                (released_network_name, network_id))
+        if response:
+            response = json.loads(response)
+            for child_net in response:
+                if child_net['network'] and child_net['network']['network_name'] == released_network_name:
+                    matched_network_id = child_net['network']['network_id']
+                    break
+        response = None
+        if matched_network_id:
+            method = 'delete'
+            resource_url = "networks/" + str(matched_network_id)
+            response = self._get_api_call_ansible_handler(
+                method, resource_url, stat_codes=[204])
+        else:
+            self.module.exit_json(
+                msg="When releasing the network, could not find network %r in the given supernet %r." %
+                (released_network_name, network_id))
+
+        return response
+
+    # ---------------------------------------------------------------------------
+    # add_network()
+    # ---------------------------------------------------------------------------
+    def add_network(
+            self, network_name="", network_address="",
+            network_size="", network_family='4',
+            network_type='lan', network_location=-1):
+        """
+        Add a new LAN network into a given Fusionlayer Infinity supernet (or the default supernet) via the REST API.
+        required fields=['network_name', 'network_family', 'network_type', 'network_address', 'network_size']
+        """
+        method = 'post'
+        resource_url = 'networks'
+        response = None
+        if network_name is None or network_address is None or network_size is None:
+            self.module.exit_json(
+                msg="You must specify those options: 'network_name', 'network_address' and 'network_size'.")
+
+        if not network_family:
+            network_family = '4'
+        if not network_type:
+            network_type = 'lan'
+        if not network_location:
+            network_location = -1
+        payload_data = {
+            "network_name": network_name,
+            'network_address': network_address,
+            'network_size': network_size,
+            'network_family': network_family,
+            'network_type': network_type,
+            'network_location': network_location}
+        response = self._get_api_call_ansible_handler(
+            method='post', resource_url=resource_url,
+            stat_codes=[200], payload_data=payload_data)
+        return response
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_ip=dict(type='str', required=True),
+            username=dict(type='str', required=True),
+            password=dict(type='str', required=True, no_log=True),
+            network_id=dict(type='str'),
+            ip_address=dict(type='str'),
+            network_name=dict(type='str'),
+            network_location=dict(type='int', default=-1),
+            network_family=dict(type='str', default='4', choices=['4', '6', 'dual']),
+            network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']),
+            network_address=dict(type='str'),
+            network_size=dict(type='str'),
+            action=dict(type='str', required=True, choices=[
+                'add_network',
+                'delete_network',
+                'get_network',
+                'get_network_id',
+                'release_ip',
+                'release_network',
+                'reserve_network',
+                'reserve_next_available_ip',
+            ],),
+        ),
+        required_together=(
+            ['username', 'password'],
+        ),
+    )
+    server_ip = module.params["server_ip"]
+    username = module.params["username"]
+    password = module.params["password"]
+    action = module.params["action"]
+    network_id = module.params["network_id"]
+    released_ip = module.params["ip_address"]
+    network_name = module.params["network_name"]
+    network_family = module.params["network_family"]
+    network_type = module.params["network_type"]
+    network_address = module.params["network_address"]
+    network_size = module.params["network_size"]
+    network_location = module.params["network_location"]
+    my_infinity = Infinity(module, server_ip, username, password)
+    result = ''
+    if action == "reserve_next_available_ip":
+        if 
network_id: + result = my_infinity.reserve_next_available_ip(network_id) + if not result: + result = 'There is an error in calling method of reserve_next_available_ip' + module.exit_json(changed=False, meta=result) + module.exit_json(changed=True, meta=result) + elif action == "release_ip": + if network_id and released_ip: + result = my_infinity.release_ip( + network_id=network_id, ip_address=released_ip) + module.exit_json(changed=True, meta=result) + elif action == "delete_network": + result = my_infinity.delete_network( + network_id=network_id, network_name=network_name) + module.exit_json(changed=True, meta=result) + + elif action == "get_network_id": + result = my_infinity.get_network_id( + network_name=network_name, network_type=network_type) + module.exit_json(changed=True, meta=result) + elif action == "get_network": + result = my_infinity.get_network( + network_id=network_id, network_name=network_name) + module.exit_json(changed=True, meta=result) + elif action == "reserve_network": + result = my_infinity.reserve_network( + network_id=network_id, + reserved_network_name=network_name, + reserved_network_size=network_size, + reserved_network_family=network_family, + reserved_network_type=network_type, + reserved_network_address=network_address) + module.exit_json(changed=True, meta=result) + elif action == "release_network": + result = my_infinity.release_network( + network_id=network_id, + released_network_name=network_name, + released_network_type=network_type) + module.exit_json(changed=True, meta=result) + + elif action == "add_network": + result = my_infinity.add_network( + network_name=network_name, + network_location=network_location, + network_address=network_address, + network_size=network_size, + network_family=network_family, + network_type=network_type) + + module.exit_json(changed=True, meta=result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/influxdb_database.py b/ansible_collections/community/general/plugins/modules/influxdb_database.py new file mode 100644 index 000000000..046b16e18 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/influxdb_database.py @@ -0,0 +1,149 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Kamil Szczygiel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: influxdb_database +short_description: Manage InfluxDB databases +description: + - Manage InfluxDB databases. +author: "Kamil Szczygiel (@kamsz)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" + - requests +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + database_name: + description: + - Name of the database. + required: true + type: str + state: + description: + - Determines if the database should be created or destroyed. 
+ choices: [ absent, present ] + default: present + type: str +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes + +''' + +EXAMPLES = r''' +# Example influxdb_database command from Ansible Playbooks +- name: Create database + community.general.influxdb_database: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + +- name: Destroy database + community.general.influxdb_database: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + state: absent + +- name: Create database using custom credentials + community.general.influxdb_database: + hostname: "{{influxdb_ip_address}}" + username: "{{influxdb_username}}" + password: "{{influxdb_password}}" + database_name: "{{influxdb_database_name}}" + ssl: true + validate_certs: true +''' + +RETURN = r''' +# only defaults +''' + +try: + import requests.exceptions + from influxdb import exceptions +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +def find_database(module, client, database_name): + database = None + + try: + databases = client.get_list_database() + for db in databases: + if db['name'] == database_name: + database = db + break + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + return database + + +def create_database(module, client, database_name): + if not module.check_mode: + try: + client.create_database(database_name) + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=True) + + +def drop_database(module, client, database_name): + if not module.check_mode: + try: + client.drop_database(database_name) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + module.exit_json(changed=True) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + database_name=dict(required=True, type='str'), + state=dict(default='present', type='str', choices=['present', 'absent']) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params['state'] + + influxdb = InfluxDb(module) + client = influxdb.connect_to_influxdb() + database_name = influxdb.database_name + database = find_database(module, client, database_name) + + if state == 'present': + if database: + module.exit_json(changed=False) + else: + create_database(module, client, database_name) + + if state == 'absent': + if database: + drop_database(module, client, database_name) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/influxdb_query.py b/ansible_collections/community/general/plugins/modules/influxdb_query.py new file mode 100644 index 000000000..c2e3d8acc --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/influxdb_query.py @@ -0,0 +1,108 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: influxdb_query +short_description: Query data points from InfluxDB +description: + - Query data points from InfluxDB. 
+author: "René Moser (@resmo)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + query: + description: + - Query to be executed. + required: true + type: str + database_name: + description: + - Name of the database. + required: true + type: str +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Query connections + community.general.influxdb_query: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + query: "select mean(value) from connections" + register: connection + +- name: Query connections with tags filters + community.general.influxdb_query: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + query: "select mean(value) from connections where region='zue01' and host='server01'" + register: connection + +- name: Print results from the query + ansible.builtin.debug: + var: connection.query_results +''' + +RETURN = r''' +query_results: + description: Result from the query + returned: success + type: list + sample: + - mean: 1245.5333333333333 + time: "1970-01-01T00:00:00Z" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +class AnsibleInfluxDBRead(InfluxDb): + + def read_by_query(self, query): + client = self.connect_to_influxdb() + try: + rs = client.query(query) + if rs: + return list(rs.get_points()) + except Exception as e: + self.module.fail_json(msg=to_native(e)) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + query=dict(type='str', required=True), + database_name=dict(required=True, type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + influx = AnsibleInfluxDBRead(module) + query = module.params.get('query') + results = influx.read_by_query(query) + module.exit_json(changed=True, query_results=results) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py b/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py new file mode 100644 index 000000000..28d5450ff --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py @@ -0,0 +1,350 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Kamil Szczygiel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: influxdb_retention_policy +short_description: Manage InfluxDB retention policies +description: + - Manage InfluxDB retention policies. +author: "Kamil Szczygiel (@kamsz)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" + - requests +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + database_name: + description: + - Name of the database. + required: true + type: str + policy_name: + description: + - Name of the retention policy. + required: true + type: str + state: + description: + - State of the retention policy. 
+    choices: [ absent, present ]
+    default: present
+    type: str
+    version_added: 3.1.0
+  duration:
+    description:
+      - Determines how long InfluxDB should keep the data. If specified, it
+        should be C(INF) or at least one hour. If not specified, C(INF) is
+        assumed. Supports complex duration expressions with multiple units.
+      - Required only if I(state) is set to C(present).
+    type: str
+  replication:
+    description:
+      - Determines how many independent copies of each point are stored in the cluster.
+      - Required only if I(state) is set to C(present).
+    type: int
+  default:
+    description:
+      - Sets the retention policy as default retention policy.
+    type: bool
+    default: false
+  shard_group_duration:
+    description:
+      - Determines the time range covered by a shard group. If specified, it
+        must be at least one hour. If not specified, it is determined by
+        InfluxDB from the retention policy's duration. Supports complex
+        duration expressions with multiple units.
+    type: str
+    version_added: '2.0.0'
+extends_documentation_fragment:
+  - community.general.influxdb
+  - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_retention_policy command from Ansible Playbooks
+- name: Create 1 hour retention policy
+  community.general.influxdb_retention_policy:
+    hostname: "{{ influxdb_ip_address }}"
+    database_name: "{{ influxdb_database_name }}"
+    policy_name: test
+    duration: 1h
+    replication: 1
+    ssl: true
+    validate_certs: true
+    state: present
+
+- name: Create 1 day retention policy with 1 hour shard group duration
+  community.general.influxdb_retention_policy:
+    hostname: "{{ influxdb_ip_address }}"
+    database_name: "{{ influxdb_database_name }}"
+    policy_name: test
+    duration: 1d
+    replication: 1
+    shard_group_duration: 1h
+    state: present
+
+- name: Create 1 week retention policy with 1 day shard group duration
+  community.general.influxdb_retention_policy:
+    hostname: "{{ influxdb_ip_address }}"
+    database_name: "{{ influxdb_database_name }}"
+    policy_name: test
+    duration: 1w
+    replication: 1
+    shard_group_duration: 1d
+    state: present
+
+- name: Create infinite retention policy with 1 week of shard group duration
+  community.general.influxdb_retention_policy:
+    hostname: "{{ influxdb_ip_address }}"
+    database_name: "{{ influxdb_database_name }}"
+    policy_name: test
+    duration: INF
+    replication: 1
+    ssl: false
+    validate_certs: false
+    shard_group_duration: 1w
+    state: present
+
+- name: Create retention policy with complex durations
+  community.general.influxdb_retention_policy:
+    hostname: "{{ influxdb_ip_address }}"
+    database_name: "{{ influxdb_database_name }}"
+    policy_name: test
+    duration: 5d1h30m
+    replication: 1
+    ssl: false
+    validate_certs: false
+    shard_group_duration: 1d10h30m
+    state: present
+
+- name: Drop retention policy
+  community.general.influxdb_retention_policy:
+    hostname: "{{ influxdb_ip_address }}"
+    database_name: "{{ influxdb_database_name }}"
+    policy_name: test
+    state: absent
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+import re
+
+try:
+    import requests.exceptions
+    from influxdb import exceptions
+except ImportError:
+    pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+from ansible.module_utils.common.text.converters import to_native
+
+
+VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$')
+
+DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)')
+EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))')
+
+# nanoseconds per duration unit
+DURATION_UNIT_NANOSECS = {
+    'ns': 1,
+    'u': 1000,
+    'µ': 1000,
+    'ms': 1000 * 1000,
+    's': 1000 * 1000 * 1000,
+    'm': 1000 * 1000 * 1000 * 60,
+    'h': 1000 * 1000 * 1000 * 60 * 60,
+    'd': 1000 * 1000 * 1000 * 60 * 60 * 24,
+    'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7,
+}
+
+MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h']
+MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h']
+
+
+def check_duration_literal(value):
+    # A valid literal is either INF or one or more <number><unit> groups.
+    return VALID_DURATION_REGEX.search(value) is not None
+
+
+def parse_duration_literal(value, extended=False):
+    # Convert a duration literal such as '1h' or '5d1h30m' into nanoseconds;
+    # INF is mapped to 0. With extended=True, fractional seconds (as reported
+    # by InfluxDB) are also accepted.
+    duration = 0.0
+
+    if value == "INF":
+        return duration
+
+    lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value)
+
+    for duration_literal in lookup:
+        filtered_literal = list(filter(None, duration_literal))
+        duration_val = float(filtered_literal[0])
+        duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]]
+
+    return duration
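+
+
+# A rough worked example of the parsing above (illustrative only, not executed
+# by the module): parse_duration_literal('5d1h30m') accumulates
+#   5 * DURATION_UNIT_NANOSECS['d'] + 1 * DURATION_UNIT_NANOSECS['h'] + 30 * DURATION_UNIT_NANOSECS['m']
+# = 437,400,000,000,000 ns, i.e. 121.5 hours.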
+
+
+def find_retention_policy(module, client):
+    database_name = module.params['database_name']
+    policy_name = module.params['policy_name']
+    hostname = module.params['hostname']
+    retention_policy = None
+
+    try:
+        retention_policies = client.get_list_retention_policies(database=database_name)
+        for policy in retention_policies:
+            if policy['name'] == policy_name:
+                retention_policy = policy
+                break
+    except requests.exceptions.ConnectionError as e:
+        module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
+
+    if retention_policy is not None:
+        retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True)
+        retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True)
+
+    return retention_policy
+
+
+def create_retention_policy(module, client):
+    database_name = module.params['database_name']
+    policy_name = module.params['policy_name']
+    duration = module.params['duration']
+    replication = module.params['replication']
+    default = module.params['default']
+    shard_group_duration = module.params['shard_group_duration']
+
+    if not check_duration_literal(duration):
+        module.fail_json(msg="Failed to parse value of duration")
+
+    influxdb_duration_format = parse_duration_literal(duration)
+    if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION:
+        module.fail_json(msg="duration value must be at least 1h")
+
+    if shard_group_duration is not None:
+        if not check_duration_literal(shard_group_duration):
+            module.fail_json(msg="Failed to parse value of shard_group_duration")
+
+        influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
+        if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION:
+            module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
+
+    if not module.check_mode:
+        try:
+            if shard_group_duration:
+                client.create_retention_policy(policy_name, duration, replication, database_name, default,
+                                               shard_group_duration)
+            else:
+                client.create_retention_policy(policy_name, duration, replication, database_name, default)
+        except exceptions.InfluxDBClientError as e:
+            module.fail_json(msg=e.content)
+    module.exit_json(changed=True)
+
+
+def alter_retention_policy(module, client, retention_policy):
+    database_name = module.params['database_name']
+    policy_name = module.params['policy_name']
+    duration = module.params['duration']
+    replication = module.params['replication']
+    default = module.params['default']
+    shard_group_duration = module.params['shard_group_duration']
+
+    changed = False
+
+    if not check_duration_literal(duration):
+        module.fail_json(msg="Failed to parse value of duration")
+
+    influxdb_duration_format = parse_duration_literal(duration)
+    if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION:
+        module.fail_json(msg="duration value must be at least 1h")
+
+    if shard_group_duration is None:
+        influxdb_shard_group_duration_format = retention_policy["shardGroupDuration"]
+    else:
+        if not check_duration_literal(shard_group_duration):
+            module.fail_json(msg="Failed to parse value of shard_group_duration")
+
+        influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
+        if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION:
+            module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
+
+    if (retention_policy['duration'] != influxdb_duration_format or
+            retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or
+            retention_policy['replicaN'] != int(replication) or
+            retention_policy['default'] != default):
+        if not module.check_mode:
+            try:
+                client.alter_retention_policy(policy_name, database_name, duration, replication, default,
+                                              shard_group_duration)
+            except exceptions.InfluxDBClientError as e:
+                module.fail_json(msg=e.content)
+        changed = True
+    module.exit_json(changed=changed)
+
+
+def drop_retention_policy(module, client):
+    database_name = module.params['database_name']
+    policy_name = module.params['policy_name']
+
+    if not module.check_mode:
+        try:
+            client.drop_retention_policy(policy_name, database_name)
+        except exceptions.InfluxDBClientError as e:
+            module.fail_json(msg=e.content)
+    module.exit_json(changed=True)
+
+
+def main():
+    argument_spec = InfluxDb.influxdb_argument_spec()
+    argument_spec.update(
+        state=dict(default='present', type='str', choices=['present', 'absent']),
+        database_name=dict(required=True, type='str'),
+        policy_name=dict(required=True, type='str'),
+        duration=dict(type='str'),
+        replication=dict(type='int'),
+        default=dict(default=False, type='bool'),
+        shard_group_duration=dict(type='str'),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=(
+            ('state', 'present', ['duration', 'replication']),
+        ),
+    )
+
+    state = module.params['state']
+
+    influxdb = InfluxDb(module)
+    client = influxdb.connect_to_influxdb()
+
+    retention_policy = find_retention_policy(module, client)
+
+    if state == 'present':
+        if retention_policy:
+            alter_retention_policy(module, client, retention_policy)
+        else:
+            create_retention_policy(module, client)
+
+    if state == 'absent':
+        if retention_policy:
+            drop_retention_policy(module, client)
+        else:
+            module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_user.py b/ansible_collections/community/general/plugins/modules/influxdb_user.py
new file mode 100644
index 000000000..bbd0f8f5a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/influxdb_user.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Vitaliy Zhhuta
+# inspired by Kamil Szczygiel influxdb_database module
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
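+# A quick ad-hoc smoke test for this module might look like the following
+# (hypothetical host and credentials, to be substituted with real values):
+#
+#   ansible localhost -m community.general.influxdb_user \
+#     -a "user_name=john user_password=s3cr3t login_username=admin login_password=admin"
+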
+from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: influxdb_user +short_description: Manage InfluxDB users +description: + - Manage InfluxDB users. +author: "Vitaliy Zhhuta (@zhhuta)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + user_name: + description: + - Name of the user. + required: true + type: str + user_password: + description: + - Password to be set for the user. + required: false + type: str + admin: + description: + - Whether the user should be in the admin role or not. + - Since version 2.8, the role will also be updated. + default: false + type: bool + state: + description: + - State of the user. + choices: [ absent, present ] + default: present + type: str + grants: + description: + - Privileges to grant to this user. + - Takes a list of dicts containing the "database" and "privilege" keys. + - If this argument is not provided, the current grants will be left alone. + - If an empty list is provided, all grants for the user will be removed. + type: list + elements: dict +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Create a user on localhost using default login credentials + community.general.influxdb_user: + user_name: john + user_password: s3cr3t + +- name: Create a user on localhost using custom login credentials + community.general.influxdb_user: + user_name: john + user_password: s3cr3t + login_username: "{{ influxdb_username }}" + login_password: "{{ influxdb_password }}" + +- name: Create an admin user on a remote host using custom login credentials + community.general.influxdb_user: + user_name: john + user_password: s3cr3t + admin: true + hostname: "{{ influxdb_hostname }}" + login_username: "{{ influxdb_username }}" + login_password: "{{ influxdb_password }}" + +- name: Create a user on localhost with privileges + community.general.influxdb_user: + user_name: john + user_password: s3cr3t + login_username: "{{ influxdb_username }}" + login_password: "{{ influxdb_password }}" + grants: + - database: 'collectd' + privilege: 'WRITE' + - database: 'graphite' + privilege: 'READ' + +- name: Destroy a user using custom login credentials + community.general.influxdb_user: + user_name: john + login_username: "{{ influxdb_username }}" + login_password: "{{ influxdb_password }}" + state: absent +''' + +RETURN = r''' +#only defaults +''' + +import json + +from ansible.module_utils.urls import ConnectionError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +import ansible_collections.community.general.plugins.module_utils.influxdb as influx + + +def find_user(module, client, user_name): + user_result = None + + try: + users = client.get_list_users() + for user in users: + if user['user'] == user_name: + user_result = user + break + except ConnectionError as e: + module.fail_json(msg=to_native(e)) + return user_result + + +def check_user_password(module, client, user_name, user_password): + try: + client.switch_user(user_name, user_password) + client.get_list_users() + except influx.exceptions.InfluxDBClientError as e: + if e.code == 401: + return False + except ConnectionError as e: + module.fail_json(msg=to_native(e)) + finally: + # restore previous user + client.switch_user(module.params['username'], module.params['password']) + return True + + +def 
set_user_password(module, client, user_name, user_password): + if not module.check_mode: + try: + client.set_user_password(user_name, user_password) + except ConnectionError as e: + module.fail_json(msg=to_native(e)) + + +def create_user(module, client, user_name, user_password, admin): + if not module.check_mode: + try: + client.create_user(user_name, user_password, admin) + except ConnectionError as e: + module.fail_json(msg=to_native(e)) + + +def drop_user(module, client, user_name): + if not module.check_mode: + try: + client.drop_user(user_name) + except influx.exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + module.exit_json(changed=True) + + +def set_user_grants(module, client, user_name, grants): + changed = False + + current_grants = [] + try: + current_grants = client.get_list_privileges(user_name) + except influx.exceptions.InfluxDBClientError as e: + if not module.check_mode or 'user not found' not in e.content: + module.fail_json(msg=e.content) + + try: + parsed_grants = [] + # Fix privileges wording + for i, v in enumerate(current_grants): + if v['privilege'] != 'NO PRIVILEGES': + if v['privilege'] == 'ALL PRIVILEGES': + v['privilege'] = 'ALL' + parsed_grants.append(v) + + # check if the current grants are included in the desired ones + for current_grant in parsed_grants: + if current_grant not in grants: + if not module.check_mode: + client.revoke_privilege(current_grant['privilege'], + current_grant['database'], + user_name) + changed = True + + # check if the desired grants are included in the current ones + for grant in grants: + if grant not in parsed_grants: + if not module.check_mode: + client.grant_privilege(grant['privilege'], + grant['database'], + user_name) + changed = True + + except influx.exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + return changed + + +INFLUX_AUTH_FIRST_USER_REQUIRED = "error authorizing query: create admin user first or disable authentication" + + +def main(): + argument_spec = influx.InfluxDb.influxdb_argument_spec() + argument_spec.update( + state=dict(default='present', type='str', choices=['present', 'absent']), + user_name=dict(required=True, type='str'), + user_password=dict(required=False, type='str', no_log=True), + admin=dict(default='False', type='bool'), + grants=dict(type='list', elements='dict'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + state = module.params['state'] + user_name = module.params['user_name'] + user_password = module.params['user_password'] + admin = module.params['admin'] + grants = module.params['grants'] + influxdb = influx.InfluxDb(module) + client = influxdb.connect_to_influxdb() + + user = None + try: + user = find_user(module, client, user_name) + except influx.exceptions.InfluxDBClientError as e: + if e.code == 403: + reason = None + try: + msg = json.loads(e.content) + reason = msg["error"] + except (KeyError, ValueError): + module.fail_json(msg=to_native(e)) + + if reason != INFLUX_AUTH_FIRST_USER_REQUIRED: + module.fail_json(msg=to_native(e)) + else: + module.fail_json(msg=to_native(e)) + + changed = False + + if state == 'present': + if user: + if not check_user_password(module, client, user_name, user_password) and user_password is not None: + set_user_password(module, client, user_name, user_password) + changed = True + + try: + if admin and not user['admin']: + if not module.check_mode: + client.grant_admin_privileges(user_name) + changed = True + elif not admin and user['admin']: + if not 
module.check_mode: + client.revoke_admin_privileges(user_name) + changed = True + except influx.exceptions.InfluxDBClientError as e: + module.fail_json(msg=to_native(e)) + + else: + user_password = user_password or '' + create_user(module, client, user_name, user_password, admin) + changed = True + + if grants is not None: + if set_user_grants(module, client, user_name, grants): + changed = True + + module.exit_json(changed=changed) + + if state == 'absent': + if user: + drop_user(module, client, user_name) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/influxdb_write.py b/ansible_collections/community/general/plugins/modules/influxdb_write.py new file mode 100644 index 000000000..f95b6dae8 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/influxdb_write.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: influxdb_write +short_description: Write data points into InfluxDB +description: + - Write data points into InfluxDB. +author: "René Moser (@resmo)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + data_points: + description: + - Data points as dict to write into the database. + required: true + type: list + elements: dict + database_name: + description: + - Name of the database. + required: true + type: str +extends_documentation_fragment: + - community.general.influxdb + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Write points into database + community.general.influxdb_write: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + data_points: + - measurement: connections + tags: + host: server01 + region: us-west + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 2000 + - measurement: connections + tags: + host: server02 + region: us-east + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 3000 +''' + +RETURN = r''' +# only defaults +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb + + +class AnsibleInfluxDBWrite(InfluxDb): + + def write_data_point(self, data_points): + client = self.connect_to_influxdb() + + try: + client.write_points(data_points) + except Exception as e: + self.module.fail_json(msg=to_native(e)) + + +def main(): + argument_spec = InfluxDb.influxdb_argument_spec() + argument_spec.update( + data_points=dict(required=True, type='list', elements='dict'), + database_name=dict(required=True, type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + ) + + influx = AnsibleInfluxDBWrite(module) + data_points = module.params.get('data_points') + influx.write_data_point(data_points) + module.exit_json(changed=True) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ini_file.py b/ansible_collections/community/general/plugins/modules/ini_file.py new file mode 100644 index 000000000..874f10ae0 --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/ini_file.py @@ -0,0 +1,490 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2012, Jan-Piet Mens +# Copyright (c) 2015, Ales Nosek +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: ini_file +short_description: Tweak settings in INI files +extends_documentation_fragment: + - files + - community.general.attributes +description: + - Manage (add, remove, change) individual settings in an INI-style file without having + to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). + - Adds missing sections if they don't exist. + - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. + - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when + no other modifications need to be applied. +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + path: + description: + - Path to the INI-style file; this file is created if required. + - Before Ansible 2.3 this option was only usable as I(dest). + type: path + required: true + aliases: [ dest ] + section: + description: + - Section name in INI file. This is added if I(state=present) automatically when + a single value is being set. + - If left empty, being omitted, or being set to C(null), the I(option) will be placed before the first I(section). + - Using C(null) is also required if the config format does not support sections. + type: str + option: + description: + - If set (required for changing a I(value)), this is the name of the option. + - May be omitted if adding/removing a whole I(section). + type: str + value: + description: + - The string value to be associated with an I(option). + - May be omitted when removing an I(option). + - Mutually exclusive with I(values). + - I(value=v) is equivalent to I(values=[v]). + type: str + values: + description: + - The string value to be associated with an I(option). + - May be omitted when removing an I(option). + - Mutually exclusive with I(value). + - I(value=v) is equivalent to I(values=[v]). + type: list + elements: str + version_added: 3.6.0 + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + type: bool + default: false + state: + description: + - If set to C(absent) and I(exclusive) set to C(true) all matching I(option) lines are removed. + - If set to C(absent) and I(exclusive) set to C(false) the specified I(option=value) lines are removed, + but the other I(option)s with the same name are not touched. + - If set to C(present) and I(exclusive) set to C(false) the specified I(option=values) lines are added, + but the other I(option)s with the same name are not touched. + - If set to C(present) and I(exclusive) set to C(true) all given I(option=values) lines will be + added and the other I(option)s with the same name are removed. 
+    type: str
+    choices: [ absent, present ]
+    default: present
+  exclusive:
+    description:
+      - If set to C(true) (default), all matching I(option) lines are removed when I(state=absent),
+        or replaced when I(state=present).
+      - If set to C(false), only the specified I(value(s)) are added when I(state=present),
+        or removed when I(state=absent), and existing ones are not modified.
+    type: bool
+    default: true
+    version_added: 3.6.0
+  no_extra_spaces:
+    description:
+      - Do not insert spaces before and after '=' symbol.
+    type: bool
+    default: false
+  create:
+    description:
+      - If set to C(false), the module will fail if the file does not already exist.
+      - By default it will create the file if it is missing.
+    type: bool
+    default: true
+  allow_no_value:
+    description:
+      - Allow option without value and without '=' symbol.
+    type: bool
+    default: false
+notes:
+  - While it is possible to add an I(option) without specifying a I(value), this makes no sense.
+  - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+  - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files.
+author:
+  - Jan-Piet Mens (@jpmens)
+  - Ales Nosek (@noseka1)
+'''

+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' was used instead of 'path'
+- name: Ensure "fav=lemonade" is in section "[drinks]" in specified file
+  community.general.ini_file:
+    path: /etc/conf
+    section: drinks
+    option: fav
+    value: lemonade
+    mode: '0600'
+    backup: true
+
+- name: Ensure "temperature=cold" is in section "[drinks]" in specified file
+  community.general.ini_file:
+    path: /etc/anotherconf
+    section: drinks
+    option: temperature
+    value: cold
+    backup: true
+
+- name: Ensure "beverage=lemon juice" is in section "[drinks]" in specified file
+  community.general.ini_file:
+    path: /etc/conf
+    section: drinks
+    option: beverage
+    value: lemon juice
+    mode: '0600'
+    state: present
+    exclusive: false
+
+- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file
+  community.general.ini_file:
+    path: /etc/conf
+    section: drinks
+    option: beverage
+    values:
+      - coke
+      - pepsi
+    mode: '0600'
+    state: present
+'''
+
+import io
+import os
+import re
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+
+
+def match_opt(option, line):
+    option = re.escape(option)
+    return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
+
+
+def match_active_opt(option, line):
+    option = re.escape(option)
+    return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
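+
+
+# Note on the two matchers above (illustrative, not executed): match_opt() also
+# matches commented-out options, e.g. '; fav = lemonade', so an existing
+# commented entry can be replaced by an active one, while match_active_opt()
+# only matches live lines such as 'fav = lemonade' and is used when removing
+# options, so comments are left untouched.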
+
+
+def update_section_line(changed, section_lines, index, changed_lines, newline, msg):
+    option_changed = section_lines[index] != newline
+    changed = changed or option_changed
+    if option_changed:
+        msg = 'option changed'
+    section_lines[index] = newline
+    changed_lines[index] = 1
+    return (changed, msg)
+
+
+def do_ini(module, filename, section=None, option=None, values=None,
+           state='present', exclusive=True, backup=False, no_extra_spaces=False,
+           create=True, allow_no_value=False):
+
+    if section is not None:
+        section = to_text(section)
+    if option is not None:
+        option = to_text(option)
+
+    # deduplicate entries in values
+    values_unique = []
+    for value in values:
+        if value not in values_unique and value is not None:
+            values_unique.append(to_text(value))
+    values = values_unique
+
+    diff = dict(
+        before='',
+        after='',
+        before_header='%s (content)' % filename,
+        after_header='%s (content)' % filename,
+    )
+
+    if not os.path.exists(filename):
+        if not create:
+            module.fail_json(rc=257, msg='Destination %s does not exist!' % filename)
+        destpath = os.path.dirname(filename)
+        if not os.path.exists(destpath) and not module.check_mode:
+            os.makedirs(destpath)
+        ini_lines = []
+    else:
+        with io.open(filename, 'r', encoding="utf-8-sig") as ini_file:
+            ini_lines = [to_text(line) for line in ini_file.readlines()]
+
+    if module._diff:
+        diff['before'] = u''.join(ini_lines)
+
+    changed = False
+
+    # ini file could be empty
+    if not ini_lines:
+        ini_lines.append(u'\n')
+
+    # last line of file may not contain a trailing newline
+    if ini_lines[-1] == u"" or ini_lines[-1][-1] != u'\n':
+        ini_lines[-1] += u'\n'
+        changed = True
+
+    # append fake section lines to simplify the logic
+    # At top:
+    # Fake section that will not match any other section in the file
+    # (a commit hash is used as the fake section name)
+    fake_section_name = u"ad01e11446efb704fcdbdb21f2c43757423d91c5"
+
+    # Insert it at the beginning
+    ini_lines.insert(0, u'[%s]' % fake_section_name)
+
+    # At bottom:
+    ini_lines.append(u'[')
+
+    # If no section is defined, fake section is used
+    if not section:
+        section = fake_section_name
+
+    within_section = not section
+    section_start = section_end = 0
+    msg = 'OK'
+    if no_extra_spaces:
+        assignment_format = u'%s=%s\n'
+    else:
+        assignment_format = u'%s = %s\n'
+
+    option_no_value_present = False
+
+    non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$'))
+
+    before = after = []
+    section_lines = []
+
+    for index, line in enumerate(ini_lines):
+        # find start and end of section
+        if line.startswith(u'[%s]' % section):
+            within_section = True
+            section_start = index
+        elif line.startswith(u'['):
+            if within_section:
+                section_end = index
+                break
+
+    before = ini_lines[0:section_start]
+    section_lines = ini_lines[section_start:section_end]
+    after = ini_lines[section_end:len(ini_lines)]
+
+    # Keep track of changed section_lines
+    changed_lines = [0] * len(section_lines)
+
+    # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex
+    #
+    # 1. edit all lines where we have an option=value pair with a matching value in values[]
+    # 2. edit all the remaining lines where we have a matching option
+    # 3. delete remaining lines where we have a matching option
+    # 4. insert missing option line(s) at the end of the section
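+    #
+    # A hypothetical walk-through (defaults otherwise): with option='beverage',
+    # values=['coke', 'pepsi'], state='present' and exclusive=true, an existing
+    # 'beverage = water' line is rewritten to 'beverage = coke' (step 2), any
+    # further 'beverage = ...' lines are deleted (step 3), and 'beverage = pepsi'
+    # is appended at the end of the section (step 4).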
+
+    if state == 'present' and option:
+        for index, line in enumerate(section_lines):
+            match = match_opt(option, line)
+            if match:
+                if values and match.group(6) in values:
+                    matched_value = match.group(6)
+                    if not matched_value and allow_no_value:
+                        # replace existing option with no value line(s)
+                        newline = u'%s\n' % option
+                        option_no_value_present = True
+                    else:
+                        # replace existing option=value line(s)
+                        newline = assignment_format % (option, matched_value)
+                    (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+                    values.remove(matched_value)
+                elif not values and allow_no_value:
+                    # replace existing option with no value line(s)
+                    newline = u'%s\n' % option
+                    (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+                    option_no_value_present = True
+                    break
+
+    if state == 'present' and exclusive and not allow_no_value:
+        # override option with no value to option with value if not allow_no_value
+        if len(values) > 0:
+            for index, line in enumerate(section_lines):
+                if not changed_lines[index] and match_opt(option, line):
+                    newline = assignment_format % (option, values.pop(0))
+                    (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+                    if len(values) == 0:
+                        break
+        # remove all remaining option occurrences from the rest of the section
+        for index in range(len(section_lines) - 1, 0, -1):
+            if not changed_lines[index] and match_opt(option, section_lines[index]):
+                del section_lines[index]
+                del changed_lines[index]
+                changed = True
+                msg = 'option changed'
+
+    if state == 'present':
+        # insert missing option line(s) at the end of the section
+        for index in range(len(section_lines), 0, -1):
+            # search backwards for previous non-blank or non-comment line
+            if not non_blank_non_comment_pattern.match(section_lines[index - 1]):
+                if option and values:
+                    # insert option line(s)
+                    for element in values[::-1]:
+                        # items are added backwards, so traverse the list backwards to not confuse the user
+                        # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯
+                        if element is not None:
+                            # insert option=value line
+                            section_lines.insert(index, assignment_format % (option, element))
+                            msg = 'option added'
+                            changed = True
+                        elif element is None and allow_no_value:
+                            # insert option with no value line
+                            section_lines.insert(index, u'%s\n' % option)
+                            msg = 'option added'
+                            changed = True
+                elif option and not values and allow_no_value and not option_no_value_present:
+                    # insert option with no value line(s)
+                    section_lines.insert(index, u'%s\n' % option)
+                    msg = 'option added'
+                    changed = True
+                break
+
+    if state == 'absent':
+        if option:
+            if exclusive:
+                # delete all option line(s) with given option and ignore value
+                new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))]
+                if section_lines != new_section_lines:
+                    changed = True
+                    msg = 'option changed'
+                    section_lines = new_section_lines
+            elif not exclusive and len(values) > 0:
+                # delete specified option=value line(s)
+                new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)]
+                if section_lines != new_section_lines:
+                    changed = True
+                    msg = 'option changed'
+                    section_lines = new_section_lines
+        else:
+            # drop the entire section
+            if section_lines:
+                section_lines = []
+                msg = 'section removed'
+                changed = True
+
+    # reassemble the ini_lines after manipulation
+    ini_lines = before + section_lines + after
+
+    # remove the fake section line
+    del ini_lines[0]
+    del ini_lines[-1:]
+
+    if not within_section and state == 'present':
+        ini_lines.append(u'[%s]\n' % section)
+        msg = 'section and option added'
+        if option and values:
+            for value in values:
+                ini_lines.append(assignment_format % (option, value))
+        elif option and not values and allow_no_value:
+            ini_lines.append(u'%s\n' % option)
+        else:
+            msg = 'only section added'
+        changed = True
+
+    if module._diff:
+        diff['after'] = u''.join(ini_lines)
+
+    backup_file = None
+    if changed and not module.check_mode:
+        if backup:
+            backup_file = module.backup_local(filename)
+
+        encoded_ini_lines = [to_bytes(line) for line in ini_lines]
+        try:
+            tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+            f = os.fdopen(tmpfd, 'wb')
+            f.writelines(encoded_ini_lines)
+            f.close()
+        except IOError:
+            module.fail_json(msg="Unable to create temporary file in %s" % module.tmpdir, traceback=traceback.format_exc())
+
+        try:
+            module.atomic_move(tmpfile, filename)
+        except IOError:
+            module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename),
+                             traceback=traceback.format_exc())
+
+    return (changed, backup_file, diff, msg)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(type='path', required=True, aliases=['dest']),
+            section=dict(type='str'),
+            option=dict(type='str'),
+            value=dict(type='str'),
+            values=dict(type='list', elements='str'),
+            backup=dict(type='bool', default=False),
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+            exclusive=dict(type='bool', default=True),
+            no_extra_spaces=dict(type='bool', default=False),
+            allow_no_value=dict(type='bool', default=False),
+            create=dict(type='bool', default=True)
+        ),
+        mutually_exclusive=[
+            ['value', 'values']
+        ],
+        add_file_common_args=True,
+        supports_check_mode=True,
+    )
+
+    path = module.params['path']
+    section = module.params['section']
+    option = module.params['option']
+    value = module.params['value']
+    values = module.params['values']
+    state = module.params['state']
+    exclusive = module.params['exclusive']
+    backup = module.params['backup']
+    no_extra_spaces = module.params['no_extra_spaces']
+    allow_no_value = module.params['allow_no_value']
+    create = module.params['create']
+
+    if state == 'present' and not allow_no_value and value is None and not values:
+        module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.")
+
+    if value is not None:
+        values = [value]
+    elif values is None:
+        values = []
+
+    (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value)
+
+    if not module.check_mode and os.path.exists(path):
+        file_args = module.load_file_common_arguments(module.params)
+        changed = module.set_fs_attributes_if_different(file_args, changed)
+
+    results = dict(
+        changed=changed,
+        diff=diff,
+        msg=msg,
+        path=path,
+    )
+    if backup_file is not None:
+        results['backup_file'] = backup_file
+
+    # Mission complete
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/installp.py b/ansible_collections/community/general/plugins/modules/installp.py
new file mode 100644
index 000000000..41064363d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/installp.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Kairo Araujo
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: installp
+author:
+  - Kairo Araujo (@kairoaraujo)
+short_description: Manage packages on AIX
+description:
+  - Manage packages using 'installp' on AIX.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  accept_license:
+    description:
+      - Whether to accept the license for the package(s).
+    type: bool
+    default: false
+  name:
+    description:
+      - One or more packages to install or remove.
+      - Use C(all) to install all packages available on informed C(repository_path).
+    type: list
+    elements: str
+    required: true
+    aliases: [ pkg ]
+  repository_path:
+    description:
+      - Path with AIX packages (required to install).
+    type: path
+  state:
+    description:
+      - Whether the package needs to be present on or absent from the system.
+    type: str
+    choices: [ absent, present ]
+    default: present
+notes:
+  - If the package is already installed, the module will not install it again, even if the package/fileset is new.
+'''
+
+EXAMPLES = r'''
+- name: Install package foo
+  community.general.installp:
+    name: foo
+    repository_path: /repository/AIX71/installp/base
+    accept_license: true
+    state: present
+
+- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+  community.general.installp:
+    name: bos.sysmgt
+    repository_path: /repository/AIX71/installp/base
+    accept_license: true
+    state: present
+
+- name: Install bos.sysmgt.nim.master only
+  community.general.installp:
+    name: bos.sysmgt.nim.master
+    repository_path: /repository/AIX71/installp/base
+    accept_license: true
+    state: present
+
+- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
+  community.general.installp:
+    name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+    repository_path: /repository/AIX71/installp/base
+    accept_license: true
+    state: present
+
+- name: Remove packages bos.sysmgt.nim.master
+  community.general.installp:
+    name: bos.sysmgt.nim.master
+    state: absent
+'''
+
+RETURN = r''' # '''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_new_pkg(module, package, repository_path):
+    """
+    Check that the package or fileset name and the repository path are correct.
+
+    :param module: Ansible module argument spec.
+    :param package: Package/fileset name.
+    :param repository_path: Repository package path.
+    :return: Bool, package information.
+    """
+
+    if os.path.isdir(repository_path):
+        installp_cmd = module.get_bin_path('installp', True)
+        rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
+        if rc != 0:
+            module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+
+        if package == 'all':
+            pkg_info = "All packages on dir"
+            return True, pkg_info
+
+        else:
+            pkg_info = {}
+            for line in package_result.splitlines():
+                if re.findall(package, line):
+                    pkg_name = line.split()[0].strip()
+                    pkg_version = line.split()[1].strip()
+                    pkg_info[pkg_name] = pkg_version
+
+            return True, pkg_info
+
+    else:
+        module.fail_json(msg="Repository path %s is not valid." % repository_path)
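+
+
+# For reference, a hypothetical pkg_info mapping returned by _check_new_pkg
+# could look like {'bos.sysmgt.nim.master': '7.1.0.0'}: one entry per matching
+# fileset listed by 'installp -l -MR -d <repository_path>'.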
+
+
+def _check_installed_pkg(module, package, repository_path):
+    """
+    Check the package on AIX.
+    It verifies whether the package is installed and gathers its information.
+
+    :param module: Ansible module parameters spec.
+    :param package: Package/fileset name.
+    :param repository_path: Repository package path.
+    :return: Bool, package data.
+    """
+
+    lslpp_cmd = module.get_bin_path('lslpp', True)
+    rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
+
+    if rc == 1:
+        package_state = ' '.join(err.split()[-2:])
+        if package_state == 'not installed.':
+            return False, None
+        else:
+            module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+    if rc != 0:
+        module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+    pkg_data = {}
+    full_pkg_data = lslpp_result.splitlines()
+    for line in full_pkg_data:
+        pkg_name, fileset, level = line.split(':')[0:3]
+        pkg_data[pkg_name] = fileset, level
+
+    return True, pkg_data
+
+
+def remove(module, installp_cmd, packages):
+    repository_path = None
+    remove_count = 0
+    removed_pkgs = []
+    not_found_pkg = []
+    for package in packages:
+        pkg_check, dummy = _check_installed_pkg(module, package, repository_path)
+
+        if pkg_check:
+            if not module.check_mode:
+                rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
+                if rc != 0:
+                    module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+            remove_count += 1
+            removed_pkgs.append(package)
+
+        else:
+            not_found_pkg.append(package)
+
+    if remove_count > 0:
+        if len(not_found_pkg) > 0:
+            not_found_pkg.insert(0, "Package(s) not found: ")
+
+        changed = True
+        msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
+
+    else:
+        changed = False
+        msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
+
+    return changed, msg
+
+
+def install(module, installp_cmd, packages, repository_path, accept_license):
+    installed_pkgs = []
+    not_found_pkgs = []
+    already_installed_pkgs = {}
+
+    accept_license_param = {
+        True: '-Y',
+        False: '',
+    }
+
+    # Validate if package exists on repository path.
+    for package in packages:
+        pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
+
+        # If package exists on repository path, check if package is installed.
+        if pkg_check:
+            pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
+
+            # If package is already installed.
+            if pkg_check_current:
+                # Check if package is a package and not a fileset, get version
+                # and add the package into already installed list
+                if package in pkg_info.keys():
+                    already_installed_pkgs[package] = pkg_info[package][1]
+
+                else:
+                    # If the package is not a package but a fileset, confirm
+                    # and add the fileset/package into already installed list
+                    for key in pkg_info.keys():
+                        if package in pkg_info[key]:
+                            already_installed_pkgs[package] = pkg_info[key][1]
+
+            else:
+                if not module.check_mode:
+                    rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
+                    if rc != 0:
+                        module.fail_json(msg="Failed to run installp", rc=rc, err=err)
+                installed_pkgs.append(package)
+
+        else:
+            not_found_pkgs.append(package)
+
+    if len(installed_pkgs) > 0:
+        installed_msg = (" Installed: %s." % ' '.join(installed_pkgs))
+    else:
+        installed_msg = ''
+
+    if len(not_found_pkgs) > 0:
+        not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs))
+    else:
+        not_found_msg = ''
+
+    if len(already_installed_pkgs) > 0:
+        already_installed_msg = (" Already installed: %s." % already_installed_pkgs)
+    else:
+        already_installed_msg = ''
+
+    if len(installed_pkgs) > 0:
+        changed = True
+        msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+    else:
+        changed = False
+        msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+
+    return changed, msg
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+            repository_path=dict(type='path'),
+            accept_license=dict(type='bool', default=False),
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+        ),
+        supports_check_mode=True,
+    )
+
+    name = module.params['name']
+    repository_path = module.params['repository_path']
+    accept_license = module.params['accept_license']
+    state = module.params['state']
+
+    installp_cmd = module.get_bin_path('installp', True)
+
+    if state == 'present':
+        if repository_path is None:
+            module.fail_json(msg="repository_path is required to install package")
+
+        changed, msg = install(module, installp_cmd, name, repository_path, accept_license)
+
+    elif state == 'absent':
+        changed, msg = remove(module, installp_cmd, name)
+
+    else:
+        module.fail_json(changed=False, msg="Unexpected state.")
+
+    module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/interfaces_file.py b/ansible_collections/community/general/plugins/modules/interfaces_file.py
new file mode 100644
index 000000000..f19c019f4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/interfaces_file.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, Roman Belyakovsky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: interfaces_file
+short_description: Tweak settings in /etc/network/interfaces files
+extends_documentation_fragment:
+  - ansible.builtin.files
+  - community.general.attributes
+description:
+  - Manage (add, remove, change) individual interface options in an interfaces-style file without having
+    to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
+    The interface has to be present in the file already.
+  - Read information about interfaces from interfaces-styled files.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  dest:
+    type: path
+    description:
+      - Path to the interfaces file.
+    default: /etc/network/interfaces
+  iface:
+    type: str
+    description:
+      - Name of the interface, required for value changes or option removal.
+  address_family:
+    type: str
+    description:
+      - Address family of the interface, useful if the same interface name is used for both inet and inet6.
+  option:
+    type: str
+    description:
+      - Name of the option, required for value changes or option removal.
+  value:
+    type: str
+    description:
+      - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
+        If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
+        C(pre-up), C(up), C(post-up) and C(down) options cannot be updated; only adding new options, removing
+        existing ones, or clearing the whole option set is supported.
+  backup:
+    description:
+      - Create a backup file including the timestamp information so you can get
+        the original file back if you somehow clobbered it incorrectly.
+    type: bool
+    default: false
+  state:
+    type: str
+    description:
+      - If set to C(absent) the option or section will be removed if present instead of created.
+    default: "present"
+    choices: [ "present", "absent" ]
+
+notes:
+  - If an option is defined multiple times, only the last occurrence will be updated, but all occurrences will be deleted when I(state=absent).
+requirements: []
+author: "Roman Belyakovsky (@hryamzik)"
+'''
+
+RETURN = '''
+dest:
+  description: Destination file/path.
+  returned: success
+  type: str
+  sample: "/etc/network/interfaces"
+ifaces:
+  description: Interfaces dictionary.
+  returned: success
+  type: complex
+  contains:
+    ifaces:
+      description: Interface dictionary.
+      returned: success
+      type: dict
+      contains:
+        eth0:
+          description: Name of the interface.
+          returned: success
+          type: dict
+          contains:
+            address_family:
+              description: Interface address family.
+              returned: success
+              type: str
+              sample: "inet"
+            method:
+              description: Interface method.
+              returned: success
+              type: str
+              sample: "manual"
+            mtu:
+              description: Other options, all values returned as strings.
+              returned: success
+              type: str
+              sample: "1500"
+            pre-up:
+              description: List of C(pre-up) scripts.
+              returned: success
+              type: list
+              sample:
+                - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+                - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+            up:
+              description: List of C(up) scripts.
+              returned: success
+              type: list
+              sample:
+                - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+                - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+            post-up:
+              description: List of C(post-up) scripts.
+              returned: success
+              type: list
+              sample:
+                - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+                - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+            down:
+              description: List of C(down) scripts.
+              returned: success
+              type: list
+              sample:
+                - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+                - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+...
+''' + +EXAMPLES = ''' +- name: Set eth1 mtu configuration value to 8000 + community.general.interfaces_file: + dest: /etc/network/interfaces.d/eth1.cfg + iface: eth1 + option: mtu + value: 8000 + backup: true + state: present + register: eth1_cfg +''' + +import os +import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes + + +def lineDict(line): + return {'line': line, 'line_type': 'unknown'} + + +def optionDict(line, iface, option, value, address_family): + return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} + + +def getValueFromLine(s): + spaceRe = re.compile(r'\s+') + m = list(spaceRe.finditer(s))[-1] + valueEnd = m.start() + option = s.split()[0] + optionStart = s.find(option) + optionLen = len(option) + return s[optionLen + optionStart:].strip() + + +def read_interfaces_file(module, filename): + with open(filename, 'r') as f: + return read_interfaces_lines(module, f) + + +def read_interfaces_lines(module, line_strings): + lines = [] + ifaces = {} + currently_processing = None + i = 0 + for line in line_strings: + i += 1 + words = line.split() + if len(words) < 1: + lines.append(lineDict(line)) + continue + if words[0][0] == "#": + lines.append(lineDict(line)) + continue + if words[0] == "mapping": + # currmap = calloc(1, sizeof *currmap); + lines.append(lineDict(line)) + currently_processing = "MAPPING" + elif words[0] == "source": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "source-dir": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "source-directory": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "iface": + currif = { + "pre-up": [], + "up": [], + "down": [], + "post-up": [] + } + iface_name = words[1] + try: + currif['address_family'] = words[2] + except IndexError: + currif['address_family'] = None + address_family = currif['address_family'] + try: + currif['method'] = words[3] + except IndexError: + currif['method'] = None + + ifaces[iface_name] = currif + lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family}) + currently_processing = "IFACE" + elif words[0] == "auto": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0].startswith("allow-"): + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "no-auto-down": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "no-scripts": + lines.append(lineDict(line)) + currently_processing = "NONE" + else: + if currently_processing == "IFACE": + option_name = words[0] + value = getValueFromLine(line) + lines.append(optionDict(line, iface_name, option_name, value, address_family)) + if option_name in ["pre-up", "up", "down", "post-up"]: + currif[option_name].append(value) + else: + currif[option_name] = value + elif currently_processing == "MAPPING": + lines.append(lineDict(line)) + elif currently_processing == "NONE": + lines.append(lineDict(line)) + else: + module.fail_json(msg="misplaced option %s in line %d" % (line, i)) + return None, None + return lines, ifaces + + +def get_interface_options(iface_lines): + return [i for i in iface_lines if i['line_type'] == 'option'] + + +def get_target_options(iface_options, option): + return [i for i in iface_options if i['option'] == option] + + +def 
update_existing_option_line(target_option, value):
+    old_line = target_option['line']
+    old_value = target_option['value']
+    prefix_start = old_line.find(target_option["option"])
+    optionLen = len(target_option["option"])
+    old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
+    start = old_value_position.start() + prefix_start + optionLen
+    end = old_value_position.end() + prefix_start + optionLen
+    line = old_line[:start] + value + old_line[end:]
+    return line
+
+
+def set_interface_option(module, lines, iface, option, raw_value, state, address_family=None):
+    value = str(raw_value)
+    changed = False
+
+    iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
+    if address_family is not None:
+        iface_lines = [item for item in iface_lines
+                       if "address_family" in item and item["address_family"] == address_family]
+
+    if len(iface_lines) < 1:
+        # interface not found
+        module.fail_json(msg="Error: interface %s not found" % iface)
+        return changed, None
+
+    iface_options = get_interface_options(iface_lines)
+    target_options = get_target_options(iface_options, option)
+
+    if state == "present":
+        if len(target_options) < 1:
+            changed = True
+            # add new option
+            last_line_dict = iface_lines[-1]
+            changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
+        else:
+            if option in ["pre-up", "up", "down", "post-up"]:
+                if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
+                    changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
+            else:
+                # if more than one option found edit the last one
+                if target_options[-1]['value'] != value:
+                    changed = True
+                    target_option = target_options[-1]
+                    line = update_existing_option_line(target_option, value)
+                    address_family = target_option['address_family']
+                    index = len(lines) - lines[::-1].index(target_option) - 1
+                    lines[index] = optionDict(line, iface, option, value, address_family)
+    elif state == "absent":
+        if len(target_options) >= 1:
+            if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
+                for target_option in [ito for ito in target_options if ito['value'] == value]:
+                    changed = True
+                    lines = [ln for ln in lines if ln != target_option]
+            else:
+                changed = True
+                for target_option in target_options:
+                    lines = [ln for ln in lines if ln != target_option]
+    else:
+        module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
+
+    return changed, lines
+
+
+def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
+    # Changing method of interface is not an addition
+    if option == 'method':
+        changed = False
+        for ln in lines:
+            if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''):
+                changed = True
+                ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line'))
+                ln['params']['method'] = value
+        return changed, lines
+
+    last_line = last_line_dict['line']
+    prefix_start = last_line.find(last_line.split()[0])
+    suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
+    prefix = last_line[:prefix_start]
+
+    if len(iface_options) < 1:
+        # interface has no options, indent
+        prefix += "    "
+
+    line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
+    option_dict = optionDict(line, iface, option, value,
address_family) + index = len(lines) - lines[::-1].index(last_line_dict) + lines.insert(index, option_dict) + return True, lines + + +def write_changes(module, lines, dest): + + tmpfd, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfd, 'wb') as f: + f.write(to_bytes(''.join(lines), errors='surrogate_or_strict')) + module.atomic_move(tmpfile, os.path.realpath(dest)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dest=dict(type='path', default='/etc/network/interfaces'), + iface=dict(type='str'), + address_family=dict(type='str'), + option=dict(type='str'), + value=dict(type='str'), + backup=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + add_file_common_args=True, + supports_check_mode=True, + required_by=dict( + option=('iface',), + ), + ) + + dest = module.params['dest'] + iface = module.params['iface'] + address_family = module.params['address_family'] + option = module.params['option'] + value = module.params['value'] + backup = module.params['backup'] + state = module.params['state'] + + if option is not None and state == "present" and value is None: + module.fail_json(msg="Value must be set if option is defined and state is 'present'") + + lines, ifaces = read_interfaces_file(module, dest) + + changed = False + + if option is not None: + changed, lines = set_interface_option(module, lines, iface, option, value, state, address_family) + + if changed: + dummy, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d]) + + if changed and not module.check_mode: + if backup: + module.backup_local(dest) + write_changes(module, [d['line'] for d in lines if 'line' in d], dest) + + module.exit_json(dest=dest, changed=changed, ifaces=ifaces) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ip_netns.py b/ansible_collections/community/general/plugins/modules/ip_netns.py new file mode 100644 index 000000000..69534c810 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ip_netns.py @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Arie Bregman +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ip_netns +author: "Arie Bregman (@bregman-arie)" +short_description: Manage network namespaces +requirements: [ ip ] +description: + - Create or delete network namespaces using the ip command. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + required: false + description: + - Name of the namespace + type: str + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the namespace should exist + type: str +''' + +EXAMPLES = ''' +- name: Create a namespace named mario + community.general.ip_netns: + name: mario + state: present + +- name: Delete a namespace named luigi + community.general.ip_netns: + name: luigi + state: absent +''' + +RETURN = ''' +# Default return values +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text + + +class Namespace(object): + """Interface to network namespaces. 
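+ Thin wrapper around the "ip netns" command line tool: list, create and delete named network namespaces and report the result back to Ansible.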
""" + + def __init__(self, module): + self.module = module + self.name = module.params['name'] + self.state = module.params['state'] + + def _netns(self, command): + '''Run ip nents command''' + return self.module.run_command(['ip', 'netns'] + command) + + def exists(self): + '''Check if the namespace already exists''' + rc, out, err = self.module.run_command(['ip', 'netns', 'list']) + if rc != 0: + self.module.fail_json(msg=to_text(err)) + return self.name in out + + def add(self): + '''Create network namespace''' + rtc, out, err = self._netns(['add', self.name]) + + if rtc != 0: + self.module.fail_json(msg=err) + + def delete(self): + '''Delete network namespace''' + rtc, out, err = self._netns(['del', self.name]) + if rtc != 0: + self.module.fail_json(msg=err) + + def check(self): + '''Run check mode''' + changed = False + + if self.state == 'present' and self.exists(): + changed = True + + elif self.state == 'absent' and self.exists(): + changed = True + elif self.state == 'present' and not self.exists(): + changed = True + + self.module.exit_json(changed=changed) + + def run(self): + '''Make the necessary changes''' + changed = False + + if self.state == 'absent': + if self.exists(): + self.delete() + changed = True + elif self.state == 'present': + if not self.exists(): + self.add() + changed = True + + self.module.exit_json(changed=changed) + + +def main(): + """Entry point.""" + module = AnsibleModule( + argument_spec={ + 'name': {'default': None}, + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + }, + supports_check_mode=True, + ) + + network_namespace = Namespace(module) + if module.check_mode: + network_namespace.check() + else: + network_namespace.run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_config.py b/ansible_collections/community/general/plugins/modules/ipa_config.py new file mode 100644 index 000000000..ec94b58d4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_config.py @@ -0,0 +1,369 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Fran Fitzpatrick +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_config +author: Fran Fitzpatrick (@fxfitz) +short_description: Manage Global FreeIPA Configuration Settings +description: + - Modify global configuration settings of a FreeIPA Server. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + ipaconfigstring: + description: Extra hashes to generate in password plug-in. + aliases: ["configstring"] + type: list + elements: str + choices: ["AllowNThash", "KDC:Disable Last Success", "KDC:Disable Lockout", "KDC:Disable Default Preauth for SPNs"] + version_added: '2.5.0' + ipadefaultloginshell: + description: Default shell for new users. + aliases: ["loginshell"] + type: str + ipadefaultemaildomain: + description: Default e-mail domain for new users. + aliases: ["emaildomain"] + type: str + ipadefaultprimarygroup: + description: Default group for new users. + aliases: ["primarygroup"] + type: str + version_added: '2.5.0' + ipagroupsearchfields: + description: A list of fields to search in when searching for groups. 
+ aliases: ["groupsearchfields"] + type: list + elements: str + version_added: '2.5.0' + ipahomesrootdir: + description: Default location of home directories. + aliases: ["homesrootdir"] + type: str + version_added: '2.5.0' + ipakrbauthzdata: + description: Default types of PAC supported for services. + aliases: ["krbauthzdata"] + type: list + elements: str + choices: ["MS-PAC", "PAD", "nfs:NONE"] + version_added: '2.5.0' + ipamaxusernamelength: + description: Maximum length of usernames. + aliases: ["maxusernamelength"] + type: int + version_added: '2.5.0' + ipapwdexpadvnotify: + description: Notice of impending password expiration, in days. + aliases: ["pwdexpadvnotify"] + type: int + version_added: '2.5.0' + ipasearchrecordslimit: + description: Maximum number of records to search (-1 or 0 is unlimited). + aliases: ["searchrecordslimit"] + type: int + version_added: '2.5.0' + ipasearchtimelimit: + description: Maximum amount of time (seconds) for a search (-1 or 0 is unlimited). + aliases: ["searchtimelimit"] + type: int + version_added: '2.5.0' + ipaselinuxusermaporder: + description: The SELinux user map order (order in increasing priority of SELinux users). + aliases: ["selinuxusermaporder"] + type: list + elements: str + version_added: '3.7.0' + ipauserauthtype: + description: The authentication type to use by default. + aliases: ["userauthtype"] + choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"] + type: list + elements: str + version_added: '2.5.0' + ipausersearchfields: + description: A list of fields to search in when searching for users. + aliases: ["usersearchfields"] + type: list + elements: str + version_added: '2.5.0' +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled + community.general.ipa_config: + ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default login shell is bash + community.general.ipa_config: + ipadefaultloginshell: /bin/bash + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default e-mail domain is ansible.com + community.general.ipa_config: + ipadefaultemaildomain: ansible.com + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default primary group is set to ipausers + community.general.ipa_config: + ipadefaultprimarygroup: ipausers + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the group search fields are set to 'cn,description' + community.general.ipa_config: + ipagroupsearchfields: ['cn', 'description'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the home directory location is set to /home + community.general.ipa_config: + ipahomesrootdir: /home + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default types of PAC supported for services is set to MS-PAC and PAD + community.general.ipa_config: + ipakrbauthzdata: ["MS-PAC", "PAD"] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the maximum user name length is set to 32 + community.general.ipa_config: + ipamaxusernamelength: 32 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the password expiration notice is set to 4 days + community.general.ipa_config: + 
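# the notice period is given in days before expiry +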
ipapwdexpadvnotify: 4 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the search record limit is set to 100 + community.general.ipa_config: + ipasearchrecordslimit: 100 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the search time limit is set to 2 seconds + community.general.ipa_config: + ipasearchtimelimit: 2 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default user auth type is password + community.general.ipa_config: + ipauserauthtype: ['password'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the user search fields is set to 'uid,givenname,sn,ou,title' + community.general.ipa_config: + ipausersearchfields: ['uid', 'givenname', 'sn', 'ou', 'title'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the SELinux user map order is set + community.general.ipa_config: + ipaselinuxusermaporder: + - "guest_u:s0" + - "xguest_u:s0" + - "user_u:s0" + - "staff_u:s0-s0:c0.c1023" + - "unconfined_u:s0-s0:c0.c1023" + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret +''' + +RETURN = r''' +config: + description: Configuration as returned by IPA API. + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class ConfigIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(ConfigIPAClient, self).__init__(module, host, port, protocol) + + def config_show(self): + return self._post_json(method='config_show', name=None) + + def config_mod(self, name, item): + return self._post_json(method='config_mod', name=name, item=item) + + +def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, + ipadefaultemaildomain=None, ipadefaultprimarygroup=None, + ipagroupsearchfields=None, ipahomesrootdir=None, + ipakrbauthzdata=None, ipamaxusernamelength=None, + ipapwdexpadvnotify=None, ipasearchrecordslimit=None, + ipasearchtimelimit=None, ipaselinuxusermaporder=None, + ipauserauthtype=None, ipausersearchfields=None): + config = {} + if ipaconfigstring is not None: + config['ipaconfigstring'] = ipaconfigstring + if ipadefaultloginshell is not None: + config['ipadefaultloginshell'] = ipadefaultloginshell + if ipadefaultemaildomain is not None: + config['ipadefaultemaildomain'] = ipadefaultemaildomain + if ipadefaultprimarygroup is not None: + config['ipadefaultprimarygroup'] = ipadefaultprimarygroup + if ipagroupsearchfields is not None: + config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields) + if ipahomesrootdir is not None: + config['ipahomesrootdir'] = ipahomesrootdir + if ipakrbauthzdata is not None: + config['ipakrbauthzdata'] = ipakrbauthzdata + if ipamaxusernamelength is not None: + config['ipamaxusernamelength'] = str(ipamaxusernamelength) + if ipapwdexpadvnotify is not None: + config['ipapwdexpadvnotify'] = str(ipapwdexpadvnotify) + if ipasearchrecordslimit is not None: + config['ipasearchrecordslimit'] = str(ipasearchrecordslimit) + if ipasearchtimelimit is not None: + config['ipasearchtimelimit'] = str(ipasearchtimelimit) + if ipaselinuxusermaporder is not None: + config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder) + if ipauserauthtype is not None: + config['ipauserauthtype'] = ipauserauthtype + if ipausersearchfields is not None: + 
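# like the group search fields, these are joined into one comma-separated string for the IPA API +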
config['ipausersearchfields'] = ','.join(ipausersearchfields) + + return config + + +def get_config_diff(client, ipa_config, module_config): + return client.get_diff(ipa_data=ipa_config, module_data=module_config) + + +def ensure(module, client): + module_config = get_config_dict( + ipaconfigstring=module.params.get('ipaconfigstring'), + ipadefaultloginshell=module.params.get('ipadefaultloginshell'), + ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), + ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'), + ipagroupsearchfields=module.params.get('ipagroupsearchfields'), + ipahomesrootdir=module.params.get('ipahomesrootdir'), + ipakrbauthzdata=module.params.get('ipakrbauthzdata'), + ipamaxusernamelength=module.params.get('ipamaxusernamelength'), + ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'), + ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'), + ipasearchtimelimit=module.params.get('ipasearchtimelimit'), + ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'), + ipauserauthtype=module.params.get('ipauserauthtype'), + ipausersearchfields=module.params.get('ipausersearchfields'), + ) + ipa_config = client.config_show() + diff = get_config_diff(client, ipa_config, module_config) + + changed = False + new_config = {} + for module_key in diff: + if module_config.get(module_key) != ipa_config.get(module_key, None): + changed = True + new_config.update({module_key: module_config.get(module_key)}) + + if changed and not module.check_mode: + client.config_mod(name=None, item=new_config) + + return changed, client.config_show() + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + ipaconfigstring=dict(type='list', elements='str', + choices=['AllowNThash', + 'KDC:Disable Last Success', + 'KDC:Disable Lockout', + 'KDC:Disable Default Preauth for SPNs'], + aliases=['configstring']), + ipadefaultloginshell=dict(type='str', aliases=['loginshell']), + ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), + ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']), + ipagroupsearchfields=dict(type='list', elements='str', + aliases=['groupsearchfields']), + ipahomesrootdir=dict(type='str', aliases=['homesrootdir']), + ipakrbauthzdata=dict(type='list', elements='str', + choices=['MS-PAC', 'PAD', 'nfs:NONE'], + aliases=['krbauthzdata']), + ipamaxusernamelength=dict(type='int', aliases=['maxusernamelength']), + ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']), + ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']), + ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']), + ipaselinuxusermaporder=dict(type='list', elements='str', + aliases=['selinuxusermaporder']), + ipauserauthtype=dict(type='list', elements='str', + aliases=['userauthtype'], + choices=["password", "radius", "otp", "pkinit", + "hardened", "disabled"]), + ipausersearchfields=dict(type='list', elements='str', + aliases=['usersearchfields']), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = ConfigIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, user = ensure(module, client) + module.exit_json(changed=changed, user=user) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if 
__name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py b/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py new file mode 100644 index 000000000..b1a90141b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py @@ -0,0 +1,352 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_dnsrecord +author: Abhijeet Kasurde (@Akasurde) +short_description: Manage FreeIPA DNS records +description: + - Add, modify and delete an IPA DNS Record using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + zone_name: + description: + - The DNS zone name to which DNS record needs to be managed. + required: true + type: str + record_name: + description: + - The DNS record name to manage. + required: true + aliases: ["name"] + type: str + record_type: + description: + - The type of DNS record name. + - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported. + - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5." + - "'SRV' and 'MX' are added in version 2.8." + required: false + default: 'A' + choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT'] + type: str + record_value: + description: + - Manage DNS record name with this value. + - Mutually exclusive with I(record_values), and exactly one of I(record_value) and I(record_values) has to be specified. + - Use I(record_values) if you need to specify multiple values. + - In the case of 'A' or 'AAAA' record types, this will be the IP address. + - In the case of 'A6' record type, this will be the A6 Record data. + - In the case of 'CNAME' record type, this will be the hostname. + - In the case of 'DNAME' record type, this will be the DNAME target. + - In the case of 'PTR' record type, this will be the hostname. + - In the case of 'TXT' record type, this will be a text. + - In the case of 'SRV' record type, this will be a service record. + - In the case of 'MX' record type, this will be a mail exchanger record. + type: str + record_values: + description: + - Manage DNS record name with this value. + - Mutually exclusive with I(record_value), and exactly one of I(record_value) and I(record_values) has to be specified. + - In the case of 'A' or 'AAAA' record types, this will be the IP address. + - In the case of 'A6' record type, this will be the A6 Record data. + - In the case of 'CNAME' record type, this will be the hostname. + - In the case of 'DNAME' record type, this will be the DNAME target. + - In the case of 'PTR' record type, this will be the hostname. + - In the case of 'TXT' record type, this will be a text. + - In the case of 'SRV' record type, this will be a service record. + - In the case of 'MX' record type, this will be a mail exchanger record. + type: list + elements: str + record_ttl: + description: + - Set the TTL for the record. + - Applies only when adding a new or changing the value of I(record_value) or I(record_values). 
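+ - The TTL is given in seconds.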
+ required: false + type: int + state: + description: State to ensure + required: false + default: present + choices: ["absent", "present"] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure dns record is present + community.general.ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + record_name: vm-001 + record_type: 'AAAA' + record_value: '::1' + +- name: Ensure that dns records exists with a TTL + community.general.ipa_dnsrecord: + name: host02 + zone_name: example.com + record_type: 'AAAA' + record_values: '::1,fe80::1' + record_ttl: 300 + ipa_host: ipa.example.com + ipa_pass: topsecret + state: present + +- name: Ensure a PTR record is present + community.general.ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: 2.168.192.in-addr.arpa + record_name: 5 + record_type: 'PTR' + record_value: 'internal.ipa.example.com' + +- name: Ensure a TXT record is present + community.general.ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + record_name: _kerberos + record_type: 'TXT' + record_value: 'EXAMPLE.COM' + +- name: Ensure an SRV record is present + community.general.ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + record_name: _kerberos._udp.example.com + record_type: 'SRV' + record_value: '10 50 88 ipa.example.com' + +- name: Ensure an MX records are present + community.general.ipa_dnsrecord: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + record_name: '@' + record_type: 'MX' + record_values: + - '1 mailserver-01.example.com' + - '2 mailserver-02.example.com' + +- name: Ensure that dns record is removed + community.general.ipa_dnsrecord: + name: host01 + zone_name: example.com + record_type: 'AAAA' + record_value: '::1' + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + state: absent +''' + +RETURN = r''' +dnsrecord: + description: DNS record as returned by IPA API. 
+ returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class DNSRecordIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(DNSRecordIPAClient, self).__init__(module, host, port, protocol) + + def dnsrecord_find(self, zone_name, record_name): + if record_name == '@': + return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True}) + else: + return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True}) + + def dnsrecord_add(self, zone_name=None, record_name=None, details=None): + item = dict(idnsname=record_name) + + if details.get('record_ttl'): + item.update(dnsttl=details['record_ttl']) + + for value in details['record_values']: + if details['record_type'] == 'A': + item.update(a_part_ip_address=value) + elif details['record_type'] == 'AAAA': + item.update(aaaa_part_ip_address=value) + elif details['record_type'] == 'A6': + item.update(a6_part_data=value) + elif details['record_type'] == 'CNAME': + item.update(cname_part_hostname=value) + elif details['record_type'] == 'DNAME': + item.update(dname_part_target=value) + elif details['record_type'] == 'PTR': + item.update(ptr_part_hostname=value) + elif details['record_type'] == 'TXT': + item.update(txtrecord=value) + elif details['record_type'] == 'SRV': + item.update(srvrecord=value) + elif details['record_type'] == 'MX': + item.update(mxrecord=value) + + self._post_json(method='dnsrecord_add', name=zone_name, item=item) + + def dnsrecord_mod(self, zone_name=None, record_name=None, details=None): + item = get_dnsrecord_dict(details) + item.update(idnsname=record_name) + if details.get('record_ttl'): + item.update(dnsttl=details['record_ttl']) + return self._post_json(method='dnsrecord_mod', name=zone_name, item=item) + + def dnsrecord_del(self, zone_name=None, record_name=None, details=None): + item = get_dnsrecord_dict(details) + item.update(idnsname=record_name) + return self._post_json(method='dnsrecord_del', name=zone_name, item=item) + + +def get_dnsrecord_dict(details=None): + module_dnsrecord = dict() + if details['record_type'] == 'A' and details['record_values']: + module_dnsrecord.update(arecord=details['record_values']) + elif details['record_type'] == 'AAAA' and details['record_values']: + module_dnsrecord.update(aaaarecord=details['record_values']) + elif details['record_type'] == 'A6' and details['record_values']: + module_dnsrecord.update(a6record=details['record_values']) + elif details['record_type'] == 'CNAME' and details['record_values']: + module_dnsrecord.update(cnamerecord=details['record_values']) + elif details['record_type'] == 'DNAME' and details['record_values']: + module_dnsrecord.update(dnamerecord=details['record_values']) + elif details['record_type'] == 'PTR' and details['record_values']: + module_dnsrecord.update(ptrrecord=details['record_values']) + elif details['record_type'] == 'TXT' and details['record_values']: + module_dnsrecord.update(txtrecord=details['record_values']) + elif details['record_type'] == 'SRV' and details['record_values']: + module_dnsrecord.update(srvrecord=details['record_values']) + elif details['record_type'] == 'MX' and details['record_values']: + module_dnsrecord.update(mxrecord=details['record_values']) + + if 
details.get('record_ttl'): + module_dnsrecord.update(dnsttl=details['record_ttl']) + + return module_dnsrecord + + +def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord): + details = get_dnsrecord_dict(module_dnsrecord) + return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details) + + +def ensure(module, client): + zone_name = module.params['zone_name'] + record_name = module.params['record_name'] + record_ttl = module.params.get('record_ttl') + state = module.params['state'] + + ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name) + + record_values = module.params['record_values'] + if module.params['record_value'] is not None: + record_values = [module.params['record_value']] + + module_dnsrecord = dict( + record_type=module.params['record_type'], + record_values=record_values, + record_ttl=to_native(record_ttl, nonstring='passthru'), + ) + + # ttl is not required to change records + if module_dnsrecord['record_ttl'] is None: + module_dnsrecord.pop('record_ttl') + + changed = False + if state == 'present': + if not ipa_dnsrecord: + changed = True + if not module.check_mode: + client.dnsrecord_add(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + else: + diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord) + if len(diff) > 0: + changed = True + if not module.check_mode: + client.dnsrecord_mod(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + else: + if ipa_dnsrecord: + changed = True + if not module.check_mode: + client.dnsrecord_del(zone_name=zone_name, + record_name=record_name, + details=module_dnsrecord) + + return changed, client.dnsrecord_find(zone_name, record_name) + + +def main(): + record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX'] + argument_spec = ipa_argument_spec() + argument_spec.update( + zone_name=dict(type='str', required=True), + record_name=dict(type='str', aliases=['name'], required=True), + record_type=dict(type='str', default='A', choices=record_types), + record_value=dict(type='str'), + record_values=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + record_ttl=dict(type='int', required=False), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[['record_value', 'record_values']], + required_one_of=[['record_value', 'record_values']], + supports_check_mode=True + ) + + client = DNSRecordIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, record = ensure(module, client) + module.exit_json(changed=changed, record=record) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_dnszone.py b/ansible_collections/community/general/plugins/modules/ipa_dnszone.py new file mode 100644 index 000000000..06c93841e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_dnszone.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com) +# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) + # SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_dnszone +author: Fran Fitzpatrick (@fxfitz) +short_description: Manage FreeIPA DNS Zones +description: + - Add and delete an IPA DNS zone using the IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + zone_name: + description: + - The DNS zone name that needs to be managed. + required: true + type: str + state: + description: State to ensure. + required: false + default: present + choices: ["absent", "present"] + type: str + dynamicupdate: + description: Apply dynamic update to zone. + default: false + type: bool + allowsyncptr: + description: Allow synchronization of forward and reverse records in the zone. + default: false + type: bool + version_added: 4.3.0 +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure dns zone is present + community.general.ipa_dnszone: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + +- name: Ensure dns zone is present and allows dynamic updates + community.general.ipa_dnszone: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + dynamicupdate: true + +- name: Ensure that dns zone is removed + community.general.ipa_dnszone: + zone_name: example.com + ipa_host: localhost + ipa_user: admin + ipa_pass: topsecret + state: absent + +- name: Ensure dns zone is present and allows PTR synchronization + community.general.ipa_dnszone: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + zone_name: example.com + allowsyncptr: true +''' + +RETURN = r''' +zone: + description: DNS zone as returned by IPA API.
+ returned: always + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class DNSZoneIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(DNSZoneIPAClient, self).__init__(module, host, port, protocol) + + def dnszone_find(self, zone_name, details=None): + items = {'all': 'true', + 'idnsname': zone_name, } + if details is not None: + items.update(details) + + return self._post_json( + method='dnszone_find', + name=zone_name, + item=items + ) + + def dnszone_add(self, zone_name=None, details=None): + items = {} + if details is not None: + items.update(details) + + return self._post_json( + method='dnszone_add', + name=zone_name, + item=items + ) + + def dnszone_mod(self, zone_name=None, details=None): + items = {} + if details is not None: + items.update(details) + + return self._post_json( + method='dnszone_mod', + name=zone_name, + item=items + ) + + def dnszone_del(self, zone_name=None, record_name=None, details=None): + return self._post_json( + method='dnszone_del', name=zone_name, item={}) + + +def ensure(module, client): + zone_name = module.params['zone_name'] + state = module.params['state'] + dynamicupdate = module.params['dynamicupdate'] + allowsyncptr = module.params['allowsyncptr'] + + changed = False + + # does zone exist + ipa_dnszone = client.dnszone_find(zone_name) + + if state == 'present': + if not ipa_dnszone: + + changed = True + if not module.check_mode: + client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) + elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper(): + changed = True + if not module.check_mode: + client.dnszone_mod(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) + else: + changed = False + + # state is absent + else: + # check for generic zone existence + if ipa_dnszone: + changed = True + if not module.check_mode: + client.dnszone_del(zone_name=zone_name) + + return changed, client.dnszone_find(zone_name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(zone_name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + dynamicupdate=dict(type='bool', required=False, default=False), + allowsyncptr=dict(type='bool', required=False, default=False), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = DNSZoneIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, zone = ensure(module, client) + module.exit_json(changed=changed, zone=zone) + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_group.py b/ansible_collections/community/general/plugins/modules/ipa_group.py new file mode 100644 index 000000000..87e7f0e66 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_group.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ansible 
Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_group +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA group +description: + - Add, modify and delete group within IPA server +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + append: + description: + - If C(true), add the listed I(user) and I(group) to the group members. + - If C(false), only the listed I(user) and I(group) will be group members, removing any other members. + default: false + type: bool + version_added: 4.0.0 + cn: + description: + - Canonical name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ['name'] + type: str + description: + description: + - Description of the group. + type: str + external: + description: + - Allow adding external non-IPA members from trusted domains. + type: bool + gidnumber: + description: + - GID (use this option to set it manually). + aliases: ['gid'] + type: str + group: + description: + - List of group names assigned to this group. + - If I(append=false) and an empty list is passed all groups will be removed from this group. + - Groups that are already assigned but not passed will be removed. + - If I(append=true) the listed groups will be assigned without removing other groups. + - If option is omitted assigned groups will not be checked or changed. + type: list + elements: str + nonposix: + description: + - Create as a non-POSIX group. + type: bool + user: + description: + - List of user names assigned to this group. + - If I(append=false) and an empty list is passed all users will be removed from this group. + - Users that are already assigned but not passed will be removed. + - If I(append=true) the listed users will be assigned without removing other users. + - If option is omitted assigned users will not be checked or changed. + type: list + elements: str + external_user: + description: + - List of external users assigned to this group. + - Behaves identically to I(user) with respect to I(append) attribute. + - List entries can be in C(DOMAIN\\username) or SID format. + - Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users. + This is because only SIDs are returned by IPA query. + - I(external=true) is needed for this option to work. 
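+ # the SID caveat above exists because membership comparison uses the SIDs returned by the IPA query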
+ type: list + elements: str + version_added: 6.3.0 + state: + description: + - State to ensure + default: "present" + choices: ["absent", "present"] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure group is present + community.general.ipa_group: + name: oinstall + gidnumber: '54321' + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that groups sysops and appops are assigned to ops but no other group + community.general.ipa_group: + name: ops + group: + - sysops + - appops + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that users linus and larry are assign to the group, but no other user + community.general.ipa_group: + name: sysops + user: + - linus + - larry + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that new starter named john is member of the group, without removing other members + community.general.ipa_group: + name: developers + user: + - john + append: true + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Add external user to a group + community.general.ipa_group: + name: developers + external: true + append: true + external_user: + - S-1-5-21-123-1234-12345-63421 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Add a user from MYDOMAIN + community.general.ipa_group: + name: developers + external: true + append: true + external_user: + - MYDOMAIN\\john + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure group is absent + community.general.ipa_group: + name: sysops + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +group: + description: Group as returned by IPA API + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class GroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(GroupIPAClient, self).__init__(module, host, port, protocol) + + def group_find(self, name): + return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name}) + + def group_add(self, name, item): + return self._post_json(method='group_add', name=name, item=item) + + def group_mod(self, name, item): + return self._post_json(method='group_mod', name=name, item=item) + + def group_del(self, name): + return self._post_json(method='group_del', name=name) + + def group_add_member(self, name, item): + return self._post_json(method='group_add_member', name=name, item=item) + + def group_add_member_group(self, name, item): + return self.group_add_member(name=name, item={'group': item}) + + def group_add_member_user(self, name, item): + return self.group_add_member(name=name, item={'user': item}) + + def group_add_member_externaluser(self, name, item): + return self.group_add_member(name=name, item={'ipaexternalmember': item}) + + def group_remove_member(self, name, item): + return self._post_json(method='group_remove_member', name=name, item=item) + + def group_remove_member_group(self, name, item): + return self.group_remove_member(name=name, item={'group': item}) + + def group_remove_member_user(self, name, item): + return 
self.group_remove_member(name=name, item={'user': item}) + + def group_remove_member_externaluser(self, name, item): + return self.group_remove_member(name=name, item={'ipaexternalmember': item}) + + +def get_group_dict(description=None, external=None, gid=None, nonposix=None): + group = {} + if description is not None: + group['description'] = description + if external is not None: + group['external'] = external + if gid is not None: + group['gidnumber'] = gid + if nonposix is not None: + group['nonposix'] = nonposix + return group + + +def get_group_diff(client, ipa_group, module_group): + data = [] + # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed. + if 'nonposix' in module_group: + # Only non-posix groups can be changed to posix + if not module_group['nonposix'] and ipa_group.get('nonposix'): + module_group['posix'] = True + del module_group['nonposix'] + + if 'external' in module_group: + if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'): + del module_group['external'] + + return client.get_diff(ipa_data=ipa_group, module_data=module_group) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + group = module.params['group'] + user = module.params['user'] + external = module.params['external'] + external_user = module.params['external_user'] + append = module.params['append'] + + module_group = get_group_dict(description=module.params['description'], + external=external, + gid=module.params['gidnumber'], + nonposix=module.params['nonposix']) + ipa_group = client.group_find(name=name) + + if (not (external or external_user is None)): + module.fail_json("external_user can only be set if external = True") + + changed = False + if state == 'present': + if not ipa_group: + changed = True + if not module.check_mode: + ipa_group = client.group_add(name, item=module_group) + else: + diff = get_group_diff(client, ipa_group, module_group) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_group.get(key) + client.group_mod(name=name, item=data) + + if group is not None: + changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group, + client.group_add_member_group, + client.group_remove_member_group, + append=append) or changed + + if user is not None: + changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user, + client.group_add_member_user, + client.group_remove_member_user, + append=append) or changed + + if external_user is not None: + changed = client.modify_if_diff(name, ipa_group.get('ipaexternalmember', []), external_user, + client.group_add_member_externaluser, + client.group_remove_member_externaluser, + append=append) or changed + else: + if ipa_group: + changed = True + if not module.check_mode: + client.group_del(name) + + return changed, client.group_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + external=dict(type='bool'), + external_user=dict(type='list', elements='str'), + gidnumber=dict(type='str', aliases=['gid']), + group=dict(type='list', elements='str'), + nonposix=dict(type='bool'), + state=dict(type='str', default='present', choices=['present', 'absent']), + user=dict(type='list', elements='str'), + append=dict(type='bool', default=False)) + + module = AnsibleModule(argument_spec=argument_spec, + 
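# check mode is fully supported; diff mode is not implemented (see the attributes section above) +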
supports_check_mode=True, + ) + + client = GroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, group = ensure(module, client) + module.exit_json(changed=changed, group=group) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py b/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py new file mode 100644 index 000000000..b7633262b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py @@ -0,0 +1,362 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_hbacrule +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA HBAC rule +description: + - Add, modify or delete an IPA HBAC rule using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Canonical name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: Description. + type: str + host: + description: + - List of host names to assign. + - If an empty list is passed all hosts will be removed from the rule. + - If option is omitted hosts will not be checked or changed. + required: false + type: list + elements: str + hostcategory: + description: Host category. + choices: ['all'] + type: str + hostgroup: + description: + - List of hostgroup names to assign. + - If an empty list is passed all hostgroups will be removed from the rule. + - If option is omitted hostgroups will not be checked or changed. + type: list + elements: str + service: + description: + - List of service names to assign. + - If an empty list is passed all services will be removed from the rule. + - If option is omitted services will not be checked or changed. + type: list + elements: str + servicecategory: + description: Service category. + choices: ['all'] + type: str + servicegroup: + description: + - List of service group names to assign. + - If an empty list is passed all assigned service groups will be removed from the rule. + - If option is omitted service groups will not be checked or changed. + type: list + elements: str + sourcehost: + description: + - List of source host names to assign. + - If an empty list is passed all assigned source hosts will be removed from the rule. + - If option is omitted source hosts will not be checked or changed. + type: list + elements: str + sourcehostcategory: + description: Source host category. + choices: ['all'] + type: str + sourcehostgroup: + description: + - List of source host group names to assign. + - If an empty list is passed all assigned source host groups will be removed from the rule. + - If option is omitted source host groups will not be checked or changed. + type: list + elements: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "disabled", "enabled", "present"] + type: str + user: + description: + - List of user names to assign. + - If an empty list is passed all assigned users will be removed from the rule. + - If option is omitted users will not be checked or changed. + type: list + elements: str + usercategory: + description: User category. + choices: ['all'] + type: str + usergroup: + description: + - List of user group names to assign. + - If an empty list is passed all assigned user groups will be removed from the rule. + - If option is omitted user groups will not be checked or changed. + type: list + elements: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure rule to allow all users to access any host from any host + community.general.ipa_hbacrule: + name: allow_all + description: Allow all users to access any host from any host + hostcategory: all + servicecategory: all + usercategory: all + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure rule with certain limitations + community.general.ipa_hbacrule: + name: allow_all_developers_access_to_db + description: Allow all developers to access any database from any host + hostgroup: + - db-server + usergroup: + - developers + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure rule is absent + community.general.ipa_hbacrule: + name: rule_to_be_deleted + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +hbacrule: + description: HBAC rule as returned by IPA API.
+ returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class HBACRuleIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(HBACRuleIPAClient, self).__init__(module, host, port, protocol) + + def hbacrule_find(self, name): + return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name}) + + def hbacrule_add(self, name, item): + return self._post_json(method='hbacrule_add', name=name, item=item) + + def hbacrule_mod(self, name, item): + return self._post_json(method='hbacrule_mod', name=name, item=item) + + def hbacrule_del(self, name): + return self._post_json(method='hbacrule_del', name=name) + + def hbacrule_add_host(self, name, item): + return self._post_json(method='hbacrule_add_host', name=name, item=item) + + def hbacrule_remove_host(self, name, item): + return self._post_json(method='hbacrule_remove_host', name=name, item=item) + + def hbacrule_add_service(self, name, item): + return self._post_json(method='hbacrule_add_service', name=name, item=item) + + def hbacrule_remove_service(self, name, item): + return self._post_json(method='hbacrule_remove_service', name=name, item=item) + + def hbacrule_add_user(self, name, item): + return self._post_json(method='hbacrule_add_user', name=name, item=item) + + def hbacrule_remove_user(self, name, item): + return self._post_json(method='hbacrule_remove_user', name=name, item=item) + + def hbacrule_add_sourcehost(self, name, item): + return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item) + + def hbacrule_remove_sourcehost(self, name, item): + return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item) + + +def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None, + sourcehostcategory=None, + usercategory=None): + data = {} + if description is not None: + data['description'] = description + if hostcategory is not None: + data['hostcategory'] = hostcategory + if ipaenabledflag is not None: + data['ipaenabledflag'] = ipaenabledflag + if servicecategory is not None: + data['servicecategory'] = servicecategory + if sourcehostcategory is not None: + data['sourcehostcategory'] = sourcehostcategory + if usercategory is not None: + data['usercategory'] = usercategory + return data + + +def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule): + return client.get_diff(ipa_data=ipa_hbcarule, module_data=module_hbcarule) + + +def ensure(module, client): + name = module.params['cn'] + state = module.params['state'] + + if state in ['present', 'enabled']: + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = 'FALSE' + + host = module.params['host'] + hostcategory = module.params['hostcategory'] + hostgroup = module.params['hostgroup'] + service = module.params['service'] + servicecategory = module.params['servicecategory'] + servicegroup = module.params['servicegroup'] + sourcehost = module.params['sourcehost'] + sourcehostcategory = module.params['sourcehostcategory'] + sourcehostgroup = module.params['sourcehostgroup'] + user = module.params['user'] + usercategory = module.params['usercategory'] + usergroup = module.params['usergroup'] + + module_hbacrule = get_hbacrule_dict(description=module.params['description'], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + 
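# description, the category attributes and the enabled flag are diffed as one dict; member lists are reconciled separately below +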
servicecategory=servicecategory, + sourcehostcategory=sourcehostcategory, + usercategory=usercategory) + ipa_hbacrule = client.hbacrule_find(name=name) + + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_hbacrule: + changed = True + if not module.check_mode: + ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule) + else: + diff = get_hbcarule_diff(client, ipa_hbacrule, module_hbacrule) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_hbacrule.get(key) + client.hbacrule_mod(name=name, item=data) + + if host is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host, + client.hbacrule_add_host, + client.hbacrule_remove_host, 'host') or changed + + if hostgroup is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup, + client.hbacrule_add_host, + client.hbacrule_remove_host, 'hostgroup') or changed + + if service is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service, + client.hbacrule_add_service, + client.hbacrule_remove_service, 'hbacsvc') or changed + + if servicegroup is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []), + servicegroup, + client.hbacrule_add_service, + client.hbacrule_remove_service, 'hbacsvcgroup') or changed + + if sourcehost is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost, + client.hbacrule_add_sourcehost, + client.hbacrule_remove_sourcehost, 'host') or changed + + if sourcehostgroup is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup, + client.hbacrule_add_sourcehost, + client.hbacrule_remove_sourcehost, 'hostgroup') or changed + + if user is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user, + client.hbacrule_add_user, + client.hbacrule_remove_user, 'user') or changed + + if usergroup is not None: + changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup, + client.hbacrule_add_user, + client.hbacrule_remove_user, 'group') or changed + else: + if ipa_hbacrule: + changed = True + if not module.check_mode: + client.hbacrule_del(name=name) + + return changed, client.hbacrule_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + host=dict(type='list', elements='str'), + hostcategory=dict(type='str', choices=['all']), + hostgroup=dict(type='list', elements='str'), + service=dict(type='list', elements='str'), + servicecategory=dict(type='str', choices=['all']), + servicegroup=dict(type='list', elements='str'), + sourcehost=dict(type='list', elements='str'), + sourcehostcategory=dict(type='str', choices=['all']), + sourcehostgroup=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + user=dict(type='list', elements='str'), + usercategory=dict(type='str', choices=['all']), + usergroup=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True + ) + + client = HBACRuleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + 
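# log in first; any IPA API failure is reported via fail_json below +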
client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, hbacrule = ensure(module, client)
+        module.exit_json(changed=changed, hbacrule=hbacrule)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_host.py b/ansible_collections/community/general/plugins/modules/ipa_host.py
new file mode 100644
index 000000000..d561401d4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_host.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+- Add, modify and delete an IPA host using IPA API.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  fqdn:
+    description:
+    - Fully qualified domain name.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ["name"]
+    type: str
+  description:
+    description:
+    - A description of this host.
+    type: str
+  force:
+    description:
+    - Force host name even if not in DNS.
+    required: false
+    type: bool
+  ip_address:
+    description:
+    - Add the host to DNS with this IP address.
+    type: str
+  mac_address:
+    description:
+    - List of hardware MAC address(es) of this host.
+    - If option is omitted MAC addresses will not be checked or changed.
+    - If an empty list is passed all assigned MAC addresses will be removed.
+    - MAC addresses that are already assigned but not passed will be removed.
+    aliases: ["macaddress"]
+    type: list
+    elements: str
+  ns_host_location:
+    description:
+    - Host location (e.g. "Lab 2")
+    aliases: ["nshostlocation"]
+    type: str
+  ns_hardware_platform:
+    description:
+    - Host hardware platform (e.g. "Lenovo T61")
+    aliases: ["nshardwareplatform"]
+    type: str
+  ns_os_version:
+    description:
+    - Host operating system and version (e.g. "Fedora 9")
+    aliases: ["nsosversion"]
+    type: str
+  user_certificate:
+    description:
+    - List of Base-64 encoded server certificates.
+    - If option is omitted certificates will not be checked or changed.
+    - If an empty list is passed all assigned certificates will be removed.
+    - Certificates already assigned but not passed will be removed.
+    aliases: ["usercertificate"]
+    type: list
+    elements: str
+  state:
+    description: State to ensure.
+    default: present
+    choices: ["absent", "disabled", "enabled", "present"]
+    type: str
+  update_dns:
+    description:
+    - If set to C(true) together with I(state=absent), the DNS records of the host managed by FreeIPA DNS are removed as well.
+    - This option has no effect for states other than C(absent).
+    type: bool
+  random_password:
+    description: Generate a random password to be used in bulk enrollment.
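
The mac_address and user_certificate options above follow the same declarative list semantics as the other ipa_* modules: the list passed in is the complete desired state. A minimal standalone sketch of that reconciliation (names are illustrative, not part of the module):

    def reconcile(current, desired):
        """Return (to_add, to_remove) that turn the current list into the desired one."""
        current_set, desired_set = set(current), set(desired)
        return sorted(desired_set - current_set), sorted(current_set - desired_set)

    # An empty desired list therefore removes every currently assigned item:
    print(reconcile(['08:00:27:E3:B1:2D'], []))  # ([], ['08:00:27:E3:B1:2D'])
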
+ type: bool +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure host is present + community.general.ipa_host: + name: host01.example.com + description: Example host + ip_address: 192.168.0.123 + ns_host_location: Lab + ns_os_version: CentOS 7 + ns_hardware_platform: Lenovo T61 + mac_address: + - "08:00:27:E3:B1:2D" + - "52:54:00:BD:97:1E" + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Generate a random password for bulk enrolment + community.general.ipa_host: + name: host01.example.com + description: Example host + ip_address: 192.168.0.123 + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + validate_certs: false + random_password: true + +- name: Ensure host is disabled + community.general.ipa_host: + name: host01.example.com + state: disabled + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that all user certificates are removed + community.general.ipa_host: + name: host01.example.com + user_certificate: [] + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure host is absent + community.general.ipa_host: + name: host01.example.com + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure host and its DNS record is absent + community.general.ipa_host: + name: host01.example.com + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + update_dns: true +''' + +RETURN = r''' +host: + description: Host as returned by IPA API. + returned: always + type: dict +host_diff: + description: List of options that differ and would be changed + returned: if check mode and a difference is found + type: list +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class HostIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(HostIPAClient, self).__init__(module, host, port, protocol) + + def host_show(self, name): + return self._post_json(method='host_show', name=name) + + def host_find(self, name): + return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name}) + + def host_add(self, name, host): + return self._post_json(method='host_add', name=name, item=host) + + def host_mod(self, name, host): + return self._post_json(method='host_mod', name=name, item=host) + + def host_del(self, name, update_dns): + return self._post_json(method='host_del', name=name, item={'updatedns': update_dns}) + + def host_disable(self, name): + return self._post_json(method='host_disable', name=name) + + +def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None, + ns_os_version=None, user_certificate=None, mac_address=None, random_password=None): + data = {} + if description is not None: + data['description'] = description + if force is not None: + data['force'] = force + if ip_address is not None: + data['ip_address'] = ip_address + if ns_host_location is not None: + data['nshostlocation'] = ns_host_location + if ns_hardware_platform is not None: + data['nshardwareplatform'] = ns_hardware_platform + if ns_os_version is not None: + data['nsosversion'] = ns_os_version + if user_certificate is not None: 
+ data['usercertificate'] = [{"__base64__": item} for item in user_certificate] + if mac_address is not None: + data['macaddress'] = mac_address + if random_password is not None: + data['random'] = random_password + return data + + +def get_host_diff(client, ipa_host, module_host): + non_updateable_keys = ['force', 'ip_address'] + if not module_host.get('random'): + non_updateable_keys.append('random') + for key in non_updateable_keys: + if key in module_host: + del module_host[key] + + return client.get_diff(ipa_data=ipa_host, module_data=module_host) + + +def ensure(module, client): + name = module.params['fqdn'] + state = module.params['state'] + + ipa_host = client.host_find(name=name) + module_host = get_host_dict(description=module.params['description'], + force=module.params['force'], ip_address=module.params['ip_address'], + ns_host_location=module.params['ns_host_location'], + ns_hardware_platform=module.params['ns_hardware_platform'], + ns_os_version=module.params['ns_os_version'], + user_certificate=module.params['user_certificate'], + mac_address=module.params['mac_address'], + random_password=module.params.get('random_password'), + ) + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_host: + changed = True + if not module.check_mode: + # OTP password generated by FreeIPA is visible only for host_add command + # so, return directly from here. + return changed, client.host_add(name=name, host=module_host) + else: + diff = get_host_diff(client, ipa_host, module_host) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_host.get(key) + ipa_host_show = client.host_show(name=name) + if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'): + client.host_disable(name=name) + return changed, client.host_mod(name=name, host=data) + + else: + if ipa_host: + changed = True + update_dns = module.params.get('update_dns', False) + if not module.check_mode: + client.host_del(name=name, update_dns=update_dns) + + return changed, client.host_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(description=dict(type='str'), + fqdn=dict(type='str', required=True, aliases=['name']), + force=dict(type='bool'), + ip_address=dict(type='str'), + ns_host_location=dict(type='str', aliases=['nshostlocation']), + ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']), + ns_os_version=dict(type='str', aliases=['nsosversion']), + user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'), + mac_address=dict(type='list', aliases=['macaddress'], elements='str'), + update_dns=dict(type='bool'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + random_password=dict(type='bool', no_log=False),) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = HostIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, host = ensure(module, client) + module.exit_json(changed=changed, host=host) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py 
b/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
new file mode 100644
index 000000000..12232de89
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+- Add, modify and delete an IPA host-group using IPA API.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  append:
+    description:
+    - If C(true), add the listed I(host) to the I(hostgroup).
+    - If C(false), only the listed I(host) will be in I(hostgroup), removing any other hosts.
+    default: false
+    type: bool
+    version_added: 6.6.0
+  cn:
+    description:
+    - Name of host-group.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ["name"]
+    type: str
+  description:
+    description:
+    - Description.
+    type: str
+  host:
+    description:
+    - List of hosts that belong to the host-group.
+    - If an empty list is passed all hosts will be removed from the group.
+    - If option is omitted hosts will not be checked or changed.
+    - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+    type: list
+    elements: str
+  hostgroup:
+    description:
+    - List of host-groups that belong to this host-group.
+    - If an empty list is passed all host-groups will be removed from the group.
+    - If option is omitted host-groups will not be checked or changed.
+    - If option is passed all assigned hostgroups that are not passed will be unassigned from the group.
+    type: list
+    elements: str
+  state:
+    description:
+    - State to ensure.
+    default: "present"
+    choices: ["absent", "disabled", "enabled", "present"]
+    type: str
+extends_documentation_fragment:
+  - community.general.ipa.documentation
+  - community.general.attributes

+'''
+
+EXAMPLES = r'''
+- name: Ensure host-group databases is present
+  community.general.ipa_hostgroup:
+    name: databases
+    state: present
+    host:
+    - db.example.com
+    hostgroup:
+    - mysql-server
+    - oracle-server
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure host-group databases is absent
+  community.general.ipa_hostgroup:
+    name: databases
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+'''
+
+RETURN = r'''
+hostgroup:
+  description: Hostgroup as returned by IPA API.
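
The append option described above decides whether the host and hostgroup lists are additive or authoritative. A rough sketch of that behaviour (hypothetical helper, not the module's actual code, which delegates to modify_if_diff):

    def plan_changes(assigned, requested, append=False):
        assigned_set, requested_set = set(assigned), set(requested)
        to_add = sorted(requested_set - assigned_set)
        # With append=true nothing is removed; with append=false the requested
        # list is authoritative and any extra members are dropped.
        to_remove = [] if append else sorted(assigned_set - requested_set)
        return to_add, to_remove

    print(plan_changes(['db.example.com'], ['web01.example.com'], append=True))   # (['web01.example.com'], [])
    print(plan_changes(['db.example.com'], ['web01.example.com'], append=False))  # (['web01.example.com'], ['db.example.com'])
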
+ returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class HostGroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(HostGroupIPAClient, self).__init__(module, host, port, protocol) + + def hostgroup_find(self, name): + return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name}) + + def hostgroup_add(self, name, item): + return self._post_json(method='hostgroup_add', name=name, item=item) + + def hostgroup_mod(self, name, item): + return self._post_json(method='hostgroup_mod', name=name, item=item) + + def hostgroup_del(self, name): + return self._post_json(method='hostgroup_del', name=name) + + def hostgroup_add_member(self, name, item): + return self._post_json(method='hostgroup_add_member', name=name, item=item) + + def hostgroup_add_host(self, name, item): + return self.hostgroup_add_member(name=name, item={'host': item}) + + def hostgroup_add_hostgroup(self, name, item): + return self.hostgroup_add_member(name=name, item={'hostgroup': item}) + + def hostgroup_remove_member(self, name, item): + return self._post_json(method='hostgroup_remove_member', name=name, item=item) + + def hostgroup_remove_host(self, name, item): + return self.hostgroup_remove_member(name=name, item={'host': item}) + + def hostgroup_remove_hostgroup(self, name, item): + return self.hostgroup_remove_member(name=name, item={'hostgroup': item}) + + +def get_hostgroup_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup): + return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup) + + +def ensure(module, client): + name = module.params['cn'] + state = module.params['state'] + host = module.params['host'] + hostgroup = module.params['hostgroup'] + append = module.params['append'] + + ipa_hostgroup = client.hostgroup_find(name=name) + module_hostgroup = get_hostgroup_dict(description=module.params['description']) + + changed = False + if state == 'present': + if not ipa_hostgroup: + changed = True + if not module.check_mode: + ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup) + else: + diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_hostgroup.get(key) + client.hostgroup_mod(name=name, item=data) + + if host is not None: + changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), + [item.lower() for item in host], + client.hostgroup_add_host, + client.hostgroup_remove_host, + append=append) or changed + + if hostgroup is not None: + changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []), + [item.lower() for item in hostgroup], + client.hostgroup_add_hostgroup, + client.hostgroup_remove_hostgroup, + append=append) or changed + + else: + if ipa_hostgroup: + changed = True + if not module.check_mode: + client.hostgroup_del(name=name) + + return changed, client.hostgroup_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + host=dict(type='list', 
elements='str'), + hostgroup=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + append=dict(type='bool', default=False)) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = HostGroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, hostgroup = ensure(module, client) + module.exit_json(changed=changed, hostgroup=hostgroup) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py b/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py new file mode 100644 index 000000000..e2d8f0cd5 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Ansible Project +# Heavily influenced from Fran Fitzpatrick ipa_config module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_otpconfig +author: justchris1 (@justchris1) +short_description: Manage FreeIPA OTP Configuration Settings +version_added: 2.5.0 +description: + - Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords). +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + ipatokentotpauthwindow: + description: TOTP authentication window in seconds. + aliases: ["totpauthwindow"] + type: int + ipatokentotpsyncwindow: + description: TOTP synchronization window in seconds. + aliases: ["totpsyncwindow"] + type: int + ipatokenhotpauthwindow: + description: HOTP authentication window in number of hops. + aliases: ["hotpauthwindow"] + type: int + ipatokenhotpsyncwindow: + description: HOTP synchronization window in hops. + aliases: ["hotpsyncwindow"] + type: int +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure the TOTP authentication window is set to 300 seconds + community.general.ipa_otpconfig: + ipatokentotpauthwindow: '300' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the TOTP synchronization window is set to 86400 seconds + community.general.ipa_otpconfig: + ipatokentotpsyncwindow: '86400' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the HOTP authentication window is set to 10 hops + community.general.ipa_otpconfig: + ipatokenhotpauthwindow: '10' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the HOTP synchronization window is set to 100 hops + community.general.ipa_otpconfig: + ipatokenhotpsyncwindow: '100' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret +''' + +RETURN = r''' +otpconfig: + description: OTP configuration as returned by IPA API. 
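
The otpconfig module only sends attributes whose desired value differs from what otpconfig_show reports, which keeps repeated runs idempotent. A simplified sketch of that comparison (the real work is done by get_diff in the shared ipa module_utils):

    def changed_keys(server, desired):
        # Keep only the keys whose desired value differs from the server's.
        return {k: v for k, v in desired.items() if server.get(k) != v}

    server = {'ipatokentotpauthwindow': '300'}
    desired = {'ipatokentotpauthwindow': '300', 'ipatokenhotpauthwindow': '10'}
    print(changed_keys(server, desired))  # {'ipatokenhotpauthwindow': '10'}
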
+ returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class OTPConfigIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(OTPConfigIPAClient, self).__init__(module, host, port, protocol) + + def otpconfig_show(self): + return self._post_json(method='otpconfig_show', name=None) + + def otpconfig_mod(self, name, item): + return self._post_json(method='otpconfig_mod', name=name, item=item) + + +def get_otpconfig_dict(ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None, + ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None): + + config = {} + if ipatokentotpauthwindow is not None: + config['ipatokentotpauthwindow'] = str(ipatokentotpauthwindow) + if ipatokentotpsyncwindow is not None: + config['ipatokentotpsyncwindow'] = str(ipatokentotpsyncwindow) + if ipatokenhotpauthwindow is not None: + config['ipatokenhotpauthwindow'] = str(ipatokenhotpauthwindow) + if ipatokenhotpsyncwindow is not None: + config['ipatokenhotpsyncwindow'] = str(ipatokenhotpsyncwindow) + + return config + + +def get_otpconfig_diff(client, ipa_config, module_config): + return client.get_diff(ipa_data=ipa_config, module_data=module_config) + + +def ensure(module, client): + module_otpconfig = get_otpconfig_dict( + ipatokentotpauthwindow=module.params.get('ipatokentotpauthwindow'), + ipatokentotpsyncwindow=module.params.get('ipatokentotpsyncwindow'), + ipatokenhotpauthwindow=module.params.get('ipatokenhotpauthwindow'), + ipatokenhotpsyncwindow=module.params.get('ipatokenhotpsyncwindow'), + ) + ipa_otpconfig = client.otpconfig_show() + diff = get_otpconfig_diff(client, ipa_otpconfig, module_otpconfig) + + changed = False + new_otpconfig = {} + for module_key in diff: + if module_otpconfig.get(module_key) != ipa_otpconfig.get(module_key, None): + changed = True + new_otpconfig.update({module_key: module_otpconfig.get(module_key)}) + + if changed and not module.check_mode: + client.otpconfig_mod(name=None, item=new_otpconfig) + + return changed, client.otpconfig_show() + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + ipatokentotpauthwindow=dict(type='int', aliases=['totpauthwindow'], no_log=False), + ipatokentotpsyncwindow=dict(type='int', aliases=['totpsyncwindow'], no_log=False), + ipatokenhotpauthwindow=dict(type='int', aliases=['hotpauthwindow'], no_log=False), + ipatokenhotpsyncwindow=dict(type='int', aliases=['hotpsyncwindow'], no_log=False), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = OTPConfigIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, otpconfig = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, otpconfig=otpconfig) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_otptoken.py b/ansible_collections/community/general/plugins/modules/ipa_otptoken.py new file mode 100644 index 000000000..f25ab6023 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_otptoken.py @@ 
-0,0 +1,534 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_otptoken +author: justchris1 (@justchris1) +short_description: Manage FreeIPA OTPs +version_added: 2.5.0 +description: + - Add, modify, and delete One Time Passwords in IPA. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + uniqueid: + description: Unique ID of the token in IPA. + required: true + aliases: ["name"] + type: str + newuniqueid: + description: If specified, the unique id specified will be changed to this. + type: str + otptype: + description: + - Type of OTP. + - "B(Note:) Cannot be modified after OTP is created." + type: str + choices: [ totp, hotp ] + secretkey: + description: + - Token secret (Base64). + - If OTP is created and this is not specified, a random secret will be generated by IPA. + - "B(Note:) Cannot be modified after OTP is created." + type: str + description: + description: Description of the token (informational only). + type: str + owner: + description: Assigned user of the token. + type: str + enabled: + description: Mark the token as enabled (default C(true)). + default: true + type: bool + notbefore: + description: + - First date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22. + type: str + notafter: + description: + - Last date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22. + type: str + vendor: + description: Token vendor name (informational only). + type: str + model: + description: Token model (informational only). + type: str + serial: + description: Token serial (informational only). + type: str + state: + description: State to ensure. + choices: ['present', 'absent'] + default: 'present' + type: str + algorithm: + description: + - Token hash algorithm. + - "B(Note:) Cannot be modified after OTP is created." + choices: ['sha1', 'sha256', 'sha384', 'sha512'] + type: str + digits: + description: + - Number of digits each token code will have. + - "B(Note:) Cannot be modified after OTP is created." + choices: [ 6, 8 ] + type: int + offset: + description: + - TOTP token / IPA server time difference. + - "B(Note:) Cannot be modified after OTP is created." + type: int + interval: + description: + - Length of TOTP token code validity in seconds. + - "B(Note:) Cannot be modified after OTP is created." + type: int + counter: + description: + - Initial counter for the HOTP token. + - "B(Note:) Cannot be modified after OTP is created." 
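
The notbefore and notafter values use the compact YYYYMMddHHmmss form described above (the module appends a trailing 'Z' before sending them to IPA). A quick illustrative round-trip with the standard library:

    from datetime import datetime

    stamp = datetime(2018, 1, 21, 18, 20, 22).strftime('%Y%m%d%H%M%S')
    print(stamp)                                     # 20180121182022
    print(datetime.strptime(stamp, '%Y%m%d%H%M%S'))  # 2018-01-21 18:20:22
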
+ type: int +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes +''' + +EXAMPLES = r''' +- name: Create a totp for pinky, allowing the IPA server to generate using defaults + community.general.ipa_otptoken: + uniqueid: Token123 + otptype: totp + owner: pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Create a 8 digit hotp for pinky with sha256 with specified validity times + community.general.ipa_otptoken: + uniqueid: Token123 + enabled: true + otptype: hotp + digits: 8 + secretkey: UMKSIER00zT2T2tWMUlTRmNlekRCbFQvWFBVZUh2dElHWGR6T3VUR3IzK2xjaFk9 + algorithm: sha256 + notbefore: 20180121182123 + notafter: 20220121182123 + owner: pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Update Token123 to indicate a vendor, model, serial number (info only), and description + community.general.ipa_otptoken: + uniqueid: Token123 + vendor: Acme + model: acme101 + serial: SerialNumber1 + description: Acme OTP device + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Disable Token123 + community.general.ipa_otptoken: + uniqueid: Token123 + enabled: false + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Rename Token123 to TokenABC and enable it + community.general.ipa_otptoken: + uniqueid: Token123 + newuniqueid: TokenABC + enabled: true + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +otptoken: + description: OTP Token as returned by IPA API + returned: always + type: dict +''' + +import base64 +import traceback + +from ansible.module_utils.basic import AnsibleModule, sanitize_keys +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class OTPTokenIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(OTPTokenIPAClient, self).__init__(module, host, port, protocol) + + def otptoken_find(self, name): + return self._post_json(method='otptoken_find', name=None, item={'all': True, + 'ipatokenuniqueid': name, + 'timelimit': '0', + 'sizelimit': '0'}) + + def otptoken_add(self, name, item): + return self._post_json(method='otptoken_add', name=name, item=item) + + def otptoken_mod(self, name, item): + return self._post_json(method='otptoken_mod', name=name, item=item) + + def otptoken_del(self, name): + return self._post_json(method='otptoken_del', name=name) + + +def base64_to_base32(base64_string): + """Converts base64 string to base32 string""" + b32_string = base64.b32encode(base64.b64decode(base64_string)).decode('ascii') + return b32_string + + +def base32_to_base64(base32_string): + """Converts base32 string to base64 string""" + b64_string = base64.b64encode(base64.b32decode(base32_string)).decode('ascii') + return b64_string + + +def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=None, secretkey=None, description=None, owner=None, + enabled=None, notbefore=None, notafter=None, vendor=None, + model=None, serial=None, algorithm=None, digits=None, offset=None, + interval=None, counter=None): + """Create the dictionary of settings passed in""" + + otptoken = {} + if uniqueid is not None: + otptoken[ansible_to_ipa['uniqueid']] = uniqueid + if newuniqueid is not None: + otptoken[ansible_to_ipa['newuniqueid']] = newuniqueid + if otptype is not None: + otptoken[ansible_to_ipa['otptype']] = otptype.upper() + if 
secretkey is not None: + # For some unknown reason, while IPA returns the secret in base64, + # it wants the secret passed in as base32. This makes it more difficult + # for comparison (does 'current' equal to 'new'). Moreover, this may + # cause some subtle issue in a playbook as the output is encoded + # in a different way than if it was passed in as a parameter. For + # these reasons, have the module standardize on base64 input (as parameter) + # and output (from IPA). + otptoken[ansible_to_ipa['secretkey']] = base64_to_base32(secretkey) + if description is not None: + otptoken[ansible_to_ipa['description']] = description + if owner is not None: + otptoken[ansible_to_ipa['owner']] = owner + if enabled is not None: + otptoken[ansible_to_ipa['enabled']] = 'FALSE' if enabled else 'TRUE' + if notbefore is not None: + otptoken[ansible_to_ipa['notbefore']] = notbefore + 'Z' + if notafter is not None: + otptoken[ansible_to_ipa['notafter']] = notafter + 'Z' + if vendor is not None: + otptoken[ansible_to_ipa['vendor']] = vendor + if model is not None: + otptoken[ansible_to_ipa['model']] = model + if serial is not None: + otptoken[ansible_to_ipa['serial']] = serial + if algorithm is not None: + otptoken[ansible_to_ipa['algorithm']] = algorithm + if digits is not None: + otptoken[ansible_to_ipa['digits']] = str(digits) + if offset is not None: + otptoken[ansible_to_ipa['offset']] = str(offset) + if interval is not None: + otptoken[ansible_to_ipa['interval']] = str(interval) + if counter is not None: + otptoken[ansible_to_ipa['counter']] = str(counter) + + return otptoken + + +def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible): + """Transform the output received by IPA to a format more friendly + before it is returned to the user. IPA returns even simple + strings as a list of strings. It also returns bools and + int as string. This function cleans that up before return. + """ + updated_otptoken = ipa_otptoken + + # Used to hold values that will be sanitized from output as no_log. + # For the case where secretkey is not specified at the module, but + # is passed back from IPA. + sanitize_strings = set() + + # Rename the IPA parameters to the more friendly ansible module names for them + for ipa_parameter in ipa_to_ansible: + if ipa_parameter in ipa_otptoken: + updated_otptoken[ipa_to_ansible[ipa_parameter]] = ipa_otptoken[ipa_parameter] + updated_otptoken.pop(ipa_parameter) + + # Change the type from IPA's list of string to the appropriate return value type + # based on field. By default, assume they should be strings. 
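+    # (Illustrative note) After the renaming above, a raw value such as
+    #     {'digits': ['6']}
+    # is flattened by the loop below into {'digits': 6}.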
+    for ansible_parameter in ansible_to_ipa:
+        if ansible_parameter in updated_otptoken:
+            if isinstance(updated_otptoken[ansible_parameter], list) and len(updated_otptoken[ansible_parameter]) == 1:
+                if ansible_parameter in ['digits', 'offset', 'interval', 'counter']:
+                    updated_otptoken[ansible_parameter] = int(updated_otptoken[ansible_parameter][0])
+                elif ansible_parameter == 'enabled':
+                    updated_otptoken[ansible_parameter] = bool(updated_otptoken[ansible_parameter][0])
+                else:
+                    updated_otptoken[ansible_parameter] = updated_otptoken[ansible_parameter][0]
+
+    if 'secretkey' in updated_otptoken:
+        if isinstance(updated_otptoken['secretkey'], dict):
+            if '__base64__' in updated_otptoken['secretkey']:
+                sanitize_strings.add(updated_otptoken['secretkey']['__base64__'])
+                b64key = updated_otptoken['secretkey']['__base64__']
+                updated_otptoken.pop('secretkey')
+                updated_otptoken['secretkey'] = b64key
+                sanitize_strings.add(b64key)
+            elif '__base32__' in updated_otptoken['secretkey']:
+                sanitize_strings.add(updated_otptoken['secretkey']['__base32__'])
+                b32key = updated_otptoken['secretkey']['__base32__']
+                b64key = base32_to_base64(b32key)
+                updated_otptoken.pop('secretkey')
+                updated_otptoken['secretkey'] = b64key
+                sanitize_strings.add(b32key)
+                sanitize_strings.add(b64key)
+
+    return updated_otptoken, sanitize_strings
+
+
+def validate_modifications(ansible_to_ipa, module, ipa_otptoken,
+                           module_otptoken, unmodifiable_after_creation):
+    """Checks to see if the requested modifications are valid. Some elements
+    cannot be modified after initial creation. However, we still want to
+    validate arguments that are specified, but are not different than what
+    is currently set on the server.
+    """
+
+    modifications_valid = True
+
+    for parameter in unmodifiable_after_creation:
+        if ansible_to_ipa[parameter] in module_otptoken and ansible_to_ipa[parameter] in ipa_otptoken:
+            mod_value = module_otptoken[ansible_to_ipa[parameter]]
+
+            # For some unknown reason, the returns from IPA put almost all
+            # values in a list, even though passing them in a list (even of
+            # length 1) will be rejected. The module values for all elements
+            # other than type (totp or hotp) have this happen.
+            if parameter == 'otptype':
+                ipa_value = ipa_otptoken[ansible_to_ipa[parameter]]
+            else:
+                if len(ipa_otptoken[ansible_to_ipa[parameter]]) != 1:
+                    module.fail_json(msg=("Invariant fail: Return value from IPA is not a list " +
+                                          "of length 1. Please open a bug report for the module."))
+                if parameter == 'secretkey':
+                    # We stored the secret key in base32 since we had assumed that would need to
+                    # be the format if we were contacting IPA to create it. However, we are
+                    # now comparing it against what is already set in the IPA server, so convert
+                    # back to base64 for comparison.
+                    mod_value = base32_to_base64(mod_value)
+
+                    # For the secret key, it is even more specific in that the key is returned
+                    # in a dict, in the list, as the __base64__ entry for the IPA response.
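+                    # (Illustrative note) The server's answer typically looks like
+                    #     [{'__base64__': 'UMKSIER...'}]
+                    # so the check below unwraps that dict entry, converting a
+                    # __base32__ response with base32_to_base64() first, before
+                    # comparing it against mod_value.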
+                    if '__base64__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
+                        ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__']
+                    elif '__base32__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
+                        b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base32__']
+                        b64key = base32_to_base64(b32key)
+                        ipa_value = b64key
+                    else:
+                        ipa_value = None
+                else:
+                    ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]
+
+            if mod_value != ipa_value:
+                modifications_valid = False
+                fail_message = ("Parameter '" + parameter + "' cannot be changed once " +
+                                "the OTP is created and the requested value specified here (" +
+                                str(mod_value) +
+                                ") differs from what is set in the IPA server (" +
+                                str(ipa_value) + ")")
+                module.fail_json(msg=fail_message)
+
+    return modifications_valid
+
+
+def ensure(module, client):
+    # dict to map from ansible parameter names to attribute names
+    # used by IPA (which are not so friendly).
+    ansible_to_ipa = {'uniqueid': 'ipatokenuniqueid',
+                      'newuniqueid': 'rename',
+                      'otptype': 'type',
+                      'secretkey': 'ipatokenotpkey',
+                      'description': 'description',
+                      'owner': 'ipatokenowner',
+                      'enabled': 'ipatokendisabled',
+                      'notbefore': 'ipatokennotbefore',
+                      'notafter': 'ipatokennotafter',
+                      'vendor': 'ipatokenvendor',
+                      'model': 'ipatokenmodel',
+                      'serial': 'ipatokenserial',
+                      'algorithm': 'ipatokenotpalgorithm',
+                      'digits': 'ipatokenotpdigits',
+                      'offset': 'ipatokentotpclockoffset',
+                      'interval': 'ipatokentotptimestep',
+                      'counter': 'ipatokenhotpcounter'}
+
+    # Create inverse dictionary for mapping return values
+    ipa_to_ansible = {}
+    for (k, v) in ansible_to_ipa.items():
+        ipa_to_ansible[v] = k
+
+    unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm',
+                                   'digits', 'offset', 'interval', 'counter']
+    state = module.params['state']
+    uniqueid = module.params['uniqueid']
+
+    module_otptoken = get_otptoken_dict(ansible_to_ipa=ansible_to_ipa,
+                                        uniqueid=module.params.get('uniqueid'),
+                                        newuniqueid=module.params.get('newuniqueid'),
+                                        otptype=module.params.get('otptype'),
+                                        secretkey=module.params.get('secretkey'),
+                                        description=module.params.get('description'),
+                                        owner=module.params.get('owner'),
+                                        enabled=module.params.get('enabled'),
+                                        notbefore=module.params.get('notbefore'),
+                                        notafter=module.params.get('notafter'),
+                                        vendor=module.params.get('vendor'),
+                                        model=module.params.get('model'),
+                                        serial=module.params.get('serial'),
+                                        algorithm=module.params.get('algorithm'),
+                                        digits=module.params.get('digits'),
+                                        offset=module.params.get('offset'),
+                                        interval=module.params.get('interval'),
+                                        counter=module.params.get('counter'))
+
+    ipa_otptoken = client.otptoken_find(name=uniqueid)
+
+    if ansible_to_ipa['newuniqueid'] in module_otptoken:
+        # Check to see if the new unique id is already in use
+        ipa_otptoken_new = client.otptoken_find(name=module_otptoken[ansible_to_ipa['newuniqueid']])
+        if ipa_otptoken_new:
+            module.fail_json(msg=("Requested rename through newuniqueid to " +
+                                  module_otptoken[ansible_to_ipa['newuniqueid']] +
+                                  " failed because the new unique id is already in use"))
+
+    changed = False
+    if state == 'present':
+        if not ipa_otptoken:
+            changed = True
+            if not module.check_mode:
+                # It would not make sense to have a rename after creation, so if the user
+                # specified a newuniqueid, just replace the uniqueid with the updated one
+                # before creation
+                if ansible_to_ipa['newuniqueid'] in module_otptoken:
+                    module_otptoken[ansible_to_ipa['uniqueid']] =
module_otptoken[ansible_to_ipa['newuniqueid']] + uniqueid = module_otptoken[ansible_to_ipa['newuniqueid']] + module_otptoken.pop(ansible_to_ipa['newuniqueid']) + + # IPA wants the unique id in the first position and not as a key/value pair. + # Get rid of it from the otptoken dict and just specify it in the name field + # for otptoken_add. + if ansible_to_ipa['uniqueid'] in module_otptoken: + module_otptoken.pop(ansible_to_ipa['uniqueid']) + + module_otptoken['all'] = True + ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken) + else: + if not validate_modifications(ansible_to_ipa, module, ipa_otptoken, + module_otptoken, unmodifiable_after_creation): + module.fail_json(msg="Modifications requested in module are not valid") + + # IPA will reject 'modifications' that do not actually modify anything + # if any of the unmodifiable elements are specified. Explicitly + # get rid of them here. They were not different or else the + # we would have failed out in validate_modifications. + for x in unmodifiable_after_creation: + if ansible_to_ipa[x] in module_otptoken: + module_otptoken.pop(ansible_to_ipa[x]) + + diff = client.get_diff(ipa_data=ipa_otptoken, module_data=module_otptoken) + if len(diff) > 0: + changed = True + if not module.check_mode: + + # IPA wants the unique id in the first position and not as a key/value pair. + # Get rid of it from the otptoken dict and just specify it in the name field + # for otptoken_mod. + if ansible_to_ipa['uniqueid'] in module_otptoken: + module_otptoken.pop(ansible_to_ipa['uniqueid']) + + module_otptoken['all'] = True + ipa_otptoken = client.otptoken_mod(name=uniqueid, item=module_otptoken) + else: + if ipa_otptoken: + changed = True + if not module.check_mode: + client.otptoken_del(name=uniqueid) + + # Transform the output to use ansible keywords (not the IPA keywords) and + # sanitize any key values in the output. 
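+    # Both the base32 and base64 forms of a server-generated secret end up in
+    # no_log_values below, so neither representation can leak into logs.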
+ ipa_otptoken, sanitize_strings = transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible) + module.no_log_values = module.no_log_values.union(sanitize_strings) + sanitized_otptoken = sanitize_keys(obj=ipa_otptoken, no_log_strings=module.no_log_values) + return changed, sanitized_otptoken + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(uniqueid=dict(type='str', aliases=['name'], required=True), + newuniqueid=dict(type='str'), + otptype=dict(type='str', choices=['totp', 'hotp']), + secretkey=dict(type='str', no_log=True), + description=dict(type='str'), + owner=dict(type='str'), + enabled=dict(type='bool', default=True), + notbefore=dict(type='str'), + notafter=dict(type='str'), + vendor=dict(type='str'), + model=dict(type='str'), + serial=dict(type='str'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + algorithm=dict(type='str', choices=['sha1', 'sha256', 'sha384', 'sha512']), + digits=dict(type='int', choices=[6, 8]), + offset=dict(type='int'), + interval=dict(type='int'), + counter=dict(type='int')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = OTPTokenIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, otptoken = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, otptoken=otptoken) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py b/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py new file mode 100644 index 000000000..6a6c4318b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py @@ -0,0 +1,260 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_pwpolicy +author: Adralioh (@adralioh) +short_description: Manage FreeIPA password policies +description: +- Add, modify, or delete a password policy using the IPA API. +version_added: 2.0.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + group: + description: + - Name of the group that the policy applies to. + - If omitted, the global policy is used. + aliases: ["name"] + type: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + maxpwdlife: + description: Maximum password lifetime (in days). + type: str + minpwdlife: + description: Minimum password lifetime (in hours). + type: str + historylength: + description: + - Number of previous passwords that are remembered. + - Users cannot reuse remembered passwords. + type: str + minclasses: + description: Minimum number of character classes. + type: str + minlength: + description: Minimum password length. + type: str + priority: + description: + - Priority of the policy. + - High number means lower priority. + - Required when C(cn) is not the global policy. 
+ type: str + maxfailcount: + description: Maximum number of consecutive failures before lockout. + type: str + failinterval: + description: Period (in seconds) after which the number of failed login attempts is reset. + type: str + lockouttime: + description: Period (in seconds) for which users are locked out. + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes +''' + +EXAMPLES = r''' +- name: Modify the global password policy + community.general.ipa_pwpolicy: + maxpwdlife: '90' + minpwdlife: '1' + historylength: '8' + minclasses: '3' + minlength: '16' + maxfailcount: '6' + failinterval: '60' + lockouttime: '600' + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure the password policy for the group admins is present + community.general.ipa_pwpolicy: + group: admins + state: present + maxpwdlife: '60' + minpwdlife: '24' + historylength: '16' + minclasses: '4' + priority: '10' + maxfailcount: '4' + failinterval: '600' + lockouttime: '1200' + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that the group sysops does not have a unique password policy + community.general.ipa_pwpolicy: + group: sysops + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +pwpolicy: + description: Password policy as returned by IPA API. + returned: always + type: dict + sample: + cn: ['admins'] + cospriority: ['10'] + dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com' + krbmaxpwdlife: ['60'] + krbminpwdlife: ['24'] + krbpwdfailurecountinterval: ['600'] + krbpwdhistorylength: ['16'] + krbpwdlockoutduration: ['1200'] + krbpwdmaxfailure: ['4'] + krbpwdmindiffchars: ['4'] + objectclass: ['top', 'nscontainer', 'krbpwdpolicy'] +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class PwPolicyIPAClient(IPAClient): + '''The global policy will be selected when `name` is `None`''' + def __init__(self, module, host, port, protocol): + super(PwPolicyIPAClient, self).__init__(module, host, port, protocol) + + def pwpolicy_find(self, name): + if name is None: + # Manually set the cn to the global policy because pwpolicy_find will return a random + # different policy if cn is `None` + name = 'global_policy' + return self._post_json(method='pwpolicy_find', name=None, item={'all': True, 'cn': name}) + + def pwpolicy_add(self, name, item): + return self._post_json(method='pwpolicy_add', name=name, item=item) + + def pwpolicy_mod(self, name, item): + return self._post_json(method='pwpolicy_mod', name=name, item=item) + + def pwpolicy_del(self, name): + return self._post_json(method='pwpolicy_del', name=name) + + +def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None, + minlength=None, priority=None, maxfailcount=None, failinterval=None, + lockouttime=None): + pwpolicy = {} + if maxpwdlife is not None: + pwpolicy['krbmaxpwdlife'] = maxpwdlife + if minpwdlife is not None: + pwpolicy['krbminpwdlife'] = minpwdlife + if historylength is not None: + pwpolicy['krbpwdhistorylength'] = historylength + if minclasses is not None: + pwpolicy['krbpwdmindiffchars'] = minclasses + if minlength is not None: + pwpolicy['krbpwdminlength'] = minlength + if priority is not None: + pwpolicy['cospriority'] = priority + 
if maxfailcount is not None: + pwpolicy['krbpwdmaxfailure'] = maxfailcount + if failinterval is not None: + pwpolicy['krbpwdfailurecountinterval'] = failinterval + if lockouttime is not None: + pwpolicy['krbpwdlockoutduration'] = lockouttime + + return pwpolicy + + +def get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy): + return client.get_diff(ipa_data=ipa_pwpolicy, module_data=module_pwpolicy) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['group'] + + module_pwpolicy = get_pwpolicy_dict(maxpwdlife=module.params.get('maxpwdlife'), + minpwdlife=module.params.get('minpwdlife'), + historylength=module.params.get('historylength'), + minclasses=module.params.get('minclasses'), + minlength=module.params.get('minlength'), + priority=module.params.get('priority'), + maxfailcount=module.params.get('maxfailcount'), + failinterval=module.params.get('failinterval'), + lockouttime=module.params.get('lockouttime')) + + ipa_pwpolicy = client.pwpolicy_find(name=name) + + changed = False + if state == 'present': + if not ipa_pwpolicy: + changed = True + if not module.check_mode: + ipa_pwpolicy = client.pwpolicy_add(name=name, item=module_pwpolicy) + else: + diff = get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy) + if len(diff) > 0: + changed = True + if not module.check_mode: + ipa_pwpolicy = client.pwpolicy_mod(name=name, item=module_pwpolicy) + else: + if ipa_pwpolicy: + changed = True + if not module.check_mode: + client.pwpolicy_del(name=name) + + return changed, ipa_pwpolicy + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(group=dict(type='str', aliases=['name']), + state=dict(type='str', default='present', choices=['present', 'absent']), + maxpwdlife=dict(type='str'), + minpwdlife=dict(type='str'), + historylength=dict(type='str'), + minclasses=dict(type='str'), + minlength=dict(type='str'), + priority=dict(type='str'), + maxfailcount=dict(type='str'), + failinterval=dict(type='str'), + lockouttime=dict(type='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = PwPolicyIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, pwpolicy = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, pwpolicy=pwpolicy) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_role.py b/ansible_collections/community/general/plugins/modules/ipa_role.py new file mode 100644 index 000000000..fce315b66 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_role.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_role +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA role +description: +- Add, modify and delete a role within FreeIPA server using FreeIPA API. 
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  cn:
+    description:
+    - Role name.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ['name']
+    type: str
+  description:
+    description:
+    - A description of this role-group.
+    type: str
+  group:
+    description:
+    - List of group names to assign to this role.
+    - If an empty list is passed all assigned groups will be unassigned from the role.
+    - If option is omitted groups will not be checked or changed.
+    - If option is passed all assigned groups that are not passed will be unassigned from the role.
+    type: list
+    elements: str
+  host:
+    description:
+    - List of host names to assign.
+    - If an empty list is passed all assigned hosts will be unassigned from the role.
+    - If option is omitted hosts will not be checked or changed.
+    - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+    type: list
+    elements: str
+  hostgroup:
+    description:
+    - List of host group names to assign.
+    - If an empty list is passed all assigned host groups will be removed from the role.
+    - If option is omitted host groups will not be checked or changed.
+    - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+    type: list
+    elements: str
+  privilege:
+    description:
+    - List of privileges granted to the role.
+    - If an empty list is passed all assigned privileges will be removed.
+    - If option is omitted privileges will not be checked or changed.
+    - If option is passed all assigned privileges that are not passed will be removed.
+    type: list
+    elements: str
+  service:
+    description:
+    - List of service names to assign.
+    - If an empty list is passed all assigned services will be removed from the role.
+    - If option is omitted services will not be checked or changed.
+    - If option is passed all assigned services that are not passed will be removed from the role.
+    type: list
+    elements: str
+  state:
+    description: State to ensure.
+    default: "present"
+    choices: ["absent", "present"]
+    type: str
+  user:
+    description:
+    - List of user names to assign.
+    - If an empty list is passed all assigned users will be removed from the role.
+    - If option is omitted users will not be checked or changed.
+    type: list
+    elements: str
+extends_documentation_fragment:
+  - community.general.ipa.documentation
+  - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure role is present
+  community.general.ipa_role:
+    name: dba
+    description: Database Administrators
+    state: present
+    user:
+    - pinky
+    - brain
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+- name: Ensure role with certain details
+  community.general.ipa_role:
+    name: another-role
+    description: Just another role
+    group:
+    - editors
+    host:
+    - host01.example.com
+    hostgroup:
+    - hostgroup01
+    privilege:
+    - Group Administrators
+    - User Administrators
+    service:
+    - service01
+
+- name: Ensure role is absent
+  community.general.ipa_role:
+    name: dba
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+'''
+
+RETURN = r'''
+role:
+  description: Role as returned by IPA API.
+ returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class RoleIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(RoleIPAClient, self).__init__(module, host, port, protocol) + + def role_find(self, name): + return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name}) + + def role_add(self, name, item): + return self._post_json(method='role_add', name=name, item=item) + + def role_mod(self, name, item): + return self._post_json(method='role_mod', name=name, item=item) + + def role_del(self, name): + return self._post_json(method='role_del', name=name) + + def role_add_member(self, name, item): + return self._post_json(method='role_add_member', name=name, item=item) + + def role_add_group(self, name, item): + return self.role_add_member(name=name, item={'group': item}) + + def role_add_host(self, name, item): + return self.role_add_member(name=name, item={'host': item}) + + def role_add_hostgroup(self, name, item): + return self.role_add_member(name=name, item={'hostgroup': item}) + + def role_add_service(self, name, item): + return self.role_add_member(name=name, item={'service': item}) + + def role_add_user(self, name, item): + return self.role_add_member(name=name, item={'user': item}) + + def role_remove_member(self, name, item): + return self._post_json(method='role_remove_member', name=name, item=item) + + def role_remove_group(self, name, item): + return self.role_remove_member(name=name, item={'group': item}) + + def role_remove_host(self, name, item): + return self.role_remove_member(name=name, item={'host': item}) + + def role_remove_hostgroup(self, name, item): + return self.role_remove_member(name=name, item={'hostgroup': item}) + + def role_remove_service(self, name, item): + return self.role_remove_member(name=name, item={'service': item}) + + def role_remove_user(self, name, item): + return self.role_remove_member(name=name, item={'user': item}) + + def role_add_privilege(self, name, item): + return self._post_json(method='role_add_privilege', name=name, item={'privilege': item}) + + def role_remove_privilege(self, name, item): + return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item}) + + +def get_role_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_role_diff(client, ipa_role, module_role): + return client.get_diff(ipa_data=ipa_role, module_data=module_role) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + group = module.params['group'] + host = module.params['host'] + hostgroup = module.params['hostgroup'] + privilege = module.params['privilege'] + service = module.params['service'] + user = module.params['user'] + + module_role = get_role_dict(description=module.params['description']) + ipa_role = client.role_find(name=name) + + changed = False + if state == 'present': + if not ipa_role: + changed = True + if not module.check_mode: + ipa_role = client.role_add(name=name, item=module_role) + else: + diff = get_role_diff(client, ipa_role, module_role) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_role.get(key) + client.role_mod(name=name, item=data) + + if 
group is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group, + client.role_add_group, + client.role_remove_group) or changed + if host is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host, + client.role_add_host, + client.role_remove_host) or changed + + if hostgroup is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup, + client.role_add_hostgroup, + client.role_remove_hostgroup) or changed + + if privilege is not None: + changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege, + client.role_add_privilege, + client.role_remove_privilege) or changed + if service is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service, + client.role_add_service, + client.role_remove_service) or changed + if user is not None: + changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user, + client.role_add_user, + client.role_remove_user) or changed + + else: + if ipa_role: + changed = True + if not module.check_mode: + client.role_del(name) + + return changed, client.role_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + group=dict(type='list', elements='str'), + host=dict(type='list', elements='str'), + hostgroup=dict(type='list', elements='str'), + privilege=dict(type='list', elements='str'), + service=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + user=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = RoleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, role = ensure(module, client) + module.exit_json(changed=changed, role=role) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_service.py b/ansible_collections/community/general/plugins/modules/ipa_service.py new file mode 100644 index 000000000..d9541674f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_service.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_service +author: Cédric Parent (@cprh) +short_description: Manage FreeIPA service +description: +- Add and delete an IPA service using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + krbcanonicalname: + description: + - Principal of the service. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + hosts: + description: + - Defines the list of 'ManagedBy' hosts. + required: false + type: list + elements: str + force: + description: + - Force principal name even if host is not in DNS. 
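+ - This is only used on creation, not for updating existing services.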
+ required: false + type: bool + skip_host_check: + description: + - Force service to be created even when host object does not exist to manage it. + - This is only used on creation, not for updating existing services. + required: false + type: bool + default: false + version_added: 4.7.0 + state: + description: State to ensure. + required: false + default: present + choices: ["absent", "present"] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure service is present + community.general.ipa_service: + name: http/host01.example.com + state: present + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure service is absent + community.general.ipa_service: + name: http/host01.example.com + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Changing Managing hosts list + community.general.ipa_service: + name: http/host01.example.com + hosts: + - host01.example.com + - host02.example.com + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +service: + description: Service as returned by IPA API. + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class ServiceIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(ServiceIPAClient, self).__init__(module, host, port, protocol) + + def service_find(self, name): + return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name}) + + def service_add(self, name, service): + return self._post_json(method='service_add', name=name, item=service) + + def service_mod(self, name, service): + return self._post_json(method='service_mod', name=name, item=service) + + def service_del(self, name): + return self._post_json(method='service_del', name=name) + + def service_disable(self, name): + return self._post_json(method='service_disable', name=name) + + def service_add_host(self, name, item): + return self._post_json(method='service_add_host', name=name, item={'host': item}) + + def service_remove_host(self, name, item): + return self._post_json(method='service_remove_host', name=name, item={'host': item}) + + +def get_service_dict(force=None, krbcanonicalname=None, skip_host_check=None): + data = {} + if force is not None: + data['force'] = force + if krbcanonicalname is not None: + data['krbcanonicalname'] = krbcanonicalname + if skip_host_check is not None: + data['skip_host_check'] = skip_host_check + return data + + +def get_service_diff(client, ipa_host, module_service): + non_updateable_keys = ['force', 'krbcanonicalname', 'skip_host_check'] + for key in non_updateable_keys: + if key in module_service: + del module_service[key] + + return client.get_diff(ipa_data=ipa_host, module_data=module_service) + + +def ensure(module, client): + name = module.params['krbcanonicalname'] + state = module.params['state'] + hosts = module.params['hosts'] + + ipa_service = client.service_find(name=name) + module_service = get_service_dict(force=module.params['force'], skip_host_check=module.params['skip_host_check']) + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_service: + changed = True + if not module.check_mode: + 
client.service_add(name=name, service=module_service) + else: + diff = get_service_diff(client, ipa_service, module_service) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_service.get(key) + client.service_mod(name=name, service=data) + if hosts is not None: + if 'managedby_host' in ipa_service: + for host in ipa_service['managedby_host']: + if host not in hosts: + if not module.check_mode: + client.service_remove_host(name=name, item=host) + changed = True + for host in hosts: + if host not in ipa_service['managedby_host']: + if not module.check_mode: + client.service_add_host(name=name, item=host) + changed = True + else: + for host in hosts: + if not module.check_mode: + client.service_add_host(name=name, item=host) + changed = True + + else: + if ipa_service: + changed = True + if not module.check_mode: + client.service_del(name=name) + + return changed, client.service_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + krbcanonicalname=dict(type='str', required=True, aliases=['name']), + force=dict(type='bool', required=False), + skip_host_check=dict(type='bool', default=False, required=False), + hosts=dict(type='list', required=False, elements='str'), + state=dict(type='str', required=False, default='present', + choices=['present', 'absent'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = ServiceIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, service = ensure(module, client) + module.exit_json(changed=changed, service=service) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_subca.py b/ansible_collections/community/general/plugins/modules/ipa_subca.py new file mode 100644 index 000000000..882b1ac39 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_subca.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_subca +author: Abhijeet Kasurde (@Akasurde) +short_description: Manage FreeIPA Lightweight Sub Certificate Authorities +description: +- Add, modify, enable, disable and delete IPA Lightweight Sub Certificate Authorities using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + subca_name: + description: + - The Sub Certificate Authority name which needs to be managed. + required: true + aliases: ["name"] + type: str + subca_subject: + description: + - The Sub Certificate Authority's Subject, for example 'CN=SampleSubCA1,O=testrelm.test'. + required: true + type: str + subca_desc: + description: + - The Sub Certificate Authority's description. + type: str + state: + description: + - State to ensure. + - States 'disabled' and 'enabled' are available for FreeIPA version 4.4.2 and onwards.
+ required: false + default: present + choices: ["absent", "disabled", "enabled", "present"] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure IPA Sub CA is present + community.general.ipa_subca: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: present + subca_name: AnsibleSubCA1 + subca_subject: 'CN=AnsibleSubCA1,O=example.com' + subca_desc: Ansible Sub CA + +- name: Ensure that IPA Sub CA is removed + community.general.ipa_subca: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: absent + subca_name: AnsibleSubCA1 + +- name: Ensure that IPA Sub CA is disabled + community.general.ipa_subca: + ipa_host: spider.example.com + ipa_pass: Passw0rd! + state: disabled + subca_name: AnsibleSubCA1 +''' + +RETURN = r''' +subca: + description: IPA Sub CA record as returned by IPA API. + returned: always + type: dict +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class SubCAIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SubCAIPAClient, self).__init__(module, host, port, protocol) + + def subca_find(self, subca_name): + return self._post_json(method='ca_find', name=subca_name, item=None) + + def subca_add(self, subca_name=None, subject_dn=None, details=None): + item = dict(ipacasubjectdn=subject_dn) + subca_desc = details.get('description', None) + if subca_desc is not None: + item.update(description=subca_desc) + return self._post_json(method='ca_add', name=subca_name, item=item) + + def subca_mod(self, subca_name=None, diff=None, details=None): + item = get_subca_dict(details) + for change in diff: + update_detail = dict() + if item[change] is not None: + update_detail.update(setattr="{0}={1}".format(change, item[change])) + self._post_json(method='ca_mod', name=subca_name, item=update_detail) + + def subca_del(self, subca_name=None): + return self._post_json(method='ca_del', name=subca_name) + + def subca_disable(self, subca_name=None): + return self._post_json(method='ca_disable', name=subca_name) + + def subca_enable(self, subca_name=None): + return self._post_json(method='ca_enable', name=subca_name) + + +def get_subca_dict(details=None): + module_subca = dict() + if details['description'] is not None: + module_subca['description'] = details['description'] + if details['subca_subject'] is not None: + module_subca['ipacasubjectdn'] = details['subca_subject'] + return module_subca + + +def get_subca_diff(client, ipa_subca, module_subca): + details = get_subca_dict(module_subca) + return client.get_diff(ipa_data=ipa_subca, module_data=details) + + +def ensure(module, client): + subca_name = module.params['subca_name'] + subca_subject_dn = module.params['subca_subject'] + subca_desc = module.params['subca_desc'] + + state = module.params['state'] + + ipa_subca = client.subca_find(subca_name) + module_subca = dict(description=subca_desc, + subca_subject=subca_subject_dn) + + changed = False + if state == 'present': + if not ipa_subca: + changed = True + if not module.check_mode: + client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca) + else: + diff = get_subca_diff(client, ipa_subca, module_subca) + # IPA does not allow
to modify Sub CA's subject DN + # So skip it for now. + if 'ipacasubjectdn' in diff: + diff.remove('ipacasubjectdn') + del module_subca['subca_subject'] + + if len(diff) > 0: + changed = True + if not module.check_mode: + client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca) + elif state == 'absent': + if ipa_subca: + changed = True + if not module.check_mode: + client.subca_del(subca_name=subca_name) + elif state == 'disabled': + ipa_version = client.get_ipa_version() + if LooseVersion(ipa_version) < LooseVersion('4.4.2'): + module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. Please upgrade to " + "version 4.4.2 or greater" % ipa_version) + if ipa_subca: + changed = True + if not module.check_mode: + client.subca_disable(subca_name=subca_name) + elif state == 'enabled': + ipa_version = client.get_ipa_version() + if LooseVersion(ipa_version) < LooseVersion('4.4.2'): + module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to " + "version 4.4.2 or greater" % ipa_version) + if ipa_subca: + changed = True + if not module.check_mode: + client.subca_enable(subca_name=subca_name) + + return changed, client.subca_find(subca_name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']), + subca_subject=dict(type='str', required=True), + subca_desc=dict(type='str'), + state=dict(type='str', default='present', + choices=['present', 'absent', 'enabled', 'disabled']),) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True,) + + client = SubCAIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, record = ensure(module, client) + module.exit_json(changed=changed, record=record) + except Exception as exc: + module.fail_json(msg=to_native(exc)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py b/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py new file mode 100644 index 000000000..d3139ba1c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_sudocmd +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo command +description: +- Add, modify or delete sudo command within FreeIPA server using FreeIPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + sudocmd: + description: + - Sudo command. + aliases: ['name'] + required: true + type: str + description: + description: + - A description of this command. + type: str + state: + description: State to ensure.
+ default: present + choices: ['absent', 'disabled', 'enabled', 'present'] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure sudo command exists + community.general.ipa_sudocmd: + name: su + description: Allow running su via sudo + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure sudo command does not exist + community.general.ipa_sudocmd: + name: su + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +sudocmd: + description: Sudo command as returned by IPA API + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class SudoCmdIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SudoCmdIPAClient, self).__init__(module, host, port, protocol) + + def sudocmd_find(self, name): + return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name}) + + def sudocmd_add(self, name, item): + return self._post_json(method='sudocmd_add', name=name, item=item) + + def sudocmd_mod(self, name, item): + return self._post_json(method='sudocmd_mod', name=name, item=item) + + def sudocmd_del(self, name): + return self._post_json(method='sudocmd_del', name=name) + + +def get_sudocmd_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd): + return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd) + + +def ensure(module, client): + name = module.params['sudocmd'] + state = module.params['state'] + + module_sudocmd = get_sudocmd_dict(description=module.params['description']) + ipa_sudocmd = client.sudocmd_find(name=name) + + changed = False + if state == 'present': + if not ipa_sudocmd: + changed = True + if not module.check_mode: + client.sudocmd_add(name=name, item=module_sudocmd) + else: + diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_sudocmd.get(key) + client.sudocmd_mod(name=name, item=data) + else: + if ipa_sudocmd: + changed = True + if not module.check_mode: + client.sudocmd_del(name=name) + + return changed, client.sudocmd_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(description=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + sudocmd=dict(type='str', required=True, aliases=['name'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = SudoCmdIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudocmd = ensure(module, client) + module.exit_json(changed=changed, sudocmd=sudocmd) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py
b/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py new file mode 100644 index 000000000..a768e74a1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py @@ -0,0 +1,186 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_sudocmdgroup +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo command group +description: +- Add, modify or delete sudo command group within IPA server using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Sudo Command Group. + aliases: ['name'] + required: true + type: str + description: + description: + - Group description. + type: str + state: + description: State to ensure. + default: present + choices: ['absent', 'disabled', 'enabled', 'present'] + type: str + sudocmd: + description: + - List of sudo commands to assign to the group. + - If an empty list is passed all assigned commands will be removed from the group. + - If option is omitted sudo commands will not be checked or changed. + type: list + elements: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure sudo command group exists + community.general.ipa_sudocmdgroup: + name: group01 + description: Group of important commands + sudocmd: + - su + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure sudo command group does not exist + community.general.ipa_sudocmdgroup: + name: group01 + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +sudocmdgroup: + description: Sudo command group as returned by IPA API + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class SudoCmdGroupIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol) + + def sudocmdgroup_find(self, name): + return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name}) + + def sudocmdgroup_add(self, name, item): + return self._post_json(method='sudocmdgroup_add', name=name, item=item) + + def sudocmdgroup_mod(self, name, item): + return self._post_json(method='sudocmdgroup_mod', name=name, item=item) + + def sudocmdgroup_del(self, name): + return self._post_json(method='sudocmdgroup_del', name=name) + + def sudocmdgroup_add_member(self, name, item): + return self._post_json(method='sudocmdgroup_add_member', name=name, item=item) + + def sudocmdgroup_add_member_sudocmd(self, name, item): + return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item}) + + def sudocmdgroup_remove_member(self, name, item): + return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item) + + def sudocmdgroup_remove_member_sudocmd(self, name, item): + return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item}) + + +def 
get_sudocmdgroup_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup): + return client.get_diff(ipa_data=ipa_sudocmdgroup, module_data=module_sudocmdgroup) + + +def ensure(module, client): + name = module.params['cn'] + state = module.params['state'] + sudocmd = module.params['sudocmd'] + + module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description']) + ipa_sudocmdgroup = client.sudocmdgroup_find(name=name) + + changed = False + if state == 'present': + if not ipa_sudocmdgroup: + changed = True + if not module.check_mode: + ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup) + else: + diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_sudocmdgroup.get(key) + client.sudocmdgroup_mod(name=name, item=data) + + if sudocmd is not None: + changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd, + client.sudocmdgroup_add_member_sudocmd, + client.sudocmdgroup_remove_member_sudocmd) or changed + else: + if ipa_sudocmdgroup: + changed = True + if not module.check_mode: + client.sudocmdgroup_del(name=name) + + return changed, client.sudocmdgroup_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + sudocmd=dict(type='list', elements='str')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = SudoCmdGroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudocmdgroup = ensure(module, client) + module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudorule.py b/ansible_collections/community/general/plugins/modules/ipa_sudorule.py new file mode 100644 index 000000000..59b4eb19e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_sudorule.py @@ -0,0 +1,471 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_sudorule +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo rule +description: +- Add, modify or delete sudo rule within IPA server using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Canonical name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ['name'] + type: str + cmdcategory: + description: + - Command category the rule applies to.
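+ - If 'all' is passed one must omit C(cmd) and C(cmdgroup).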
+ choices: ['all'] + type: str + cmd: + description: + - List of commands assigned to the rule. + - If an empty list is passed all commands will be removed from the rule. + - If option is omitted commands will not be checked or changed. + type: list + elements: str + cmdgroup: + description: + - List of command groups assigned to the rule. + - If an empty list is passed all command groups will be removed from the rule. + - If option is omitted command groups will not be checked or changed. + type: list + elements: str + version_added: 2.0.0 + description: + description: + - Description of the sudo rule. + type: str + host: + description: + - List of hosts assigned to the rule. + - If an empty list is passed all hosts will be removed from the rule. + - If option is omitted hosts will not be checked or changed. + - Option C(hostcategory) must be omitted to assign hosts. + type: list + elements: str + hostcategory: + description: + - Host category the rule applies to. + - If 'all' is passed one must omit C(host) and C(hostgroup). + - Option C(host) and C(hostgroup) must be omitted to assign 'all'. + choices: ['all'] + type: str + hostgroup: + description: + - List of host groups assigned to the rule. + - If an empty list is passed all host groups will be removed from the rule. + - If option is omitted host groups will not be checked or changed. + - Option C(hostcategory) must be omitted to assign host groups. + type: list + elements: str + runasextusers: + description: + - List of external RunAs users. + type: list + elements: str + version_added: 2.3.0 + runasusercategory: + description: + - RunAs User category the rule applies to. + choices: ['all'] + type: str + runasgroupcategory: + description: + - RunAs Group category the rule applies to. + choices: ['all'] + type: str + sudoopt: + description: + - List of options to add to the sudo rule. + type: list + elements: str + user: + description: + - List of users assigned to the rule. + - If an empty list is passed all users will be removed from the rule. + - If option is omitted users will not be checked or changed. + type: list + elements: str + usercategory: + description: + - User category the rule applies to. + choices: ['all'] + type: str + usergroup: + description: + - List of user groups assigned to the rule. + - If an empty list is passed all user groups will be removed from the rule. + - If option is omitted user groups will not be checked or changed. + type: list + elements: str + state: + description: State to ensure. + default: present + choices: ['absent', 'disabled', 'enabled', 'present'] + type: str +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password. + community.general.ipa_sudorule: + name: sudo_all_nopasswd + cmdcategory: all + description: Allow running every command with sudo without a password + hostcategory: all + sudoopt: + - '!authenticate' + usercategory: all + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +
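+- name: Ensure sudo rule is absent + community.general.ipa_sudorule: + name: sudo_all_nopasswd + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +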
+- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com. + community.general.ipa_sudorule: + name: sudo_dev_dbserver + description: Allow developers to run every command with sudo on all database servers + cmdcategory: all + host: + - db01.example.com + hostgroup: + - db-server + sudoopt: + - '!authenticate' + usergroup: + - developers + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure user group operations can run any command that is part of operations-cmdgroup on any host as user root. + community.general.ipa_sudorule: + name: sudo_operations_all + description: Allow operators to run any command that is part of operations-cmdgroup on any host as user root. + cmdgroup: + - operations-cmdgroup + hostcategory: all + runasextusers: + - root + sudoopt: + - '!authenticate' + usergroup: + - operators + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +sudorule: + description: Sudorule as returned by IPA + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class SudoRuleIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(SudoRuleIPAClient, self).__init__(module, host, port, protocol) + + def sudorule_find(self, name): + return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name}) + + def sudorule_add(self, name, item): + return self._post_json(method='sudorule_add', name=name, item=item) + + def sudorule_add_runasuser(self, name, item): + return self._post_json(method='sudorule_add_runasuser', name=name, item={'user': item}) + + def sudorule_remove_runasuser(self, name, item): + return self._post_json(method='sudorule_remove_runasuser', name=name, item={'user': item}) + + def sudorule_mod(self, name, item): + return self._post_json(method='sudorule_mod', name=name, item=item) + + def sudorule_del(self, name): + return self._post_json(method='sudorule_del', name=name) + + def sudorule_add_option(self, name, item): + return self._post_json(method='sudorule_add_option', name=name, item=item) + + def sudorule_add_option_ipasudoopt(self, name, item): + return self.sudorule_add_option(name=name, item={'ipasudoopt': item}) + + def sudorule_remove_option(self, name, item): + return self._post_json(method='sudorule_remove_option', name=name, item=item) + + def sudorule_remove_option_ipasudoopt(self, name, item): + return self.sudorule_remove_option(name=name, item={'ipasudoopt': item}) + + def sudorule_add_host(self, name, item): + return self._post_json(method='sudorule_add_host', name=name, item=item) + + def sudorule_add_host_host(self, name, item): + return self.sudorule_add_host(name=name, item={'host': item}) + + def sudorule_add_host_hostgroup(self, name, item): + return self.sudorule_add_host(name=name, item={'hostgroup': item}) + + def sudorule_remove_host(self, name, item): + return self._post_json(method='sudorule_remove_host', name=name, item=item) + + def sudorule_remove_host_host(self, name, item): + return self.sudorule_remove_host(name=name, item={'host': item}) + + def sudorule_remove_host_hostgroup(self, name, item): + return self.sudorule_remove_host(name=name, item={'hostgroup': item}) + + def sudorule_add_allow_command(self, name, item): + return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) + + def
sudorule_add_allow_command_group(self, name, item): + return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item}) + + def sudorule_remove_allow_command(self, name, item): + return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) + + def sudorule_add_user(self, name, item): + return self._post_json(method='sudorule_add_user', name=name, item=item) + + def sudorule_add_user_user(self, name, item): + return self.sudorule_add_user(name=name, item={'user': item}) + + def sudorule_add_user_group(self, name, item): + return self.sudorule_add_user(name=name, item={'group': item}) + + def sudorule_remove_user(self, name, item): + return self._post_json(method='sudorule_remove_user', name=name, item=item) + + def sudorule_remove_user_user(self, name, item): + return self.sudorule_remove_user(name=name, item={'user': item}) + + def sudorule_remove_user_group(self, name, item): + return self.sudorule_remove_user(name=name, item={'group': item}) + + +def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None, + runasgroupcategory=None, runasusercategory=None): + data = {} + if cmdcategory is not None: + data['cmdcategory'] = cmdcategory + if description is not None: + data['description'] = description + if hostcategory is not None: + data['hostcategory'] = hostcategory + if ipaenabledflag is not None: + data['ipaenabledflag'] = ipaenabledflag + if usercategory is not None: + data['usercategory'] = usercategory + if runasusercategory is not None: + data['ipasudorunasusercategory'] = runasusercategory + if runasgroupcategory is not None: + data['ipasudorunasgroupcategory'] = runasgroupcategory + return data + + +def category_changed(module, client, category_name, ipa_sudorule): + if ipa_sudorule.get(category_name, None) == ['all']: + if not module.check_mode: + # cn is returned as list even with only a single value. 
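+ # Clearing the category (setting it to None) removes the 'all' wildcard so that individual members can be assigned to the rule afterwards.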
+ client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None}) + return True + return False + + +def ensure(module, client): + state = module.params['state'] + name = module.params['cn'] + cmd = module.params['cmd'] + cmdgroup = module.params['cmdgroup'] + cmdcategory = module.params['cmdcategory'] + host = module.params['host'] + hostcategory = module.params['hostcategory'] + hostgroup = module.params['hostgroup'] + runasusercategory = module.params['runasusercategory'] + runasgroupcategory = module.params['runasgroupcategory'] + runasextusers = module.params['runasextusers'] + + if state in ['present', 'enabled']: + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = 'FALSE' + + sudoopt = module.params['sudoopt'] + user = module.params['user'] + usercategory = module.params['usercategory'] + usergroup = module.params['usergroup'] + + module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory, + description=module.params['description'], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + usercategory=usercategory, + runasusercategory=runasusercategory, + runasgroupcategory=runasgroupcategory) + ipa_sudorule = client.sudorule_find(name=name) + + changed = False + if state in ['present', 'disabled', 'enabled']: + if not ipa_sudorule: + changed = True + if not module.check_mode: + ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule) + else: + diff = client.get_diff(ipa_sudorule, module_sudorule) + if len(diff) > 0: + changed = True + if not module.check_mode: + if 'hostcategory' in diff: + if ipa_sudorule.get('memberhost_host', None) is not None: + client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host')) + if ipa_sudorule.get('memberhost_hostgroup', None) is not None: + client.sudorule_remove_host_hostgroup(name=name, + item=ipa_sudorule.get('memberhost_hostgroup')) + + client.sudorule_mod(name=name, item=module_sudorule) + + if cmd is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_allow_command(name=name, item=cmd) + + if cmdgroup is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_allow_command_group(name=name, item=cmdgroup) + + if runasusercategory is not None: + changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed + + if runasgroupcategory is not None: + changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed + + if host is not None: + changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host, + client.sudorule_add_host_host, + client.sudorule_remove_host_host) or changed + + if hostgroup is not None: + changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup, + client.sudorule_add_host_hostgroup, + client.sudorule_remove_host_hostgroup) or changed + if sudoopt is not None: + # client.modify_if_diff does not work as each option must be removed/added by its own + ipa_list = ipa_sudorule.get('ipasudoopt', []) + module_list = sudoopt + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_remove_option_ipasudoopt(name, item) + 
diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_add_option_ipasudoopt(name, item) + + if runasextusers is not None: + ipa_sudorule_run_as_user = ipa_sudorule.get('ipasudorunasextuser', []) + diff = list(set(ipa_sudorule_run_as_user) - set(runasextusers)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_remove_runasuser(name=name, item=item) + diff = list(set(runasextusers) - set(ipa_sudorule_run_as_user)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_add_runasuser(name=name, item=item) + + if user is not None: + changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user, + client.sudorule_add_user_user, + client.sudorule_remove_user_user) or changed + if usergroup is not None: + changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed + changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup, + client.sudorule_add_user_group, + client.sudorule_remove_user_group) or changed + else: + if ipa_sudorule: + changed = True + if not module.check_mode: + client.sudorule_del(name) + + return changed, client.sudorule_find(name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cmd=dict(type='list', elements='str'), + cmdgroup=dict(type='list', elements='str'), + cmdcategory=dict(type='str', choices=['all']), + cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + host=dict(type='list', elements='str'), + hostcategory=dict(type='str', choices=['all']), + hostgroup=dict(type='list', elements='str'), + runasusercategory=dict(type='str', choices=['all']), + runasgroupcategory=dict(type='str', choices=['all']), + sudoopt=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + user=dict(type='list', elements='str'), + usercategory=dict(type='str', choices=['all']), + usergroup=dict(type='list', elements='str'), + runasextusers=dict(type='list', elements='str')) + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[['cmdcategory', 'cmd'], + ['cmdcategory', 'cmdgroup'], + ['hostcategory', 'host'], + ['hostcategory', 'hostgroup'], + ['usercategory', 'user'], + ['usercategory', 'usergroup']], + supports_check_mode=True) + + client = SudoRuleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudorule = ensure(module, client) + module.exit_json(changed=changed, sudorule=sudorule) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_user.py b/ansible_collections/community/general/plugins/modules/ipa_user.py new file mode 100644 index 000000000..17b72176e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_user.py @@ -0,0 +1,404 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_user +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA users +description: +- Add, modify and delete user within IPA server. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + displayname: + description: Display name. + type: str + update_password: + description: + - Set password for a user. + type: str + default: 'always' + choices: [ always, on_create ] + givenname: + description: First name. + type: str + krbpasswordexpiration: + description: + - Date at which the user password will expire. + - In the format YYYYMMddHHmmss. + - For example, 20180121182022 will expire on 21 January 2018 at 18:20:22. + type: str + loginshell: + description: Login shell. + type: str + mail: + description: + - List of mail addresses assigned to the user. + - If an empty list is passed all assigned email addresses will be deleted. + - If None is passed email addresses will not be checked or changed. + type: list + elements: str + password: + description: + - Password for a user. + - Will not be set for an existing user unless I(update_password=always), which is the default. + type: str + sn: + description: Surname. + type: str + sshpubkey: + description: + - List of public SSH keys. + - If an empty list is passed all assigned public keys will be deleted. + - If None is passed SSH public keys will not be checked or changed. + type: list + elements: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "disabled", "enabled", "present"] + type: str + telephonenumber: + description: + - List of telephone numbers assigned to the user. + - If an empty list is passed all assigned telephone numbers will be deleted. + - If None is passed telephone numbers will not be checked or changed. + type: list + elements: str + title: + description: Title. + type: str + uid: + description: uid of the user. + required: true + aliases: ["name"] + type: str + uidnumber: + description: + - Account Settings UID/Posix User ID number. + type: str + gidnumber: + description: + - Posix Group ID. + type: str + homedirectory: + description: + - Default home directory of the user. + type: str + version_added: '0.2.0' + userauthtype: + description: + - The authentication type to use for the user. + choices: ["password", "radius", "otp", "pkinit", "hardened"] + type: list + elements: str + version_added: '1.2.0' +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +requirements: +- base64 +- hashlib +''' + +EXAMPLES = r''' +- name: Ensure pinky is present and always reset password + community.general.ipa_user: + name: pinky + state: present + krbpasswordexpiration: 20200119235959 + givenname: Pinky + sn: Acme + mail: + - pinky@acme.com + telephonenumber: + - '+555123456' + sshpubkey: + - ssh-rsa .... + - ssh-dsa ....
+ uidnumber: '1001' + gidnumber: '100' + homedirectory: /home/pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure brain is absent + community.general.ipa_user: + name: brain + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure pinky is present but don't reset password if already exists + community.general.ipa_user: + name: pinky + state: present + givenname: Pinky + sn: Acme + password: zounds + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + update_password: on_create + +- name: Ensure pinky is present and using one time password and RADIUS authentication + community.general.ipa_user: + name: pinky + state: present + userauthtype: + - otp + - radius + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +user: + description: User as returned by IPA API + returned: always + type: dict +''' + +import base64 +import hashlib +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class UserIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(UserIPAClient, self).__init__(module, host, port, protocol) + + def user_find(self, name): + return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name}) + + def user_add(self, name, item): + return self._post_json(method='user_add', name=name, item=item) + + def user_mod(self, name, item): + return self._post_json(method='user_mod', name=name, item=item) + + def user_del(self, name): + return self._post_json(method='user_del', name=name) + + def user_disable(self, name): + return self._post_json(method='user_disable', name=name) + + def user_enable(self, name): + return self._post_json(method='user_enable', name=name) + + +def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None, + mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None, + title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None, + userauthtype=None): + user = {} + if displayname is not None: + user['displayname'] = displayname + if krbpasswordexpiration is not None: + user['krbpasswordexpiration'] = krbpasswordexpiration + "Z" + if givenname is not None: + user['givenname'] = givenname + if loginshell is not None: + user['loginshell'] = loginshell + if mail is not None: + user['mail'] = mail + user['nsaccountlock'] = nsaccountlock + if sn is not None: + user['sn'] = sn + if sshpubkey is not None: + user['ipasshpubkey'] = sshpubkey + if telephonenumber is not None: + user['telephonenumber'] = telephonenumber + if title is not None: + user['title'] = title + if userpassword is not None: + user['userpassword'] = userpassword + if gidnumber is not None: + user['gidnumber'] = gidnumber + if uidnumber is not None: + user['uidnumber'] = uidnumber + if homedirectory is not None: + user['homedirectory'] = homedirectory + if userauthtype is not None: + user['ipauserauthtype'] = userauthtype + + return user + + +def get_user_diff(client, ipa_user, module_user): + """ + Return the keys of each dict whereas values are different. Unfortunately the IPA + API returns everything as a list even if only a single value is possible. + Therefore some more complexity is needed. 
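+ For example, the module may pass givenname as the plain string 'Pinky' while the IPA API returns it as the list ['Pinky']; both must be treated as equal.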
+ The method will check if the value type of module_user.attr is not a list and + create a list with that element if the same attribute in ipa_user is list. In this way I hope that the method + must not be changed if the returned API dict is changed. + :param ipa_user: + :param module_user: + :return: + """ + # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints. + # These are used for comparison. + sshpubkey = None + if 'ipasshpubkey' in module_user: + hash_algo = 'md5' + if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:': + hash_algo = 'sha256' + module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']] + # Remove the ipasshpubkey element as it is not returned from IPA but save it's value to be used later on + sshpubkey = module_user['ipasshpubkey'] + del module_user['ipasshpubkey'] + + result = client.get_diff(ipa_data=ipa_user, module_data=module_user) + + # If there are public keys, remove the fingerprints and add them back to the dict + if sshpubkey is not None: + del module_user['sshpubkeyfp'] + module_user['ipasshpubkey'] = sshpubkey + return result + + +def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): + """ + Return the public key fingerprint of a given public SSH key + in format "[fp] [comment] (ssh-rsa)" where fp is of the format: + FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 + for md5 or + SHA256:[base64] + for sha256 + Comments are assumed to be all characters past the second + whitespace character in the sshpubkey string. + :param ssh_key: + :param hash_algo: + :return: + """ + parts = ssh_key.strip().split(None, 2) + if len(parts) == 0: + return None + key_type = parts[0] + key = base64.b64decode(parts[1].encode('ascii')) + + if hash_algo == 'md5': + fp_plain = hashlib.md5(key).hexdigest() + key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() + elif hash_algo == 'sha256': + fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=') + key_fp = 'SHA256:{fp}'.format(fp=fp_plain) + if len(parts) < 3: + return "%s (%s)" % (key_fp, key_type) + else: + comment = parts[2] + return "%s %s (%s)" % (key_fp, comment, key_type) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['uid'] + nsaccountlock = state == 'disabled' + + module_user = get_user_dict(displayname=module.params.get('displayname'), + krbpasswordexpiration=module.params.get('krbpasswordexpiration'), + givenname=module.params.get('givenname'), + loginshell=module.params['loginshell'], + mail=module.params['mail'], sn=module.params['sn'], + sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock, + telephonenumber=module.params['telephonenumber'], title=module.params['title'], + userpassword=module.params['password'], + gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'), + homedirectory=module.params.get('homedirectory'), + userauthtype=module.params.get('userauthtype')) + + update_password = module.params.get('update_password') + ipa_user = client.user_find(name=name) + + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_user: + changed = True + if not module.check_mode: + ipa_user = client.user_add(name=name, item=module_user) + else: + if update_password == 'on_create': + module_user.pop('userpassword', None) + diff = get_user_diff(client, ipa_user, module_user) + if len(diff) > 0: + changed = 
True + if not module.check_mode: + ipa_user = client.user_mod(name=name, item=module_user) + else: + if ipa_user: + changed = True + if not module.check_mode: + client.user_del(name) + + return changed, ipa_user + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(displayname=dict(type='str'), + givenname=dict(type='str'), + update_password=dict(type='str', default="always", + choices=['always', 'on_create'], + no_log=False), + krbpasswordexpiration=dict(type='str', no_log=False), + loginshell=dict(type='str'), + mail=dict(type='list', elements='str'), + sn=dict(type='str'), + uid=dict(type='str', required=True, aliases=['name']), + gidnumber=dict(type='str'), + uidnumber=dict(type='str'), + password=dict(type='str', no_log=True), + sshpubkey=dict(type='list', elements='str'), + state=dict(type='str', default='present', + choices=['present', 'absent', 'enabled', 'disabled']), + telephonenumber=dict(type='list', elements='str'), + title=dict(type='str'), + homedirectory=dict(type='str'), + userauthtype=dict(type='list', elements='str', + choices=['password', 'radius', 'otp', 'pkinit', 'hardened'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = UserIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + # If sshpubkey is defined as None than module.params['sshpubkey'] is [None]. IPA itself returns None (not a list). + # Therefore a small check here to replace list(None) by None. Otherwise get_user_diff() would return sshpubkey + # as different which should be avoided. + if module.params['sshpubkey'] is not None: + if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "": + module.params['sshpubkey'] = None + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, user = ensure(module, client) + module.exit_json(changed=changed, user=user) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipa_vault.py b/ansible_collections/community/general/plugins/modules/ipa_vault.py new file mode 100644 index 000000000..84b72c1ab --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipa_vault.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Juan Manuel Parrilla +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_vault +author: Juan Manuel Parrilla (@jparrill) +short_description: Manage FreeIPA vaults +description: +- Add, modify and delete vaults and secret vaults. +- KRA service should be enabled to use this module. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + cn: + description: + - Vault name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: + - Description. + type: str + ipavaulttype: + description: + - Vault types are based on security level. + default: "symmetric" + choices: ["asymmetric", "standard", "symmetric"] + aliases: ["vault_type"] + type: str + ipavaultpublickey: + description: + - Public key. 
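+ - Only used for asymmetric vaults.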
+ aliases: ["vault_public_key"] + type: str + ipavaultsalt: + description: + - Vault Salt. + aliases: ["vault_salt"] + type: str + username: + description: + - Any user can own one or more user vaults. + - Mutually exclusive with service. + aliases: ["user"] + type: list + elements: str + service: + description: + - Any service can own one or more service vaults. + - Mutually exclusive with user. + type: str + state: + description: + - State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + replace: + description: + - Force replace the existent vault on IPA server. + type: bool + default: false + choices: ["True", "False"] + validate_certs: + description: + - Validate IPA server certificates. + type: bool + default: true +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.attributes + +''' + +EXAMPLES = r''' +- name: Ensure vault is present + community.general.ipa_vault: + name: vault01 + vault_type: standard + user: user01 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + validate_certs: false + +- name: Ensure vault is present for Admin user + community.general.ipa_vault: + name: vault01 + vault_type: standard + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure vault is absent + community.general.ipa_vault: + name: vault01 + vault_type: standard + user: user01 + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Modify vault if already exists + community.general.ipa_vault: + name: vault01 + vault_type: standard + description: "Vault for test" + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + replace: true + +- name: Get vault info if already exists + community.general.ipa_vault: + name: vault01 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +vault: + description: Vault as returned by IPA API + returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class VaultIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(VaultIPAClient, self).__init__(module, host, port, protocol) + + def vault_find(self, name): + return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name}) + + def vault_add_internal(self, name, item): + return self._post_json(method='vault_add_internal', name=name, item=item) + + def vault_mod_internal(self, name, item): + return self._post_json(method='vault_mod_internal', name=name, item=item) + + def vault_del(self, name): + return self._post_json(method='vault_del', name=name) + + +def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None): + vault = {} + + if description is not None: + vault['description'] = description + if vault_type is not None: + vault['ipavaulttype'] = vault_type + if vault_salt is not None: + vault['ipavaultsalt'] = vault_salt + if vault_public_key is not None: + vault['ipavaultpublickey'] = vault_public_key + if service is not None: + vault['service'] = service + return vault + + +def get_vault_diff(client, ipa_vault, module_vault, module): + return client.get_diff(ipa_data=ipa_vault, module_data=module_vault) + + +def ensure(module, client): + state = module.params['state'] + name = 
module.params['cn'] + user = module.params['username'] + replace = module.params['replace'] + + module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'], + vault_salt=module.params['ipavaultsalt'], + vault_public_key=module.params['ipavaultpublickey'], + service=module.params['service']) + ipa_vault = client.vault_find(name=name) + + changed = False + if state == 'present': + if not ipa_vault: + # New vault + changed = True + if not module.check_mode: + ipa_vault = client.vault_add_internal(name, item=module_vault) + else: + # Already exists + if replace: + diff = get_vault_diff(client, ipa_vault, module_vault, module) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_vault.get(key) + client.vault_mod_internal(name=name, item=data) + + else: + if ipa_vault: + changed = True + if not module.check_mode: + client.vault_del(name) + + return changed, client.vault_find(name=name) + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str'), + ipavaulttype=dict(type='str', default='symmetric', + choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']), + ipavaultsalt=dict(type='str', aliases=['vault_salt']), + ipavaultpublickey=dict(type='str', aliases=['vault_public_key']), + service=dict(type='str'), + replace=dict(type='bool', default=False, choices=[True, False]), + state=dict(type='str', default='present', choices=['present', 'absent']), + username=dict(type='list', elements='str', aliases=['user'])) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['username', 'service']]) + + client = VaultIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, vault = ensure(module, client) + module.exit_json(changed=changed, vault=vault) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipify_facts.py b/ansible_collections/community/general/plugins/modules/ipify_facts.py new file mode 100644 index 000000000..ab96d7e94 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipify_facts.py @@ -0,0 +1,110 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: ipify_facts +short_description: Retrieve the public IP of your internet gateway +description: + - If behind NAT and need to know the public IP of your internet gateway. +author: +- René Moser (@resmo) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + api_url: + description: + - URL of the ipify.org API service. + - C(?format=json) will be appended per default. + type: str + default: https://api.ipify.org/ + timeout: + description: + - HTTP connection timeout in seconds. 
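The ipify module's whole job is one HTTP round-trip: GET the API URL with C(?format=json) appended and read the C(ip) field out of the JSON body. A standard-library sketch of the same round-trip, useful for checking the endpoint by hand; no Ansible required, and the URL and timeout match the defaults documented above:

```python
import json
from urllib.request import urlopen

# Same request the module issues via fetch_url, using the documented defaults.
with urlopen('https://api.ipify.org/?format=json', timeout=10) as resp:
    print(json.load(resp)['ip'])
```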
+ type: int + default: 10 + validate_certs: + description: + - When set to C(NO), SSL certificates will not be validated. + type: bool + default: true +notes: + - Visit https://www.ipify.org to get more information. +''' + +EXAMPLES = r''' +# Gather IP facts from ipify.org +- name: Get my public IP + community.general.ipify_facts: + +# Gather IP facts from your own ipify service endpoint with a custom timeout +- name: Get my public IP + community.general.ipify_facts: + api_url: http://api.example.com/ipify + timeout: 20 +''' + +RETURN = r''' +--- +ipify_public_ip: + description: Public IP of the internet gateway. + returned: success + type: str + sample: 1.2.3.4 +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_text + + +class IpifyFacts(object): + + def __init__(self): + self.api_url = module.params.get('api_url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'ipify_public_ip': None + } + (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout) + + if not response: + module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout)) + + data = json.loads(to_text(response.read())) + result['ipify_public_ip'] = data.get('ip') + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_url=dict(type='str', default='https://api.ipify.org/'), + timeout=dict(type='int', default=10), + validate_certs=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + ipify_facts = IpifyFacts().run() + ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts) + module.exit_json(**ipify_facts_result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py b/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py new file mode 100644 index 000000000..f29b3cbf4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Aleksei Kostiuk +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ipinfoio_facts +short_description: Retrieve IP geolocation facts of a host's IP address +description: + - "Gather IP geolocation facts of a host's IP address using ipinfo.io API" +author: "Aleksei Kostiuk (@akostyuk)" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + timeout: + description: + - HTTP connection timeout in seconds + required: false + default: 10 + type: int + http_agent: + description: + - Set http user agent + required: false + default: "ansible-ipinfoio-module/0.0.1" + type: str +notes: + - "Check http://ipinfo.io/ for more information" +''' + +EXAMPLES = ''' +# Retrieve geolocation data of a host's IP address +- name: Get IP geolocation data + community.general.ipinfoio_facts: +''' + +RETURN = ''' +ansible_facts: + description: "Dictionary of ip geolocation facts for a host's IP address" + returned: changed + type: complex + 
contains: + ip: + description: "Public IP address of a host" + type: str + sample: "8.8.8.8" + hostname: + description: Domain name + type: str + sample: "google-public-dns-a.google.com" + country: + description: ISO 3166-1 alpha-2 country code + type: str + sample: "US" + region: + description: State or province name + type: str + sample: "California" + city: + description: City name + type: str + sample: "Mountain View" + loc: + description: Latitude and longitude of the location + type: str + sample: "37.3860,-122.0838" + org: + description: "Organization's name" + type: str + sample: "AS3356 Level 3 Communications, Inc." + postal: + description: Postal code + type: str + sample: "94035" +''' +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.urls import fetch_url + + +USER_AGENT = 'ansible-ipinfoio-module/0.0.1' + + +class IpinfoioFacts(object): + + def __init__(self, module): + self.url = 'https://ipinfo.io/json' + self.timeout = module.params.get('timeout') + self.module = module + + def get_geo_data(self): + response, info = fetch_url(self.module, self.url, force=True, # NOQA + timeout=self.timeout) + if info['status'] != 200: + self.module.fail_json(msg='Could not get {0} page, ' + 'check for connectivity!'.format(self.url)) + try: + content = response.read() + result = self.module.from_json(content.decode('utf8')) + except ValueError: + self.module.fail_json( + msg='Failed to parse the ipinfo.io response: ' + '{0} {1}'.format(self.url, content)) + return result + + +def main(): + module = AnsibleModule( # NOQA + argument_spec=dict( + http_agent=dict(default=USER_AGENT), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + ipinfoio = IpinfoioFacts(module) + ipinfoio_result = dict( + changed=False, ansible_facts=ipinfoio.get_geo_data()) + module.exit_json(**ipinfoio_result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipmi_boot.py b/ansible_collections/community/general/plugins/modules/ipmi_boot.py new file mode 100644 index 000000000..7a4d2b6ec --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipmi_boot.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ipmi_boot +short_description: Management of order of boot devices +description: + - Use this module to manage the order of boot devices. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Hostname or IP address of the BMC. + required: true + type: str + port: + description: + - Remote RMCP port. + default: 623 + type: int + user: + description: + - Username to use to connect to the BMC. + required: true + type: str + password: + description: + - Password to connect to the BMC. + required: true + type: str + key: + description: + - Encryption key to connect to the BMC in hex format.
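The key option just described is supplied as a hex string, while pyghmi's C(command.Command) expects the raw C(kg) bytes, so the module round-trips it through binascii (shown further below in main()). A quick sketch of that conversion, using the sample key from this module's EXAMPLES section:

```python
import binascii

# Hex string from the module's EXAMPLES section -> 16 raw key bytes.
key = binascii.unhexlify('1234567890AABBCCDEFF000000EEEE12')
print(len(key))  # 16
```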
+ required: false + type: str + version_added: 4.1.0 + bootdev: + description: + - Set the boot device to use on next reboot. + - "The choices for the device are: + - network -- Request network boot + - floppy -- Boot from floppy + - hd -- Boot from hard drive + - safe -- Boot from hard drive, requesting 'safe mode' + - optical -- Boot from CD/DVD/BD drive + - setup -- Boot into setup utility + - default -- Remove any IPMI-directed boot device request" + required: true + choices: + - network + - floppy + - hd + - safe + - optical + - setup + - default + type: str + state: + description: + - Whether the requested boot device should be set or cleared. + - "The choices for the state are: + - present -- Ensure the given boot device is requested for the next boot + - absent -- Clear the boot device request if it matches the given boot device" + default: present + choices: [ present, absent ] + type: str + persistent: + description: + - If set, ask that the system firmware use this device beyond the next boot. + Be aware many systems do not honor this. + type: bool + default: false + uefiboot: + description: + - If set, request UEFI boot explicitly. + Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option. + In practice, this flag not being set does not preclude UEFI boot on any system I've encountered. + type: bool + default: false +requirements: + - "python >= 2.6" + - pyghmi +author: "Bulat Gaifullin (@bgaifullin) " +''' + +RETURN = ''' +bootdev: + description: The boot device name which will be used beyond next boot. + returned: success + type: str + sample: default +persistent: + description: If True, system firmware will use this device beyond next boot. + returned: success + type: bool + sample: false +uefimode: + description: If True, system firmware will use UEFI boot explicitly beyond next boot.
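Under the hood, ipmi_boot is a compare-and-set: pyghmi's C(get_bootdev()) result is compared against the requested triple after the module fills in a default C(uefimode), and C(set_bootdev()) is only called on a mismatch (see main() below). A sketch of that idempotence check, with dict shapes as the module assumes them:

```python
# Shapes as the module assumes them after current.setdefault('uefimode', ...).
current = {'bootdev': 'default', 'persistent': False, 'uefimode': False}
desired = {'bootdev': 'hd', 'persistent': False, 'uefimode': False}

if current != desired:
    # In the module, this is where set_bootdev() is invoked.
    print('would call set_bootdev(bootdev=%r, persist=False, uefiboot=False)' % desired['bootdev'])
```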
+ returned: success + type: bool + sample: false +''' + +EXAMPLES = ''' +- name: Ensure bootdevice is HD + community.general.ipmi_boot: + name: test.testdomain.com + user: admin + password: password + bootdev: hd + +- name: Ensure bootdevice is not Network + community.general.ipmi_boot: + name: test.testdomain.com + user: admin + password: password + key: 1234567890AABBCCDEFF000000EEEE12 + bootdev: network + state: absent +''' + +import traceback +import binascii + +PYGHMI_IMP_ERR = None +try: + from pyghmi.ipmi import command +except ImportError: + PYGHMI_IMP_ERR = traceback.format_exc() + command = None + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + port=dict(default=623, type='int'), + user=dict(required=True, no_log=True), + password=dict(required=True, no_log=True), + key=dict(type='str', no_log=True), + state=dict(default='present', choices=['present', 'absent']), + bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']), + persistent=dict(default=False, type='bool'), + uefiboot=dict(default=False, type='bool') + ), + supports_check_mode=True, + ) + + if command is None: + module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) + + name = module.params['name'] + port = module.params['port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + bootdev = module.params['bootdev'] + persistent = module.params['persistent'] + uefiboot = module.params['uefiboot'] + request = dict() + + if state == 'absent' and bootdev == 'default': + module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.") + + try: + if module.params['key']: + key = binascii.unhexlify(module.params['key']) + else: + key = None + except Exception as e: + module.fail_json(msg="Unable to convert 'key' from hex string.") + + # --- run command --- + try: + ipmi_cmd = command.Command( + bmc=name, userid=user, password=password, port=port, kg=key + ) + module.debug('ipmi instantiated - name: "%s"' % name) + current = ipmi_cmd.get_bootdev() + # uefimode may not supported by BMC, so use desired value as default + current.setdefault('uefimode', uefiboot) + if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): + request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent) + elif state == 'absent' and current['bootdev'] == bootdev: + request = dict(bootdev='default') + else: + module.exit_json(changed=False, **current) + + if module.check_mode: + response = dict(bootdev=request['bootdev']) + else: + response = ipmi_cmd.set_bootdev(**request) + + if 'error' in response: + module.fail_json(msg=response['error']) + + if 'persist' in request: + response['persistent'] = request['persist'] + if 'uefiboot' in request: + response['uefimode'] = request['uefiboot'] + + module.exit_json(changed=True, **response) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipmi_power.py b/ansible_collections/community/general/plugins/modules/ipmi_power.py new file mode 100644 index 000000000..e152f35eb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipmi_power.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ 
(see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ipmi_power +short_description: Power management for machine +description: + - Use this module for power management +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Hostname or ip address of the BMC. + required: true + type: str + port: + description: + - Remote RMCP port. + default: 623 + type: int + user: + description: + - Username to use to connect to the BMC. + required: true + type: str + password: + description: + - Password to connect to the BMC. + required: true + type: str + key: + description: + - Encryption key to connect to the BMC in hex format. + required: false + type: str + version_added: 4.1.0 + state: + description: + - Whether to ensure that the machine in desired state. + - "The choices for state are: + - on -- Request system turn on + - off -- Request system turn off without waiting for OS to shutdown + - shutdown -- Have system request OS proper shutdown + - reset -- Request system reset without waiting for OS + - boot -- If system is off, then 'on', else 'reset'" + - Either this option or I(machine) is required. + choices: ['on', 'off', shutdown, reset, boot] + type: str + timeout: + description: + - Maximum number of seconds before interrupt request. + default: 300 + type: int + machine: + description: + - Provide a list of the remote target address for the bridge IPMI request, + and the power status. + - Either this option or I(state) is required. + required: false + type: list + elements: dict + version_added: 4.3.0 + suboptions: + targetAddress: + description: + - Remote target address for the bridge IPMI request. + type: int + required: true + state: + description: + - Whether to ensure that the machine specified by I(targetAddress) in desired state. + - If this option is not set, the power state is set by I(state). + - If both this option and I(state) are set, this option takes precedence over I(state). + choices: ['on', 'off', shutdown, reset, boot] + type: str + +requirements: + - "python >= 2.6" + - pyghmi +author: "Bulat Gaifullin (@bgaifullin) " +''' + +RETURN = ''' +powerstate: + description: The current power state of the machine. + returned: success and I(machine) is not provided + type: str + sample: 'on' +status: + description: The current power state of the machine when the machine option is set. + returned: success and I(machine) is provided + type: list + elements: dict + version_added: 4.3.0 + contains: + powerstate: + description: The current power state of the machine specified by I(targetAddress). + type: str + targetAddress: + description: The remote target address. 
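The module validates each targetAddress before issuing a bridge request: IPMB bridge addresses fit in a single byte, so anything at or above 0x100 is rejected up front (the C(INVALID_TARGET_ADDRESS) constant in the code below). A minimal sketch of that bound check:

```python
INVALID_TARGET_ADDRESS = 0x100  # first value that no longer fits in one byte

def valid_target_address(addr):
    # Mirrors the module's "taddr >= INVALID_TARGET_ADDRESS" rejection.
    return 0 <= addr < INVALID_TARGET_ADDRESS

print(valid_target_address(48), valid_target_address(256))  # True False
```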
+ type: int + sample: [ + { + "powerstate": "on", + "targetAddress": 48, + }, + { + "powerstate": "on", + "targetAddress": 50, + }, + ] +''' + +EXAMPLES = ''' +- name: Ensure machine is powered on + community.general.ipmi_power: + name: test.testdomain.com + user: admin + password: password + state: 'on' + +- name: Ensure machines of which remote target address is 48 and 50 are powered off + community.general.ipmi_power: + name: test.testdomain.com + user: admin + password: password + state: 'off' + machine: + - targetAddress: 48 + - targetAddress: 50 + +- name: Ensure machine of which remote target address is 48 is powered on, and 50 is powered off + community.general.ipmi_power: + name: test.testdomain.com + user: admin + password: password + machine: + - targetAddress: 48 + state: 'on' + - targetAddress: 50 + state: 'off' +''' + +import traceback +import binascii + +PYGHMI_IMP_ERR = None +INVALID_TARGET_ADDRESS = 0x100 +try: + from pyghmi.ipmi import command +except ImportError: + PYGHMI_IMP_ERR = traceback.format_exc() + command = None + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + port=dict(default=623, type='int'), + state=dict(choices=['on', 'off', 'shutdown', 'reset', 'boot']), + user=dict(required=True, no_log=True), + password=dict(required=True, no_log=True), + key=dict(type='str', no_log=True), + timeout=dict(default=300, type='int'), + machine=dict( + type='list', elements='dict', + options=dict( + targetAddress=dict(required=True, type='int'), + state=dict(type='str', choices=['on', 'off', 'shutdown', 'reset', 'boot']), + ), + ), + ), + supports_check_mode=True, + required_one_of=( + ['state', 'machine'], + ), + ) + + if command is None: + module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR) + + name = module.params['name'] + port = module.params['port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + timeout = module.params['timeout'] + machine = module.params['machine'] + + try: + if module.params['key']: + key = binascii.unhexlify(module.params['key']) + else: + key = None + except Exception: + module.fail_json(msg="Unable to convert 'key' from hex string.") + + # --- run command --- + try: + ipmi_cmd = command.Command( + bmc=name, userid=user, password=password, port=port, kg=key + ) + module.debug('ipmi instantiated - name: "%s"' % name) + + changed = False + if machine is None: + current = ipmi_cmd.get_power() + if current['powerstate'] != state: + response = {'powerstate': state} if module.check_mode \ + else ipmi_cmd.set_power(state, wait=timeout) + changed = True + else: + response = current + + if 'error' in response: + module.fail_json(msg=response['error']) + + module.exit_json(changed=changed, **response) + else: + response = [] + for entry in machine: + taddr = entry['targetAddress'] + if taddr >= INVALID_TARGET_ADDRESS: + module.fail_json(msg="targetAddress should be set between 0 to 255.") + + try: + # bridge_request is supported on pyghmi 1.5.30 and later + current = ipmi_cmd.get_power(bridge_request={"addr": taddr}) + except TypeError: + module.fail_json( + msg="targetAddress isn't supported on the installed pyghmi.") + + if entry['state']: + tstate = entry['state'] + elif state: + tstate = state + else: + module.fail_json(msg="Either state or suboption of machine state should be set.") + + if current['powerstate'] != tstate: + changed = True + if not 
module.check_mode: + new = ipmi_cmd.set_power(tstate, wait=timeout, bridge_request={"addr": taddr}) + if 'error' in new: + module.fail_json(msg=new['error']) + + response.append( + {'targetAddress': taddr, 'powerstate': new['powerstate']}) + + if current['powerstate'] == tstate or module.check_mode: + response.append({'targetAddress': taddr, 'powerstate': tstate}) + + module.exit_json(changed=changed, status=response) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/iptables_state.py b/ansible_collections/community/general/plugins/modules/iptables_state.py new file mode 100644 index 000000000..d0ea7ad79 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/iptables_state.py @@ -0,0 +1,654 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, quidame +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: iptables_state +short_description: Save iptables state into a file or restore it from a file +version_added: '1.1.0' +author: quidame (@quidame) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.flow +description: + - C(iptables) is used to set up, maintain, and inspect the tables of IP + packet filter rules in the Linux kernel. + - This module handles the saving and/or loading of rules. This is the same + as the behaviour of the C(iptables-save) and C(iptables-restore) (or + C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this + module uses internally. + - Modifying the state of the firewall remotely may lead to losing access to + the host in case of a mistake in the new ruleset. This module embeds a + rollback feature to avoid this, by telling the host to restore previous + rules if a cookie is still there after a given delay, and all this time + telling the controller to try to remove this cookie on the host through a + new connection. +notes: + - The rollback feature is not a module option and depends on the task's + attributes. To enable it, the module must be played asynchronously, i.e. + by setting task attributes I(poll) to C(0), and I(async) to a value less + than or equal to C(ANSIBLE_TIMEOUT). If I(async) is greater, the rollback + will still happen if it needs to, but you will experience a connection + timeout instead of more relevant info returned by the module after its + failure. +attributes: + check_mode: + support: full + diff_mode: + support: none + action: + support: full + async: + support: full +options: + counters: + description: + - Save or restore the values of all packet and byte counters. + - When C(true), the module is not idempotent. + type: bool + default: false + ip_version: + description: + - Which version of the IP protocol this module should apply to. + type: str + choices: [ ipv4, ipv6 ] + default: ipv4 + modprobe: + description: + - Specify the path to the C(modprobe) program internally used by iptables + related commands to load kernel modules. + - By default, C(/proc/sys/kernel/modprobe) is inspected to determine the + executable's path. + type: path + noflush: + description: + - For I(state=restored), ignored otherwise.
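The noflush option maps directly onto iptables-restore's C(--noflush) flag: when set, the module appends it to the restore command line it assembles later (MAINCOMMAND in main() below). A sketch of the resulting invocation, with an illustrative rules file path:

```python
# Illustrative only: mirrors how MAINCOMMAND is assembled in main() below.
noflush = True
cmd = ['iptables-restore']
if noflush:
    cmd.append('--noflush')
cmd.append('/run/iptables.apply')
print(' '.join(cmd))  # iptables-restore --noflush /run/iptables.apply
```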
+ - If C(false), restoring iptables rules from a file flushes (deletes) + all previous contents of the respective table(s). If C(true), the + previous rules are left untouched (but policies are updated anyway, + for all built-in chains). + type: bool + default: false + path: + description: + - The file the iptables state should be saved to. + - The file the iptables state should be restored from. + type: path + required: true + state: + description: + - Whether the firewall state should be saved (into a file) or restored + (from a file). + type: str + choices: [ saved, restored ] + required: true + table: + description: + - When I(state=restored), restore only the named table even if the input + file contains other tables. Fail if the named table is not declared in + the file. + - When I(state=saved), restrict output to the specified table. If not + specified, output includes all active tables. + type: str + choices: [ filter, nat, mangle, raw, security ] + wait: + description: + - Wait N seconds for the xtables lock to prevent instant failure in case + multiple instances of the program are running concurrently. + type: int +requirements: [iptables, ip6tables] +''' + +EXAMPLES = r''' +# This will apply to all loaded/active IPv4 tables. +- name: Save current state of the firewall in system file + community.general.iptables_state: + state: saved + path: /etc/sysconfig/iptables + +# This will apply only to IPv6 filter table. +- name: save current state of the firewall in system file + community.general.iptables_state: + ip_version: ipv6 + table: filter + state: saved + path: /etc/iptables/rules.v6 + +# This will load a state from a file, with a rollback in case of access loss +- name: restore firewall state from a file + community.general.iptables_state: + state: restored + path: /run/iptables.apply + async: "{{ ansible_timeout }}" + poll: 0 + +# This will load new rules by appending them to the current ones +- name: restore firewall state from a file + community.general.iptables_state: + state: restored + path: /run/iptables.apply + noflush: true + async: "{{ ansible_timeout }}" + poll: 0 + +# This will only retrieve information +- name: get current state of the firewall + community.general.iptables_state: + state: saved + path: /tmp/iptables + check_mode: true + changed_when: false + register: iptables_state + +- name: show current state of the firewall + ansible.builtin.debug: + var: iptables_state.initial_state +''' + +RETURN = r''' +applied: + description: Whether or not the wanted state has been successfully restored. + type: bool + returned: always + sample: true +initial_state: + description: The current state of the firewall when module starts. + type: list + elements: str + returned: always + sample: [ + "# Generated by xtables-save v1.8.2", + "*filter", + ":INPUT ACCEPT [0:0]", + ":FORWARD ACCEPT [0:0]", + ":OUTPUT ACCEPT [0:0]", + "COMMIT", + "# Completed" + ] +restored: + description: The state the module restored, whenever it is finally applied or not. + type: list + elements: str + returned: always + sample: [ + "# Generated by xtables-save v1.8.2", + "*filter", + ":INPUT DROP [0:0]", + ":FORWARD DROP [0:0]", + ":OUTPUT ACCEPT [0:0]", + "-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT", + "-A INPUT -m conntrack --ctstate INVALID -j DROP", + "-A INPUT -i lo -j ACCEPT", + "-A INPUT -p icmp -j ACCEPT", + "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT", + "COMMIT", + "# Completed" + ] +saved: + description: The iptables state the module saved. 
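These return values are normalized before any comparison: C(filter_and_format_state()) (defined further below) strips the timestamps that iptables-save appends to its header and footer comments and, unless I(counters) is true, zeroes the packet/byte counters, so two runs over an unchanged ruleset compare equal. A sketch of that normalization on sample lines (the timestamp is illustrative):

```python
import re

# Timestamp removal, as in filter_and_format_state() below.
line = '# Generated by xtables-save v1.8.2 on Sat Apr 13 14:04:41 2024'
line = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', line)
print(line)  # '# Generated by xtables-save v1.8.2'

# Counter zeroing, applied when counters=false.
counters = ':INPUT ACCEPT [1234:567890]'
print(re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', counters))  # ':INPUT ACCEPT [0:0]'
```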
+ type: list + elements: str + returned: always + sample: [ + "# Generated by xtables-save v1.8.2", + "*filter", + ":INPUT ACCEPT [0:0]", + ":FORWARD DROP [0:0]", + ":OUTPUT ACCEPT [0:0]", + "COMMIT", + "# Completed" + ] +tables: + description: The iptables we have interest for when module starts. + type: dict + contains: + table: + description: Policies and rules for all chains of the named table. + type: list + elements: str + sample: |- + { + "filter": [ + ":INPUT ACCEPT", + ":FORWARD ACCEPT", + ":OUTPUT ACCEPT", + "-A INPUT -i lo -j ACCEPT", + "-A INPUT -p icmp -j ACCEPT", + "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT", + "-A INPUT -j REJECT --reject-with icmp-host-prohibited" + ], + "nat": [ + ":PREROUTING ACCEPT", + ":INPUT ACCEPT", + ":OUTPUT ACCEPT", + ":POSTROUTING ACCEPT" + ] + } + returned: always +''' + + +import re +import os +import time +import tempfile +import filecmp +import shutil + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +IPTABLES = dict( + ipv4='iptables', + ipv6='ip6tables', +) + +SAVE = dict( + ipv4='iptables-save', + ipv6='ip6tables-save', +) + +RESTORE = dict( + ipv4='iptables-restore', + ipv6='ip6tables-restore', +) + +TABLES = ['filter', 'mangle', 'nat', 'raw', 'security'] + + +def read_state(b_path): + ''' + Read a file and store its content in a variable as a list. + ''' + with open(b_path, 'r') as f: + text = f.read() + return [t for t in text.splitlines() if t != ''] + + +def write_state(b_path, lines, changed): + ''' + Write given contents to the given path, and return changed status. + ''' + # Populate a temporary file + tmpfd, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfd, 'w') as f: + f.write("{0}\n".format("\n".join(lines))) + + # Prepare to copy temporary file to the final destination + if not os.path.exists(b_path): + b_destdir = os.path.dirname(b_path) + destdir = to_native(b_destdir, errors='surrogate_or_strict') + if b_destdir and not os.path.exists(b_destdir) and not module.check_mode: + try: + os.makedirs(b_destdir) + except Exception as err: + module.fail_json( + msg='Error creating %s: %s' % (destdir, to_native(err)), + initial_state=lines) + changed = True + + elif not filecmp.cmp(tmpfile, b_path): + changed = True + + # Do it + if changed and not module.check_mode: + try: + shutil.copyfile(tmpfile, b_path) + except Exception as err: + path = to_native(b_path, errors='surrogate_or_strict') + module.fail_json( + msg='Error saving state into %s: %s' % (path, to_native(err)), + initial_state=lines) + + return changed + + +def initialize_from_null_state(initializer, initcommand, fallbackcmd, table): + ''' + This ensures iptables-state output is suitable for iptables-restore to roll + back to it, i.e. iptables-save output is not empty. This also works for the + iptables-nft-save alternative. + ''' + if table is None: + table = 'filter' + + commandline = list(initializer) + commandline += ['-t', table] + dummy = module.run_command(commandline, check_rc=True) + (rc, out, err) = module.run_command(initcommand, check_rc=True) + if '*%s' % table not in out.splitlines(): + # The last resort. + iptables_input = '*%s\n:OUTPUT ACCEPT\nCOMMIT\n' % table + dummy = module.run_command(fallbackcmd, data=iptables_input, check_rc=True) + (rc, out, err) = module.run_command(initcommand, check_rc=True) + + return rc, out, err + + +def filter_and_format_state(string): + ''' + Remove timestamps to ensure idempotence between runs. Also remove counters + by default. 
And return the result as a list. + ''' + string = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', string) + if not module.params['counters']: + string = re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', string) + lines = [line for line in string.splitlines() if line != ''] + return lines + + +def per_table_state(command, state): + ''' + Convert raw iptables-save output into usable datastructure, for reliable + comparisons between initial and final states. + ''' + tables = dict() + for t in TABLES: + COMMAND = list(command) + if '*%s' % t in state.splitlines(): + COMMAND.extend(['--table', t]) + dummy, out, dummy = module.run_command(COMMAND, check_rc=True) + out = re.sub(r'(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, r'', out) + out = re.sub(r' *\[[0-9]+:[0-9]+\] *', r'', out) + tables[t] = [tt for tt in out.splitlines() if tt != ''] + return tables + + +def main(): + + global module + + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True), + state=dict(type='str', choices=['saved', 'restored'], required=True), + table=dict(type='str', choices=['filter', 'nat', 'mangle', 'raw', 'security']), + noflush=dict(type='bool', default=False), + counters=dict(type='bool', default=False), + modprobe=dict(type='path'), + ip_version=dict(type='str', choices=['ipv4', 'ipv6'], default='ipv4'), + wait=dict(type='int'), + _timeout=dict(type='int'), + _back=dict(type='path'), + ), + required_together=[ + ['_timeout', '_back'], + ], + supports_check_mode=True, + ) + + # We'll parse iptables-restore stderr + module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C') + + path = module.params['path'] + state = module.params['state'] + table = module.params['table'] + noflush = module.params['noflush'] + counters = module.params['counters'] + modprobe = module.params['modprobe'] + ip_version = module.params['ip_version'] + wait = module.params['wait'] + _timeout = module.params['_timeout'] + _back = module.params['_back'] + + bin_iptables = module.get_bin_path(IPTABLES[ip_version], True) + bin_iptables_save = module.get_bin_path(SAVE[ip_version], True) + bin_iptables_restore = module.get_bin_path(RESTORE[ip_version], True) + + os.umask(0o077) + changed = False + COMMANDARGS = [] + INITCOMMAND = [bin_iptables_save] + INITIALIZER = [bin_iptables, '-L', '-n'] + TESTCOMMAND = [bin_iptables_restore, '--test'] + FALLBACKCMD = [bin_iptables_restore] + + if counters: + COMMANDARGS.append('--counters') + + if table is not None: + COMMANDARGS.extend(['--table', table]) + + if wait is not None: + TESTCOMMAND.extend(['--wait', '%s' % wait]) + + if modprobe is not None: + b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict') + if not os.path.exists(b_modprobe): + module.fail_json(msg="modprobe %s not found" % modprobe) + if not os.path.isfile(b_modprobe): + module.fail_json(msg="modprobe %s not a file" % modprobe) + if not os.access(b_modprobe, os.R_OK): + module.fail_json(msg="modprobe %s not readable" % modprobe) + if not os.access(b_modprobe, os.X_OK): + module.fail_json(msg="modprobe %s not executable" % modprobe) + COMMANDARGS.extend(['--modprobe', modprobe]) + INITIALIZER.extend(['--modprobe', modprobe]) + INITCOMMAND.extend(['--modprobe', modprobe]) + TESTCOMMAND.extend(['--modprobe', modprobe]) + FALLBACKCMD.extend(['--modprobe', modprobe]) + + SAVECOMMAND = list(COMMANDARGS) + SAVECOMMAND.insert(0, bin_iptables_save) + + b_path = to_bytes(path, errors='surrogate_or_strict') + + if state == 'restored': + if not os.path.exists(b_path): + 
module.fail_json(msg="Source %s not found" % path) + if not os.path.isfile(b_path): + module.fail_json(msg="Source %s not a file" % path) + if not os.access(b_path, os.R_OK): + module.fail_json(msg="Source %s not readable" % path) + state_to_restore = read_state(b_path) + else: + cmd = ' '.join(SAVECOMMAND) + + (rc, stdout, stderr) = module.run_command(INITCOMMAND, check_rc=True) + + # The issue comes when wanting to restore state from empty iptable-save's + # output... what happens when, say: + # - no table is specified, and iptables-save's output is only nat table; + # - we give filter's ruleset to iptables-restore, that locks ourselve out + # of the host; + # then trying to roll iptables state back to the previous (working) setup + # doesn't override current filter table because no filter table is stored + # in the backup ! So we have to ensure tables to be restored have a backup + # in case of rollback. + if table is None: + if state == 'restored': + for t in TABLES: + if '*%s' % t in state_to_restore: + if len(stdout) == 0 or '*%s' % t not in stdout.splitlines(): + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, t) + elif len(stdout) == 0: + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, 'filter') + + elif state == 'restored' and '*%s' % table not in state_to_restore: + module.fail_json(msg="Table %s to restore not defined in %s" % (table, path)) + + elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines(): + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, table) + + initial_state = filter_and_format_state(stdout) + if initial_state is None: + module.fail_json(msg="Unable to initialize firewall from NULL state.") + + # Depending on the value of 'table', initref_state may differ from + # initial_state. + (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) + tables_before = per_table_state(SAVECOMMAND, stdout) + initref_state = filter_and_format_state(stdout) + + if state == 'saved': + changed = write_state(b_path, initref_state, changed) + module.exit_json( + changed=changed, + cmd=cmd, + tables=tables_before, + initial_state=initial_state, + saved=initref_state) + + # + # All remaining code is for state=restored + # + + MAINCOMMAND = list(COMMANDARGS) + MAINCOMMAND.insert(0, bin_iptables_restore) + + if wait is not None: + MAINCOMMAND.extend(['--wait', '%s' % wait]) + + if _back is not None: + b_back = to_bytes(_back, errors='surrogate_or_strict') + dummy = write_state(b_back, initref_state, changed) + BACKCOMMAND = list(MAINCOMMAND) + BACKCOMMAND.append(_back) + + if noflush: + MAINCOMMAND.append('--noflush') + + MAINCOMMAND.append(path) + cmd = ' '.join(MAINCOMMAND) + + TESTCOMMAND = list(MAINCOMMAND) + TESTCOMMAND.insert(1, '--test') + error_msg = "Source %s is not suitable for input to %s" % (path, os.path.basename(bin_iptables_restore)) + + # Due to a bug in iptables-nft-restore --test, we have to validate tables + # one by one (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=960003). 
+ for t in tables_before: + testcommand = list(TESTCOMMAND) + testcommand.extend(['--table', t]) + (rc, stdout, stderr) = module.run_command(testcommand) + + if 'Another app is currently holding the xtables lock' in stderr: + error_msg = stderr + + if rc != 0: + cmd = ' '.join(testcommand) + module.fail_json( + msg=error_msg, + cmd=cmd, + rc=rc, + stdout=stdout, + stderr=stderr, + tables=tables_before, + initial_state=initial_state, + restored=state_to_restore, + applied=False) + + if module.check_mode: + tmpfd, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfd, 'w') as f: + f.write("{0}\n".format("\n".join(initial_state))) + + if filecmp.cmp(tmpfile, b_path): + restored_state = initial_state + else: + restored_state = state_to_restore + + else: + # Give the plugin enough time to retrieve the async status of the module + # in case of a bad option type/value and the like. + if _back is not None: + b_starter = to_bytes('%s.starter' % _back, errors='surrogate_or_strict') + while True: + if os.path.exists(b_starter): + os.remove(b_starter) + break + time.sleep(0.01) + + (rc, stdout, stderr) = module.run_command(MAINCOMMAND) + if 'Another app is currently holding the xtables lock' in stderr: + module.fail_json( + msg=stderr, + cmd=cmd, + rc=rc, + stdout=stdout, + stderr=stderr, + tables=tables_before, + initial_state=initial_state, + restored=state_to_restore, + applied=False) + + (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) + restored_state = filter_and_format_state(stdout) + + if restored_state not in (initref_state, initial_state): + if module.check_mode: + changed = True + else: + tables_after = per_table_state(SAVECOMMAND, stdout) + if tables_after != tables_before: + changed = True + + if _back is None or module.check_mode: + module.exit_json( + changed=changed, + cmd=cmd, + tables=tables_before, + initial_state=initial_state, + restored=restored_state, + applied=True) + + # The rollback implementation currently needs: + # Here: + # * test existence of the backup file, exit with success if it doesn't exist + # * otherwise, restore iptables from this file and return failure + # Action plugin: + # * try to remove the backup file + # * wait until the async task is finished and retrieve its final status + # * modify it and return the result + # Task: + # * task attribute 'async' set to the same value as (or lower than) the + # ansible timeout + # * task attribute 'poll' equals 0 + # + for dummy in range(_timeout): + if os.path.exists(b_back): + time.sleep(1) + continue + module.exit_json( + changed=changed, + cmd=cmd, + tables=tables_before, + initial_state=initial_state, + restored=restored_state, + applied=True) + + # Here we are: for whatever reason, but probably due to the current ruleset, + # the action plugin (i.e. on the controller) was unable to remove the backup + # cookie, so we restore the initial state from it. + (rc, stdout, stderr) = module.run_command(BACKCOMMAND, check_rc=True) + os.remove(b_back) + + (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) + tables_rollback = per_table_state(SAVECOMMAND, stdout) + + msg = ( + "Failed to confirm state restored from %s after %ss. " + "Firewall has been rolled back to its initial state."
% (path, _timeout) + ) + + module.fail_json( + changed=(tables_before != tables_rollback), + msg=msg, + cmd=cmd, + tables=tables_before, + initial_state=initial_state, + restored=restored_state, + applied=False) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ipwcli_dns.py b/ansible_collections/community/general/plugins/modules/ipwcli_dns.py new file mode 100644 index 000000000..7b05aefb7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ipwcli_dns.py @@ -0,0 +1,358 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Christian Wollinger +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ipwcli_dns + +short_description: Manage DNS Records for Ericsson IPWorks via ipwcli + +version_added: '0.2.0' + +description: + - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records." + +requirements: + - ipwcli (installed on Ericsson IPWorks) + +notes: + - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + dnsname: + description: + - Name of the record. + required: true + type: str + type: + description: + - Type of the record. + required: true + type: str + choices: [ NAPTR, SRV, A, AAAA ] + container: + description: + - Sets the container zone for the record. + required: true + type: str + address: + description: + - The IP address for the A or AAAA record. + - Required for I(type=A) or I(type=AAAA). + type: str + ttl: + description: + - Sets the TTL of the record. + type: int + default: 3600 + state: + description: + - Whether the record should exist or not. + type: str + choices: [ absent, present ] + default: present + priority: + description: + - Sets the priority of the SRV record. + type: int + default: 10 + weight: + description: + - Sets the weight of the SRV record. + type: int + default: 10 + port: + description: + - Sets the port of the SRV record. + - Required for I(type=SRV). + type: int + target: + description: + - Sets the target of the SRV record. + - Required for I(type=SRV). + type: str + order: + description: + - Sets the order of the NAPTR record. + - Required for I(type=NAPTR). + type: int + preference: + description: + - Sets the preference of the NAPTR record. + - Required for I(type=NAPTR). + type: int + flags: + description: + - Sets one of the possible flags of NAPTR record. + - Required for I(type=NAPTR). + type: str + choices: ['S', 'A', 'U', 'P'] + service: + description: + - Sets the service of the NAPTR record. + - Required for I(type=NAPTR). + type: str + replacement: + description: + - Sets the replacement of the NAPTR record. + - Required for I(type=NAPTR). + type: str + username: + description: + - Username to login on ipwcli. + type: str + required: true + password: + description: + - Password to login on ipwcli. 
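In ipwcli_dns, the same record string does double duty: C(create) gets it verbatim, while C(list) and C(delete) rewrite it into a query by swapping separators and keywords (see list_record() and delete_record() below). A sketch of that rewrite on the A-record format this module builds:

```python
# 'create'-style expression, as built by create_arecord() below.
record = 'arecord example.com 127.0.0.1 -set ttl=3600;container=ZoneOne'
# list/delete variant: ';' -> '&&' and 'set' -> 'where'.
query = record.replace(';', '&&').replace('set', 'where')
print(query)  # arecord example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne
```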
+ type: str + required: true + +author: + - Christian Wollinger (@cwollinger) +''' + +EXAMPLES = ''' +- name: Create A record + community.general.ipwcli_dns: + dnsname: example.com + type: A + container: ZoneOne + address: 127.0.0.1 + +- name: Remove SRV record if exists + community.general.ipwcli_dns: + dnsname: _sip._tcp.test.example.com + type: SRV + container: ZoneOne + ttl: 100 + state: absent + target: example.com + port: 5060 + +- name: Create NAPTR record + community.general.ipwcli_dns: + dnsname: test.example.com + type: NAPTR + preference: 10 + container: ZoneOne + ttl: 100 + order: 10 + service: 'SIP+D2T' + replacement: '_sip._tcp.test.example.com.' + flags: S +''' + +RETURN = ''' +record: + description: The created record from the input params + type: str + returned: always +''' + +from ansible.module_utils.basic import AnsibleModule + + +class ResourceRecord(object): + + def __init__(self, module): + self.module = module + self.dnsname = module.params['dnsname'] + self.dnstype = module.params['type'] + self.container = module.params['container'] + self.address = module.params['address'] + self.ttl = module.params['ttl'] + self.state = module.params['state'] + self.priority = module.params['priority'] + self.weight = module.params['weight'] + self.port = module.params['port'] + self.target = module.params['target'] + self.order = module.params['order'] + self.preference = module.params['preference'] + self.flags = module.params['flags'] + self.service = module.params['service'] + self.replacement = module.params['replacement'] + self.user = module.params['username'] + self.password = module.params['password'] + + def create_naptrrecord(self): + # create NAPTR record with the given params + record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"' + % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement)) + return record + + def create_srvrecord(self): + # create SRV record with the given params + record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s' + % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target)) + return record + + def create_arecord(self): + # create A record with the given params + if self.dnstype == 'AAAA': + record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) + else: + record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container) + + return record + + def list_record(self, record): + # check if the record exists via list on ipwcli + search = 'list %s' % (record.replace(';', '&&').replace('set', 'where')) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + ] + rc, out, err = self.module.run_command(cmd, data=search) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or + ('NAPTRRecord %s' % self.dnsname in out and rc == 0)): + return True, rc, out, err + + return False, rc, out, err + + def deploy_record(self, record): + # check what happens if create fails on ipworks + stdin = 'create %s' % (record) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + ] 
+ rc, out, err = self.module.run_command(cmd, data=stdin) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if '1 object(s) created.' in out: + return rc, out, err + else: + self.module.fail_json(msg='record creation failed', stderr=out) + + def delete_record(self, record): + # check what happens if create fails on ipworks + stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where')) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + ] + rc, out, err = self.module.run_command(cmd, data=stdin) + + if 'Invalid username or password' in out: + self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password') + + if '1 object(s) were updated.' in out: + return rc, out, err + else: + self.module.fail_json(msg='record deletion failed', stderr=out) + + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + dnsname=dict(type='str', required=True), + type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']), + container=dict(type='str', required=True), + address=dict(type='str', required=False), + ttl=dict(type='int', required=False, default=3600), + state=dict(type='str', default='present', choices=['absent', 'present']), + priority=dict(type='int', required=False, default=10), + weight=dict(type='int', required=False, default=10), + port=dict(type='int', required=False), + target=dict(type='str', required=False), + order=dict(type='int', required=False), + preference=dict(type='int', required=False), + flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']), + service=dict(type='str', required=False), + replacement=dict(type='str', required=False), + username=dict(type='str', required=True), + password=dict(type='str', required=True, no_log=True) + ) + + # define result + result = dict( + changed=False, + stdout='', + stderr='', + rc=0, + record='' + ) + + # supports check mode + module = AnsibleModule( + argument_spec=module_args, + required_if=[ + ['type', 'A', ['address']], + ['type', 'AAAA', ['address']], + ['type', 'SRV', ['port', 'target']], + ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']], + ], + supports_check_mode=True + ) + + user = ResourceRecord(module) + + if user.dnstype == 'NAPTR': + record = user.create_naptrrecord() + elif user.dnstype == 'SRV': + record = user.create_srvrecord() + elif user.dnstype == 'A' or user.dnstype == 'AAAA': + record = user.create_arecord() + + found, rc, out, err = user.list_record(record) + + if found and user.state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = user.delete_record(record) + result['changed'] = True + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + elif not found and user.state == 'present': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = user.deploy_record(record) + result['changed'] = True + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + else: + result['changed'] = False + result['record'] = record + result['rc'] = rc + result['stdout'] = out + result['stderr'] = err + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/irc.py 
b/ansible_collections/community/general/plugins/modules/irc.py new file mode 100644 index 000000000..6cd7bc120 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/irc.py @@ -0,0 +1,311 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Jan-Piet Mens +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: irc +short_description: Send a message to an IRC channel or a nick +description: + - Send a message to an IRC channel or a nick. This is a very simplistic implementation. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + server: + type: str + description: + - IRC server name/address + default: localhost + port: + type: int + description: + - IRC server port number + default: 6667 + nick: + type: str + description: + - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting. + default: ansible + msg: + type: str + description: + - The message body. + required: true + topic: + type: str + description: + - Set the channel topic + color: + type: str + description: + - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). + Added 11 more colors in version 2.0. + default: "none" + choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan", + "light_blue", "pink", "gray", "light_gray"] + aliases: [colour] + channel: + type: str + description: + - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. + nick_to: + type: list + elements: str + description: + - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them. + key: + type: str + description: + - Channel key + passwd: + type: str + description: + - Server password + timeout: + type: int + description: + - Timeout to use while waiting for successful registration and join + messages, this is to prevent an endless loop + default: 30 + use_ssl: + description: + - Designates whether TLS/SSL should be used when connecting to the IRC server + type: bool + default: false + part: + description: + - Designates whether user should part from channel after sending message or not. + Useful for when using a faux bot and not wanting join/parts between messages. + type: bool + default: true + style: + type: str + description: + - Text style for the message. 
Note italic does not work on some clients. + choices: [ "bold", "underline", "reverse", "italic", "none" ] + default: none + +# informational: requirements for nodes +requirements: [ socket ] +author: + - "Jan-Piet Mens (@jpmens)" + - "Matt Martz (@sivel)" +''' + +EXAMPLES = ''' +- name: Send a message to an IRC channel from nick ansible + community.general.irc: + server: irc.example.net + channel: '#t1' + msg: Hello world + +- name: Send a message to an IRC channel + local_action: + module: irc + port: 6669 + server: irc.example.net + channel: '#t1' + msg: 'All finished at {{ ansible_date_time.iso8601 }}' + color: red + nick: ansibleIRC + +- name: Send a message to an IRC channel + local_action: + module: irc + port: 6669 + server: irc.example.net + channel: '#t1' + nick_to: + - nick1 + - nick2 + msg: 'All finished at {{ ansible_date_time.iso8601 }}' + color: red + nick: ansibleIRC +''' + +# =========================================== +# IRC module support methods. +# + +import re +import socket +import ssl +import time +import traceback + +from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible.module_utils.basic import AnsibleModule + + +def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None, + nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None): + '''send message to IRC''' + nick_to = [] if nick_to is None else nick_to + + colornumbers = { + 'white': "00", + 'black': "01", + 'blue': "02", + 'green': "03", + 'red': "04", + 'brown': "05", + 'purple': "06", + 'orange': "07", + 'yellow': "08", + 'light_green': "09", + 'teal': "10", + 'light_cyan': "11", + 'light_blue': "12", + 'pink': "13", + 'gray': "14", + 'light_gray': "15", + } + + stylechoices = { + 'bold': "\x02", + 'underline': "\x1F", + 'reverse': "\x16", + 'italic': "\x1D", + } + + try: + styletext = stylechoices[style] + except Exception: + styletext = "" + + try: + colornumber = colornumbers[color] + colortext = "\x03" + colornumber + except Exception: + colortext = "" + + message = styletext + colortext + msg + + irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if use_ssl: + irc = ssl.wrap_socket(irc) + irc.connect((server, int(port))) + + if passwd: + irc.send(to_bytes('PASS %s\r\n' % passwd)) + irc.send(to_bytes('NICK %s\r\n' % nick)) + irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick))) + motd = '' + start = time.time() + while 1: + motd += to_native(irc.recv(1024)) + # The server might send back a shorter nick than we specified (due to NICKLEN), + # so grab that and use it from now on (assuming we find the 00[1-4] response).
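# (Editor's illustration, not part of the module: an RPL_WELCOME reply line
# typically looks like ':irc.example.net 001 mynick :Welcome to the network',
# so the named group in the regex below captures 'mynick'.)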
+ match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M) + if match: + nick = match.group('nick') + break + elif time.time() - start > timeout: + raise Exception('Timeout waiting for IRC server welcome response') + time.sleep(0.5) + + if channel: + if key: + irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key))) + else: + irc.send(to_bytes('JOIN %s\r\n' % channel)) + + join = '' + start = time.time() + while 1: + join += to_native(irc.recv(1024)) + if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I): + break + elif time.time() - start > timeout: + raise Exception('Timeout waiting for IRC JOIN response') + time.sleep(0.5) + + if topic is not None: + irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic))) + time.sleep(1) + + if nick_to: + for nick in nick_to: + irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message))) + if channel: + irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message))) + time.sleep(1) + if part: + if channel: + irc.send(to_bytes('PART %s\r\n' % channel)) + irc.send(to_bytes('QUIT\r\n')) + time.sleep(1) + irc.close() + +# =========================================== +# Main +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server=dict(default='localhost'), + port=dict(type='int', default=6667), + nick=dict(default='ansible'), + nick_to=dict(required=False, type='list', elements='str'), + msg=dict(required=True), + color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue", + "green", "red", "brown", + "purple", "orange", "yellow", + "light_green", "teal", "light_cyan", + "light_blue", "pink", "gray", + "light_gray", "none"]), + style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]), + channel=dict(required=False), + key=dict(no_log=True), + topic=dict(), + passwd=dict(no_log=True), + timeout=dict(type='int', default=30), + part=dict(type='bool', default=True), + use_ssl=dict(type='bool', default=False) + ), + supports_check_mode=True, + required_one_of=[['channel', 'nick_to']] + ) + + server = module.params["server"] + port = module.params["port"] + nick = module.params["nick"] + nick_to = module.params["nick_to"] + msg = module.params["msg"] + color = module.params["color"] + channel = module.params["channel"] + topic = module.params["topic"] + if topic and not channel: + module.fail_json(msg="When topic is specified, a channel is required.") + key = module.params["key"] + passwd = module.params["passwd"] + timeout = module.params["timeout"] + use_ssl = module.params["use_ssl"] + part = module.params["part"] + style = module.params["style"] + + try: + send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style) + except Exception as e: + module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=False, channel=channel, nick=nick, + msg=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/iso_create.py b/ansible_collections/community/general/plugins/modules/iso_create.py new file mode 100644 index 000000000..4b51be96d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/iso_create.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Ansible Project +# Copyright (c) 2020, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: iso_create +short_description: Generate ISO file with specified files or folders +description: + - This module is used to generate an ISO file from specified files or folders. +author: + - Diane Wang (@Tomorrow9) +requirements: + - "pycdlib" + - "python >= 2.7" +version_added: '0.2.0' + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + src_files: + description: + - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file. + - Will fail if a specified file or folder in C(src_files) does not exist on the local machine. + - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and + underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path + names are limited to 255 characters.' + type: list + required: true + elements: path + dest_iso: + description: + - The absolute path with file name of the new generated ISO file on local machine. + - Will create intermediate folders when they do not exist. + type: path + required: true + interchange_level: + description: + - The ISO9660 interchange level to use; it dictates the rules on the names of files. + - Levels and valid values C(1), C(2), C(3), C(4) are supported. + - The default value is level C(1), which is the most conservative; level C(3) is recommended. + - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension. + type: int + default: 1 + choices: [1, 2, 3, 4] + vol_ident: + description: + - The volume identification string to use on the new generated ISO image. + type: str + rock_ridge: + description: + - Whether to make this ISO have the Rock Ridge extensions or not. + - Valid values are C(1.09), C(1.10) or C(1.12), meaning the specified Rock Ridge version is added to the ISO. + - If unsure, set C(1.09) to ensure maximum compatibility. + - If not specified, no Rock Ridge extension is added to the ISO. + type: str + choices: ['1.09', '1.10', '1.12'] + joliet: + description: + - Support levels and valid values are C(1), C(2), or C(3). + - Level C(3) is by far the most common. + - If not specified, then no Joliet support is added. + type: int + choices: [1, 2, 3] + udf: + description: + - Whether to add UDF support to this ISO. + - If set to C(True), then version 2.60 of the UDF spec is used. + - If not specified or set to C(False), then no UDF support is added.
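# Editor's sketch (not part of the module): the pycdlib calls this module
# wraps, assuming pycdlib is installed; file names here are illustrative.
#   import pycdlib
#   iso = pycdlib.PyCdlib()
#   iso.new(interchange_level=3, joliet=3, vol_ident='TESTISO')
#   # ISO9660 paths are upper-case with a ';1' version suffix; the Joliet
#   # path keeps the original name.
#   iso.add_file('/tmp/hello.txt', iso_path='/HELLO.TXT;1', joliet_path='/hello.txt')
#   iso.write('/tmp/test.iso')
#   iso.close()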
+ type: bool + default: false +''' + +EXAMPLES = r''' +- name: Create an ISO file + community.general.iso_create: + src_files: + - /root/testfile.yml + - /root/testfolder + dest_iso: /tmp/test.iso + interchange_level: 3 + +- name: Create an ISO file with Rock Ridge extension + community.general.iso_create: + src_files: + - /root/testfile.yml + - /root/testfolder + dest_iso: /tmp/test.iso + rock_ridge: 1.09 + +- name: Create an ISO file with Joliet support + community.general.iso_create: + src_files: + - ./windows_config/Autounattend.xml + dest_iso: ./test.iso + interchange_level: 3 + joliet: 3 + vol_ident: WIN_AUTOINSTALL +''' + +RETURN = r''' +source_file: + description: Configured source files or directories list. + returned: on success + type: list + elements: path + sample: ["/path/to/file.txt", "/path/to/folder"] +created_iso: + description: Created iso file path. + returned: on success + type: str + sample: "/path/to/test.iso" +interchange_level: + description: Configured interchange level. + returned: on success + type: int + sample: 3 +vol_ident: + description: Configured volume identification string. + returned: on success + type: str + sample: "OEMDRV" +joliet: + description: Configured Joliet support level. + returned: on success + type: int + sample: 3 +rock_ridge: + description: Configured Rock Ridge version. + returned: on success + type: str + sample: "1.09" +udf: + description: Configured UDF support. + returned: on success + type: bool + sample: false +''' + +import os +import traceback + +PYCDLIB_IMP_ERR = None +try: + import pycdlib + HAS_PYCDLIB = True +except ImportError: + PYCDLIB_IMP_ERR = traceback.format_exc() + HAS_PYCDLIB = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None): + rr_name = None + joliet_path = None + udf_path = None + # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot, + # followed by a maximum 3 character extension, followed by a semicolon and a version + file_name = os.path.basename(file_path) + if '.' 
not in file_name: + file_in_iso_path = file_path.upper() + '.;1' + else: + file_in_iso_path = file_path.upper() + ';1' + if rock_ridge: + rr_name = file_name + if use_joliet: + joliet_path = file_path + if use_udf: + udf_path = file_path + try: + iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path) + except Exception as err: + module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err))) + + +def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None): + rr_name = None + joliet_path = None + udf_path = None + iso_dir_path = dir_path.upper() + if rock_ridge: + rr_name = os.path.basename(dir_path) + if use_joliet: + joliet_path = dir_path + if use_udf: + udf_path = dir_path + try: + iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path) + except Exception as err: + module.fail_json(msg="Failed to add directory %s to ISO file due to %s" % (dir_path, to_native(err))) + + +def main(): + argument_spec = dict( + src_files=dict(type='list', required=True, elements='path'), + dest_iso=dict(type='path', required=True), + interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1), + vol_ident=dict(type='str'), + rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']), + joliet=dict(type='int', choices=[1, 2, 3]), + udf=dict(type='bool', default=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + if not HAS_PYCDLIB: + module.fail_json(msg=missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR) + + src_file_list = module.params.get('src_files') + if not src_file_list: + module.fail_json(msg='Please specify source file and/or directory list using src_files parameter.') + for src_file in src_file_list: + if not os.path.exists(src_file): + module.fail_json(msg="Specified source file/directory path does not exist on local machine, %s" % src_file) + + dest_iso = module.params.get('dest_iso') + if not dest_iso: + module.fail_json(msg='Please specify the absolute path of the newly created ISO file using dest_iso parameter.') + + dest_iso_dir = os.path.dirname(dest_iso) + if dest_iso_dir and not os.path.exists(dest_iso_dir): + # will create intermediate dir for new ISO file + try: + os.makedirs(dest_iso_dir) + except OSError as err: + module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err))) + + volume_id = module.params.get('vol_ident') + if volume_id is None: + volume_id = '' + inter_level = module.params.get('interchange_level') + rock_ridge = module.params.get('rock_ridge') + use_joliet = module.params.get('joliet') + use_udf = None + if module.params['udf']: + use_udf = '2.60' + + result = dict( + changed=False, + source_file=src_file_list, + created_iso=dest_iso, + interchange_level=inter_level, + vol_ident=volume_id, + rock_ridge=rock_ridge, + joliet=use_joliet, + udf=use_udf + ) + if not module.check_mode: + iso_file = pycdlib.PyCdlib(always_consistent=True) + iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf) + + for src_file in src_file_list: + # if a dir is specified, walk it to add its files and subdirectories + if os.path.isdir(src_file): + dir_list = [] + file_list = [] + src_file = src_file.rstrip('/') + dir_name = os.path.basename(src_file) + add_directory(module, iso_file=iso_file,
dir_path='/' + dir_name, rock_ridge=rock_ridge, + use_joliet=use_joliet, use_udf=use_udf) + + # get dir list and file list + for path, dirs, files in os.walk(src_file): + for filename in files: + file_list.append(os.path.join(path, filename)) + for dir in dirs: + dir_list.append(os.path.join(path, dir)) + for new_dir in dir_list: + add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1], + rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) + for new_file in file_list: + add_file(module, iso_file=iso_file, src_file=new_file, + file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge, + use_joliet=use_joliet, use_udf=use_udf) + # if specify a file then add this file directly to the '/' path in ISO + else: + add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file), + rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf) + + iso_file.write(dest_iso) + iso_file.close() + + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/iso_customize.py b/ansible_collections/community/general/plugins/modules/iso_customize.py new file mode 100644 index 000000000..9add080b1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/iso_customize.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Ansible Project +# Copyright (c) 2022, VMware, Inc. All Rights Reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: iso_customize +short_description: Add/remove/change files in ISO file +description: + - This module is used to add/remove/change files in ISO file. + - The file inside ISO will be overwritten if it exists by option I(add_files). +author: + - Yuhua Zou (@ZouYuhua) +requirements: + - "pycdlib" + - "python >= 2.7" +version_added: '5.8.0' + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + src_iso: + description: + - This is the path of source ISO file. + type: path + required: true + dest_iso: + description: + - The path of the customized ISO file. + type: path + required: true + delete_files: + description: + - Absolute paths for files inside the ISO file that should be removed. + type: list + required: false + elements: str + default: [] + add_files: + description: + - Allows to add and replace files in the ISO file. + - Will create intermediate folders inside the ISO file when they do not exist. + type: list + required: false + elements: dict + default: [] + suboptions: + src_file: + description: + - The path with file name on the machine the module is executed on. + type: path + required: true + dest_file: + description: + - The absolute path of the file inside the ISO file. + type: str + required: true +notes: +- The C(pycdlib) library states it supports Python 2.7 and 3.4 only. +- > + The function I(add_file) in pycdlib will overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 / Joliet / UDF. + But it will not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. 
So we use the workaround of deleting the existing file and then adding the file for ISO with Rock Ridge. +''' + +EXAMPLES = r''' +- name: "Customize ISO file" + community.general.iso_customize: + src_iso: "/path/to/ubuntu-22.04-desktop-amd64.iso" + dest_iso: "/path/to/ubuntu-22.04-desktop-amd64-customized.iso" + delete_files: + - "/boot.catalog" + add_files: + - src_file: "/path/to/grub.cfg" + dest_file: "/boot/grub/grub.cfg" + - src_file: "/path/to/ubuntu.seed" + dest_file: "/preseed/ubuntu.seed" + register: customize_iso_result +''' + +RETURN = r''' +src_iso: + description: Path of source ISO file. + returned: on success + type: str + sample: "/path/to/file.iso" +dest_iso: + description: Path of the customized ISO file. + returned: on success + type: str + sample: "/path/to/customized.iso" +''' + +import os + +from ansible_collections.community.general.plugins.module_utils import deps +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +with deps.declare("pycdlib"): + import pycdlib + + +# The parent dir already exists; we only add the subdirectory +def iso_add_dir(module, opened_iso, iso_type, dir_path): + parent_dir, check_dirname = dir_path.rsplit("/", 1) + if not parent_dir.strip(): + parent_dir = "/" + check_dirname = check_dirname.strip() + + for dirname, dirlist, dummy_filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname == parent_dir.upper(): + if check_dirname.upper() in dirlist: + return + + if parent_dir == "/": + current_dirpath = "/%s" % check_dirname + else: + current_dirpath = "%s/%s" % (parent_dir, check_dirname) + + current_dirpath_upper = current_dirpath.upper() + try: + if iso_type == "iso9660": + opened_iso.add_directory(current_dirpath_upper) + elif iso_type == "rr": + opened_iso.add_directory(current_dirpath_upper, rr_name=check_dirname) + elif iso_type == "joliet": + opened_iso.add_directory(current_dirpath_upper, joliet_path=current_dirpath) + elif iso_type == "udf": + opened_iso.add_directory(current_dirpath_upper, udf_path=current_dirpath) + except Exception as err: + msg = "Failed to create dir %s with error: %s" % (current_dirpath, to_native(err)) + module.fail_json(msg=msg) + + +def iso_add_dirs(module, opened_iso, iso_type, dir_path): + dirnames = dir_path.strip().split("/") + + current_dirpath = "/" + for item in dirnames: + if not item.strip(): + continue + if current_dirpath == "/": + current_dirpath = "/%s" % item + else: + current_dirpath = "%s/%s" % (current_dirpath, item) + + iso_add_dir(module, opened_iso, iso_type, current_dirpath) + + +def iso_check_file_exists(opened_iso, dest_file): + file_dir = os.path.dirname(dest_file).strip() + file_name = os.path.basename(dest_file) + dirnames = file_dir.strip().split("/") + + parent_dir = "/" + for item in dirnames: + if not item.strip(): + continue + + for dirname, dirlist, dummy_filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname != parent_dir.upper(): + break + + if item.upper() not in dirlist: + return False + + if parent_dir == "/": + parent_dir = "/%s" % item + else: + parent_dir = "%s/%s" % (parent_dir, item) + + if '.'
not in file_name: + file_in_iso_path = file_name.upper() + '.;1' + else: + file_in_iso_path = file_name.upper() + ';1' + + for dirname, dummy_dirlist, filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname != parent_dir.upper(): + return False + + return file_name.upper() in filelist or file_in_iso_path in filelist + + +def iso_add_file(module, opened_iso, iso_type, src_file, dest_file): + dest_file = dest_file.strip() + if dest_file[0] != "/": + dest_file = "/%s" % dest_file + + file_local = src_file.strip() + + file_dir = os.path.dirname(dest_file).strip() + file_name = os.path.basename(dest_file) + if '.' not in file_name: + file_in_iso_path = dest_file.upper() + '.;1' + else: + file_in_iso_path = dest_file.upper() + ';1' + + if file_dir and file_dir != "/": + iso_add_dirs(module, opened_iso, iso_type, file_dir) + + try: + if iso_type == "iso9660": + opened_iso.add_file(file_local, iso_path=file_in_iso_path) + elif iso_type == "rr": + # For ISO with Rock Ridge 1.09 / 1.10, it won't overwrite the existing file + # So we take workaround here: delete the existing file and then add file + if iso_check_file_exists(opened_iso, dest_file): + opened_iso.rm_file(iso_path=file_in_iso_path) + opened_iso.add_file(file_local, iso_path=file_in_iso_path, rr_name=file_name) + elif iso_type == "joliet": + opened_iso.add_file(file_local, iso_path=file_in_iso_path, joliet_path=dest_file) + elif iso_type == "udf": + # For ISO with UDF, it won't always succeed to overwrite the existing file + # So we take workaround here: delete the existing file and then add file + if iso_check_file_exists(opened_iso, dest_file): + opened_iso.rm_file(udf_path=dest_file) + opened_iso.add_file(file_local, iso_path=file_in_iso_path, udf_path=dest_file) + except Exception as err: + msg = "Failed to add local file %s to ISO with error: %s" % (file_local, to_native(err)) + module.fail_json(msg=msg) + + +def iso_delete_file(module, opened_iso, iso_type, dest_file): + dest_file = dest_file.strip() + if dest_file[0] != "/": + dest_file = "/%s" % dest_file + file_name = os.path.basename(dest_file) + + if not iso_check_file_exists(opened_iso, dest_file): + module.fail_json(msg="The file %s does not exist." % dest_file) + + if '.' 
not in file_name: + file_in_iso_path = dest_file.upper() + '.;1' + else: + file_in_iso_path = dest_file.upper() + ';1' + + try: + if iso_type == "iso9660": + opened_iso.rm_file(iso_path=file_in_iso_path) + elif iso_type == "rr": + opened_iso.rm_file(iso_path=file_in_iso_path) + elif iso_type == "joliet": + opened_iso.rm_file(joliet_path=dest_file) + elif iso_type == "udf": + opened_iso.rm_file(udf_path=dest_file) + except Exception as err: + msg = "Failed to delete iso file %s with error: %s" % (dest_file, to_native(err)) + module.fail_json(msg=msg) + + +def iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list): + iso = None + iso_type = "iso9660" + + try: + iso = pycdlib.PyCdlib(always_consistent=True) + iso.open(src_iso) + if iso.has_rock_ridge(): + iso_type = "rr" + elif iso.has_joliet(): + iso_type = "joliet" + elif iso.has_udf(): + iso_type = "udf" + + for item in delete_files_list: + iso_delete_file(module, iso, iso_type, item) + + for item in add_files_list: + iso_add_file(module, iso, iso_type, item['src_file'], item['dest_file']) + + iso.write(dest_iso) + except Exception as err: + msg = "Failed to rebuild ISO %s with error: %s" % (src_iso, to_native(err)) + module.fail_json(msg=msg) + finally: + if iso: + iso.close() + + +def main(): + argument_spec = dict( + src_iso=dict(type='path', required=True), + dest_iso=dict(type='path', required=True), + delete_files=dict(type='list', elements='str', default=[]), + add_files=dict( + type='list', elements='dict', default=[], + options=dict( + src_file=dict(type='path', required=True), + dest_file=dict(type='str', required=True), + ), + ), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[('delete_files', 'add_files'), ], + supports_check_mode=True, + ) + deps.validate(module) + + src_iso = module.params['src_iso'] + if not os.path.exists(src_iso): + module.fail_json(msg="ISO file %s does not exist." % src_iso) + + dest_iso = module.params['dest_iso'] + dest_iso_dir = os.path.dirname(dest_iso) + if dest_iso_dir and not os.path.exists(dest_iso_dir): + module.fail_json(msg="The dest directory %s does not exist" % dest_iso_dir) + + delete_files_list = [s.strip() for s in module.params['delete_files']] + add_files_list = module.params['add_files'] + if add_files_list: + for item in add_files_list: + if not os.path.exists(item['src_file']): + module.fail_json(msg="The file %s does not exist." 
% item['src_file']) + + result = dict( + src_iso=src_iso, + customized_iso=dest_iso, + delete_files=delete_files_list, + add_files=add_files_list, + changed=True, + ) + + if not module.check_mode: + iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list) + + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/iso_extract.py b/ansible_collections/community/general/plugins/modules/iso_extract.py new file mode 100644 index 000000000..599cbe4de --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/iso_extract.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Jeroen Hoekx +# Copyright (c) 2016, Matt Robinson +# Copyright (c) 2017, Dag Wieers +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +author: + - Jeroen Hoekx (@jhoekx) + - Matt Robinson (@ribbons) + - Dag Wieers (@dagwieers) +module: iso_extract +short_description: Extract files from an ISO image +description: + - This module has two possible ways of operation. + - If 7zip is installed on the system, this module extracts files from an ISO + into a temporary directory and copies files to a given destination, + if needed. + - If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module + mounts the ISO image to a temporary location, and copies files to a given + destination, if needed. +requirements: + - Either 7z (from C(7zip) or C(p7zip) package) + - Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + image: + description: + - The ISO image to extract files from. + type: path + required: true + aliases: [ path, src ] + dest: + description: + - The destination directory to extract files to. + type: path + required: true + files: + description: + - A list of files to extract from the image. + - Extracting directories does not work. + type: list + elements: str + required: true + force: + description: + - If C(true), which will replace the remote file when contents are different than the source. + - If C(false), the file will only be extracted and copied if the destination does not already exist. + type: bool + default: true + executable: + description: + - The path to the C(7z) executable to use for extracting files from the ISO. + - If not provided, it will assume the value C(7z). + type: path +notes: +- Only the file checksum (content) is taken into account when extracting files + from the ISO image. If I(force=false), only checks the presence of the file. +- In Ansible 2.3 this module was using C(mount) and C(umount) commands only, + requiring root access. This is no longer needed with the introduction of 7zip + for extraction. 
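# Editor's sketch (not part of the module): the two extraction modes
# described above, in plain Python; paths are illustrative and the mount
# fallback needs root/CAP_SYS_ADMIN.
#   import shutil, subprocess, tempfile
#   tmp_dir = tempfile.mkdtemp()
#   seven_zip = shutil.which('7z')
#   if seven_zip:
#       subprocess.check_call([seven_zip, 'x', '/tmp/image.iso', '-o%s' % tmp_dir, 'isolinux/kernel'])
#   else:
#       subprocess.check_call(['mount', '-o', 'loop,ro', '/tmp/image.iso', tmp_dir])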
+''' + +EXAMPLES = r''' +- name: Extract kernel and ramdisk from a LiveCD + community.general.iso_extract: + image: /tmp/rear-test.iso + dest: /tmp/virt-rear/ + files: + - isolinux/kernel + - isolinux/initrd.cgz +''' + +RETURN = r''' +# +''' + +import os.path +import shutil +import tempfile + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + image=dict(type='path', required=True, aliases=['path', 'src']), + dest=dict(type='path', required=True), + files=dict(type='list', elements='str', required=True), + force=dict(type='bool', default=True), + executable=dict(type='path'), # No default on purpose + ), + supports_check_mode=True, + ) + image = module.params['image'] + dest = module.params['dest'] + files = module.params['files'] + force = module.params['force'] + executable = module.params['executable'] + + result = dict( + changed=False, + dest=dest, + image=image, + ) + + # We want to know if the user provided it or not, so we set default here + if executable is None: + executable = '7z' + + binary = module.get_bin_path(executable, None) + + # When executable was provided and binary not found, warn the user! + if module.params['executable'] is not None and not binary: + module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable) + + if not os.path.exists(dest): + module.fail_json(msg="Directory '%s' does not exist" % dest) + + if not os.path.exists(image): + module.fail_json(msg="ISO image '%s' does not exist" % image) + + result['files'] = [] + extract_files = list(files) + + if not force: + # Check if we have to process any files based on existence + for f in files: + dest_file = os.path.join(dest, os.path.basename(f)) + if os.path.exists(dest_file): + result['files'].append(dict( + checksum=None, + dest=dest_file, + src=f, + )) + extract_files.remove(f) + + if not extract_files: + module.exit_json(**result) + + tmp_dir = tempfile.mkdtemp() + + # Use 7zip when we have a binary, otherwise try to mount + if binary: + cmd = [binary, 'x', image, '-o%s' % tmp_dir] + extract_files + else: + cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir] + + rc, out, err = module.run_command(cmd) + if rc != 0: + result.update(dict( + cmd=cmd, + rc=rc, + stderr=err, + stdout=out, + )) + shutil.rmtree(tmp_dir) + + if binary: + module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result) + else: + module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'."
% (image, tmp_dir, executable), **result) + + try: + for f in extract_files: + tmp_src = os.path.join(tmp_dir, f) + if not os.path.exists(tmp_src): + module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result) + + src_checksum = module.sha1(tmp_src) + + dest_file = os.path.join(dest, os.path.basename(f)) + + if os.path.exists(dest_file): + dest_checksum = module.sha1(dest_file) + else: + dest_checksum = None + + result['files'].append(dict( + checksum=src_checksum, + dest=dest_file, + src=f, + )) + + if src_checksum != dest_checksum: + if not module.check_mode: + shutil.copy(tmp_src, dest_file) + + result['changed'] = True + finally: + if not binary: + module.run_command([module.get_bin_path('umount'), tmp_dir]) + + shutil.rmtree(tmp_dir) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/jabber.py b/ansible_collections/community/general/plugins/modules/jabber.py new file mode 100644 index 000000000..650b29957 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/jabber.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: jabber +short_description: Send a message to jabber user or chat room +description: + - Send a message to jabber +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + user: + type: str + description: + - User as which to connect + required: true + password: + type: str + description: + - password for user to connect + required: true + to: + type: str + description: + - user ID or name of the room, when using room use a slash to indicate your nick. + required: true + msg: + type: str + description: + - The message body. 
+ required: true + host: + type: str + description: + - host to connect, overrides user info + port: + type: int + description: + - port to connect to, overrides default + default: 5222 + encoding: + type: str + description: + - message encoding + +# informational: requirements for nodes +requirements: + - python xmpp (xmpppy) +author: "Brian Coca (@bcoca)" +''' + +EXAMPLES = ''' +- name: Send a message to a user + community.general.jabber: + user: mybot@example.net + password: secret + to: friend@example.net + msg: Ansible task finished + +- name: Send a message to a room + community.general.jabber: + user: mybot@example.net + password: secret + to: mychaps@conference.example.net/ansiblebot + msg: Ansible task finished + +- name: Send a message, specifying the host and port + community.general.jabber: + user: mybot@example.net + host: talk.example.net + port: 5223 + password: secret + to: mychaps@example.net + msg: Ansible task finished +''' + +import time +import traceback + +HAS_XMPP = True +XMPP_IMP_ERR = None +try: + import xmpp +except ImportError: + XMPP_IMP_ERR = traceback.format_exc() + HAS_XMPP = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True), + password=dict(required=True, no_log=True), + to=dict(required=True), + msg=dict(required=True), + host=dict(required=False), + port=dict(required=False, default=5222, type='int'), + encoding=dict(required=False), + ), + supports_check_mode=True + ) + + if not HAS_XMPP: + module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR) + + jid = xmpp.JID(module.params['user']) + user = jid.getNode() + server = jid.getDomain() + port = module.params['port'] + password = module.params['password'] + try: + to, nick = module.params['to'].split('/', 1) + except ValueError: + to, nick = module.params['to'], None + + if module.params['host']: + host = module.params['host'] + else: + host = server + if module.params['encoding']: + xmpp.simplexml.ENCODING = module.params['encoding'] + + msg = xmpp.protocol.Message(body=module.params['msg']) + + try: + conn = xmpp.Client(server, debug=[]) + if not conn.connect(server=(host, port)): + module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) + if not conn.auth(user, password, 'Ansible'): + module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server)) + # some old servers require this, also the sleep following send + conn.sendInitPresence(requestRoster=0) + + if nick: # sending to room instead of user, need to join + msg.setType('groupchat') + msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') + join = xmpp.Presence(to=module.params['to']) + join.setTag('x', namespace='http://jabber.org/protocol/muc') + conn.send(join) + time.sleep(1) + else: + msg.setType('chat') + + msg.setTo(to) + if not module.check_mode: + conn.send(msg) + time.sleep(1) + conn.disconnect() + except Exception as e: + module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=False, to=to, user=user, msg=msg.getBody()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/java_cert.py b/ansible_collections/community/general/plugins/modules/java_cert.py new file mode 100644 index 000000000..a188b16c3 --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/java_cert.py @@ -0,0 +1,585 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, RSD Services S.A +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: java_cert + +short_description: Uses keytool to import/remove certificate to/from java keystore (cacerts) +description: + - This is a wrapper module around keytool, which can be used to import certificates + and optionally private keys to a given java keystore, or remove them from it. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + cert_url: + description: + - Basic URL to fetch SSL certificate from. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. + type: str + cert_port: + description: + - Port to connect to URL. + - This will be used to create server URL:PORT. + type: int + default: 443 + cert_path: + description: + - Local path to load certificate from. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. + type: path + cert_alias: + description: + - Imported certificate alias. + - The alias is used when checking for the presence of a certificate in the keystore. + type: str + trust_cacert: + description: + - Trust imported cert as CAcert. + type: bool + default: false + version_added: '0.2.0' + pkcs12_path: + description: + - Local path to load PKCS12 keystore from. + - Unlike C(cert_url) and C(cert_path), the PKCS12 keystore embeds the private key matching + the certificate, and is used to import both the certificate and its private key into the + java keystore. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. + type: path + pkcs12_password: + description: + - Password for importing from PKCS12 keystore. + type: str + pkcs12_alias: + description: + - Alias in the PKCS12 keystore. + type: str + keystore_path: + description: + - Path to keystore. + type: path + keystore_pass: + description: + - Keystore password. + type: str + required: true + keystore_create: + description: + - Create keystore if it does not exist. + type: bool + default: false + keystore_type: + description: + - Keystore type (JCEKS, JKS). + type: str + executable: + description: + - Path to keytool binary if not used we search in PATH for it. + type: str + default: keytool + state: + description: + - Defines action which can be either certificate import or removal. + - When state is present, the certificate will always idempotently be inserted + into the keystore, even if there already exists a cert alias that is different. 
+ type: str + choices: [ absent, present ] + default: present +requirements: [openssl, keytool] +author: +- Adam Hamsik (@haad) +''' + +EXAMPLES = r''' +- name: Import SSL certificate from google.com to a given cacerts keystore + community.general.java_cert: + cert_url: google.com + cert_port: 443 + keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts + keystore_pass: changeit + state: present + +- name: Remove certificate with given alias from a keystore + community.general.java_cert: + cert_url: google.com + keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts + keystore_pass: changeit + executable: /usr/lib/jvm/jre7/bin/keytool + state: absent + +- name: Import trusted CA from SSL certificate + community.general.java_cert: + cert_path: /opt/certs/rootca.crt + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: true + state: present + cert_alias: LE_RootCA + trust_cacert: true + +- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist + community.general.java_cert: + cert_url: google.com + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: true + state: present + +- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist + community.general.java_cert: + pkcs12_path: "/tmp/importkeystore.p12" + cert_alias: default + keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks + keystore_pass: changeit + keystore_create: true + state: present + +- name: Import SSL certificate to JCEKS keystore + community.general.java_cert: + pkcs12_path: "/tmp/importkeystore.p12" + pkcs12_alias: default + pkcs12_password: somepass + cert_alias: default + keystore_path: /opt/someapp/security/keystore.jceks + keystore_type: "JCEKS" + keystore_pass: changeit + keystore_create: true + state: present +''' + +RETURN = r''' +msg: + description: Output from stdout of keytool command after execution of given command. + returned: success + type: str + sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'" + +rc: + description: Keytool command execution return value. + returned: success + type: int + sample: "0" + +cmd: + description: Executed command to get action done. 
+ returned: success + type: str + sample: "keytool -importcert -noprompt -keystore" +''' + +import os +import tempfile +import re + + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.six.moves.urllib.request import getproxies + + +def _get_keystore_type_keytool_parameters(keystore_type): + ''' Check that custom keystore is presented in parameters ''' + if keystore_type: + return ["-storetype", keystore_type] + return [] + + +def _check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type): + ''' Check if certificate with alias is present in keystore + located at keystore_path ''' + test_cmd = [ + executable, + "-list", + "-keystore", + keystore_path, + "-alias", + alias, + "-rfc" + ] + test_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + (check_rc, stdout, dummy) = module.run_command(test_cmd, data=keystore_pass, check_rc=False) + if check_rc == 0: + return (True, stdout) + return (False, '') + + +def _get_certificate_from_url(module, executable, url, port, pem_certificate_output): + remote_cert_pem_chain = _download_cert_url(module, executable, url, port) + with open(pem_certificate_output, 'w') as f: + f.write(remote_cert_pem_chain) + + +def _get_first_certificate_from_x509_file(module, pem_certificate_file, pem_certificate_output, openssl_bin): + """ Read a X509 certificate chain file and output the first certificate in the list """ + extract_cmd = [ + openssl_bin, + "x509", + "-in", + pem_certificate_file, + "-out", + pem_certificate_output + ] + (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False) + + if extract_rc != 0: + # trying der encoded file + extract_cmd += ["-inform", "der"] + (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False) + + if extract_rc != 0: + # this time it's a real failure + module.fail_json(msg="Internal module failure, cannot extract certificate, error: %s" % extract_stderr, + rc=extract_rc, cmd=extract_cmd) + + return extract_rc + + +def _get_digest_from_x509_file(module, pem_certificate_file, openssl_bin): + """ Read a X509 certificate file and output sha256 digest using openssl """ + # cleanup file before to compare + (dummy, tmp_certificate) = tempfile.mkstemp() + module.add_cleanup_file(tmp_certificate) + _get_first_certificate_from_x509_file(module, pem_certificate_file, tmp_certificate, openssl_bin) + dgst_cmd = [ + openssl_bin, + "dgst", + "-r", + "-sha256", + tmp_certificate + ] + (dgst_rc, dgst_stdout, dgst_stderr) = module.run_command(dgst_cmd, check_rc=False) + + if dgst_rc != 0: + module.fail_json(msg="Internal module failure, cannot compute digest for certificate, error: %s" % dgst_stderr, + rc=dgst_rc, cmd=dgst_cmd) + + return dgst_stdout.split(' ')[0] + + +def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, password, dest): + """ Runs keytools to extract the public cert from a PKCS12 archive and write it to a file. 
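The keytool invocation assembled below is roughly (editor's illustration, placeholders in caps): keytool -list -noprompt -keystore PKCS_FILE -alias ALIAS -storetype pkcs12 -rfc with the PKCS12 password supplied on stdin.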
""" + export_cmd = [ + executable, + "-list", + "-noprompt", + "-keystore", + pkcs_file, + "-alias", + alias, + "-storetype", + "pkcs12", + "-rfc" + ] + (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False) + + if export_rc != 0: + module.fail_json(msg="Internal module failure, cannot extract public certificate from PKCS12, message: %s" % export_stdout, + stderr=export_err, + rc=export_rc) + + with open(dest, 'w') as f: + f.write(export_stdout) + + +def get_proxy_settings(scheme='https'): + """ Returns a tuple containing (proxy_host, proxy_port). (False, False) if no proxy is found """ + proxy_url = getproxies().get(scheme, '') + if not proxy_url: + return (False, False) + else: + parsed_url = urlparse(proxy_url) + if parsed_url.scheme: + (proxy_host, proxy_port) = parsed_url.netloc.split(':') + else: + (proxy_host, proxy_port) = parsed_url.path.split(':') + return (proxy_host, proxy_port) + + +def build_proxy_options(): + """ Returns list of valid proxy options for keytool """ + (proxy_host, proxy_port) = get_proxy_settings() + no_proxy = os.getenv("no_proxy") + + proxy_opts = [] + if proxy_host: + proxy_opts.extend(["-J-Dhttps.proxyHost=%s" % proxy_host, "-J-Dhttps.proxyPort=%s" % proxy_port]) + + if no_proxy is not None: + # For Java's nonProxyHosts property, items are separated by '|', + # and patterns have to start with "*". + non_proxy_hosts = no_proxy.replace(',', '|') + non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts) + + # The property name is http.nonProxyHosts, there is no + # separate setting for HTTPS. + proxy_opts.extend(["-J-Dhttp.nonProxyHosts=%s" % non_proxy_hosts]) + return proxy_opts + + +def _download_cert_url(module, executable, url, port): + """ Fetches the certificate from the remote URL using `keytool -printcert...` + The PEM formatted string is returned """ + proxy_opts = build_proxy_options() + fetch_cmd = [executable, "-printcert", "-rfc", "-sslserver"] + proxy_opts + ["%s:%d" % (url, port)] + + # Fetch SSL certificate from remote host. 
+ (fetch_rc, fetch_out, fetch_err) = module.run_command(fetch_cmd, check_rc=False) + + if fetch_rc != 0: + module.fail_json(msg="Internal module failure, cannot download certificate, error: %s" % fetch_err, + rc=fetch_rc, cmd=fetch_cmd) + + return fetch_out + + +def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, keystore_alias, keystore_type): + ''' Import pkcs12 from path into keystore located on + keystore_path as alias ''' + import_cmd = [ + executable, + "-importkeystore", + "-noprompt", + "-srcstoretype", + "pkcs12", + "-srckeystore", + pkcs12_path, + "-srcalias", + pkcs12_alias, + "-destkeystore", + keystore_path, + "-destalias", + keystore_alias + ] + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + secret_data = "%s\n%s" % (keystore_pass, pkcs12_pass) + # Password of a new keystore must be entered twice, for confirmation + if not os.path.exists(keystore_path): + secret_data = "%s\n%s" % (keystore_pass, secret_data) + + # Use local certificate from local path and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False) + + diff = {'before': '\n', 'after': '%s\n' % keystore_alias} + if import_rc == 0 and os.path.exists(keystore_path): + module.exit_json(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + else: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + + +def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): + ''' Import certificate from path into keystore located on + keystore_path as alias ''' + import_cmd = [ + executable, + "-importcert", + "-noprompt", + "-keystore", + keystore_path, + "-file", + path, + "-alias", + alias + ] + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + if trust_cacert: + import_cmd.extend(["-trustcacerts"]) + + # Use local certificate from local path and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, + data="%s\n%s" % (keystore_pass, keystore_pass), + check_rc=False) + + diff = {'before': '\n', 'after': '%s\n' % alias} + if import_rc == 0: + module.exit_json(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + else: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd) + + +def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type, exit_after=True): + ''' Delete certificate identified with alias from keystore on keystore_path ''' + del_cmd = [ + executable, + "-delete", + "-noprompt", + "-keystore", + keystore_path, + "-alias", + alias + ] + + del_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + # Delete SSL certificate from keystore + (del_rc, del_out, del_err) = module.run_command(del_cmd, data=keystore_pass, check_rc=True) + + if exit_after: + diff = {'before': '%s\n' % alias, 'after': None} + + module.exit_json(changed=True, msg=del_out, + rc=del_rc, cmd=del_cmd, stdout=del_out, + error=del_err, diff=diff) + + +def test_keytool(module, executable): + ''' Test if keytool is actually executable or not ''' + module.run_command([executable], check_rc=True) + + +def test_keystore(module, keystore_path): + ''' Check if we can access keystore as file or not ''' + if keystore_path is None: + keystore_path = '' + + if not os.path.exists(keystore_path) 
and not os.path.isfile(keystore_path): + # Keystore doesn't exist we want to create it + module.fail_json(changed=False, msg="Module require existing keystore at keystore_path '%s'" % keystore_path) + + +def main(): + argument_spec = dict( + cert_url=dict(type='str'), + cert_path=dict(type='path'), + pkcs12_path=dict(type='path'), + pkcs12_password=dict(type='str', no_log=True), + pkcs12_alias=dict(type='str'), + cert_alias=dict(type='str'), + cert_port=dict(type='int', default=443), + keystore_path=dict(type='path'), + keystore_pass=dict(type='str', required=True, no_log=True), + trust_cacert=dict(type='bool', default=False), + keystore_create=dict(type='bool', default=False), + keystore_type=dict(type='str'), + executable=dict(type='str', default='keytool'), + state=dict(type='str', default='present', choices=['absent', 'present']), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[['state', 'present', ('cert_path', 'cert_url', 'pkcs12_path'), True], + ['state', 'absent', ('cert_url', 'cert_alias'), True]], + required_together=[['keystore_path', 'keystore_pass']], + mutually_exclusive=[ + ['cert_url', 'cert_path', 'pkcs12_path'] + ], + supports_check_mode=True, + ) + + url = module.params.get('cert_url') + path = module.params.get('cert_path') + port = module.params.get('cert_port') + + pkcs12_path = module.params.get('pkcs12_path') + pkcs12_pass = module.params.get('pkcs12_password', '') + pkcs12_alias = module.params.get('pkcs12_alias', '1') + + cert_alias = module.params.get('cert_alias') or url + trust_cacert = module.params.get('trust_cacert') + + keystore_path = module.params.get('keystore_path') + keystore_pass = module.params.get('keystore_pass') + keystore_create = module.params.get('keystore_create') + keystore_type = module.params.get('keystore_type') + executable = module.params.get('executable') + state = module.params.get('state') + + # openssl dependency resolution + openssl_bin = module.get_bin_path('openssl', True) + + if path and not cert_alias: + module.fail_json(changed=False, + msg="Using local path import from %s requires alias argument." + % keystore_path) + + test_keytool(module, executable) + + if not keystore_create: + test_keystore(module, keystore_path) + + alias_exists, alias_exists_output = _check_cert_present( + module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + + (dummy, new_certificate) = tempfile.mkstemp() + (dummy, old_certificate) = tempfile.mkstemp() + module.add_cleanup_file(new_certificate) + module.add_cleanup_file(old_certificate) + + if state == 'absent' and alias_exists: + if module.check_mode: + module.exit_json(changed=True) + + # delete and exit + delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + + # dump certificate to enroll in the keystore on disk and compute digest + if state == 'present': + # The alias exists in the keystore so we must now compare the SHA256 hash of the + # public certificate already in the keystore, and the certificate we are wanting to add + if alias_exists: + with open(old_certificate, "w") as f: + f.write(alias_exists_output) + keystore_cert_digest = _get_digest_from_x509_file(module, old_certificate, openssl_bin) + + else: + keystore_cert_digest = '' + + if pkcs12_path: + # Extracting certificate with openssl + _export_public_cert_from_pkcs12(module, executable, pkcs12_path, pkcs12_alias, pkcs12_pass, new_certificate) + + elif path: + # Extracting the X509 digest is a bit easier. 
Keytool will print the PEM + # certificate to stdout so we don't need to do any transformations. + new_certificate = path + + elif url: + # Getting the X509 digest from a URL is the same as from a path, we just have + # to download the cert first + _get_certificate_from_url(module, executable, url, port, new_certificate) + + new_cert_digest = _get_digest_from_x509_file(module, new_certificate, openssl_bin) + + if keystore_cert_digest != new_cert_digest: + + if module.check_mode: + module.exit_json(changed=True) + + if alias_exists: + # The certificate in the keystore does not match with the one we want to be present + # The existing certificate must first be deleted before we insert the correct one + delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type, exit_after=False) + + if pkcs12_path: + import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, cert_alias, keystore_type) + else: + import_cert_path(module, executable, new_certificate, keystore_path, + keystore_pass, cert_alias, keystore_type, trust_cacert) + + module.exit_json(changed=False) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/java_keystore.py b/ansible_collections/community/general/plugins/modules/java_keystore.py new file mode 100644 index 000000000..7c2c4884d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/java_keystore.py @@ -0,0 +1,584 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, quidame +# Copyright (c) 2016, Guillaume Grossetie +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: java_keystore +short_description: Create a Java keystore in JKS format +description: + - Bundle a x509 certificate and its private key into a Java Keystore in JKS format. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the certificate in the keystore. + - If the provided name does not exist in the keystore, the module + will re-create the keystore. This behavior changed in community.general 3.0.0, + before that the module would fail when the name did not match. + type: str + required: true + certificate: + description: + - Content of the certificate used to create the keystore. + - If the fingerprint of the provided certificate does not match the + fingerprint of the certificate bundled in the keystore, the keystore + is regenerated with the provided certificate. + - Exactly one of I(certificate) or I(certificate_path) is required. + type: str + certificate_path: + description: + - Location of the certificate used to create the keystore. + - If the fingerprint of the provided certificate does not match the + fingerprint of the certificate bundled in the keystore, the keystore + is regenerated with the provided certificate. + - Exactly one of I(certificate) or I(certificate_path) is required. + type: path + version_added: '3.0.0' + private_key: + description: + - Content of the private key used to create the keystore. + - Exactly one of I(private_key) or I(private_key_path) is required. + type: str + private_key_path: + description: + - Location of the private key used to create the keystore. 
+ - Exactly one of I(private_key) or I(private_key_path) is required. + type: path + version_added: '3.0.0' + private_key_passphrase: + description: + - Passphrase used to read the private key, if required. + type: str + version_added: '0.2.0' + password: + description: + - Password that should be used to secure the keystore. + - If the provided password fails to unlock the keystore, the module + will re-create the keystore with the new passphrase. This behavior + changed in community.general 3.0.0, before that the module would fail + when the password did not match. + type: str + required: true + dest: + description: + - Absolute path of the generated keystore. + type: path + required: true + force: + description: + - Keystore is created even if it already exists. + type: bool + default: false + owner: + description: + - Name of the user that should own jks file. + required: false + group: + description: + - Name of the group that should own jks file. + required: false + mode: + description: + - Mode the file should be. + required: false + ssl_backend: + description: + - Backend for loading private keys and certificates. + type: str + default: openssl + choices: + - openssl + - cryptography + version_added: 3.1.0 + keystore_type: + description: + - Type of the Java keystore. + - When this option is omitted and the keystore doesn't already exist, the + behavior follows C(keytool)'s default store type which depends on + Java version; C(pkcs12) since Java 9 and C(jks) prior (may also + be C(pkcs12) if new default has been backported to this version). + - When this option is omitted and the keystore already exists, the current + type is left untouched, unless another option leads to overwrite the + keystore (in that case, this option behaves like for keystore creation). + - When I(keystore_type) is set, the keystore is created with this type if + it doesn't already exist, or is overwritten to match the given type in + case of mismatch. + type: str + choices: + - jks + - pkcs12 + version_added: 3.3.0 +requirements: + - openssl in PATH (when I(ssl_backend=openssl)) + - keytool in PATH + - cryptography >= 3.0 (when I(ssl_backend=cryptography)) +author: + - Guillaume Grossetie (@Mogztter) + - quidame (@quidame) +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +seealso: + - module: community.crypto.openssl_pkcs12 + - module: community.general.java_cert +notes: + - I(certificate) and I(private_key) require that their contents are available + on the controller (either inline in a playbook, or with the C(file) lookup), + while I(certificate_path) and I(private_key_path) require that the files are + available on the target host. + - By design, any change of a value of options I(keystore_type), I(name) or + I(password), as well as changes of key or certificate materials will cause + the existing I(dest) to be overwritten. 
+''' + +EXAMPLES = ''' +- name: Create a keystore for the given certificate/private key pair (inline) + community.general.java_keystore: + name: example + certificate: | + -----BEGIN CERTIFICATE----- + h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69 + MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB + -----END CERTIFICATE----- + private_key: | + -----BEGIN RSA PRIVATE KEY----- + DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3 + GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99 + -----END RSA PRIVATE KEY----- + password: changeit + dest: /etc/security/keystore.jks + +- name: Create a keystore for the given certificate/private key pair (with files on controller) + community.general.java_keystore: + name: example + certificate: "{{ lookup('file', '/path/to/certificate.crt') }}" + private_key: "{{ lookup('file', '/path/to/private.key') }}" + password: changeit + dest: /etc/security/keystore.jks + +- name: Create a keystore for the given certificate/private key pair (with files on target host) + community.general.java_keystore: + name: snakeoil + certificate_path: /etc/ssl/certs/ssl-cert-snakeoil.pem + private_key_path: /etc/ssl/private/ssl-cert-snakeoil.key + password: changeit + dest: /etc/security/keystore.jks +''' + +RETURN = ''' +msg: + description: Output from stdout of keytool/openssl command after execution of given command or an error. + returned: changed and failure + type: str + sample: "Unable to find the current certificate fingerprint in ..." + +err: + description: Output from stderr of keytool/openssl command after error of given command. + returned: failure + type: str + sample: "Keystore password is too short - must be at least 6 characters\n" + +rc: + description: keytool/openssl command execution return value + returned: changed and failure + type: int + sample: "0" + +cmd: + description: Executed command to get action done + returned: changed and failure + type: str + sample: "/usr/bin/openssl x509 -noout -in /tmp/user/1000/tmp8jd_lh23 -fingerprint -sha256" +''' + + +import os +import re +import tempfile + +from ansible.module_utils.six import PY2 +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes, to_native + +try: + from cryptography.hazmat.primitives.serialization.pkcs12 import serialize_key_and_certificates + from cryptography.hazmat.primitives.serialization import ( + BestAvailableEncryption, + NoEncryption, + load_pem_private_key, + load_der_private_key, + ) + from cryptography.x509 import ( + load_pem_x509_certificate, + load_der_x509_certificate, + ) + from cryptography.hazmat.primitives import hashes + from cryptography.exceptions import UnsupportedAlgorithm + from cryptography.hazmat.backends.openssl import backend + HAS_CRYPTOGRAPHY_PKCS12 = True +except ImportError: + HAS_CRYPTOGRAPHY_PKCS12 = False + + +class JavaKeystore: + def __init__(self, module): + self.module = module + self.result = dict() + + self.keytool_bin = module.get_bin_path('keytool', True) + + self.certificate = module.params['certificate'] + self.keypass = module.params['private_key_passphrase'] + self.keystore_path = module.params['dest'] + self.name = module.params['name'] + self.password = module.params['password'] + self.private_key = module.params['private_key'] + self.ssl_backend = module.params['ssl_backend'] + self.keystore_type = module.params['keystore_type'] + + if self.ssl_backend == 'openssl': + self.openssl_bin = 
module.get_bin_path('openssl', True) + else: + if not HAS_CRYPTOGRAPHY_PKCS12: + self.module.fail_json(msg=missing_required_lib('cryptography >= 3.0')) + + if module.params['certificate_path'] is None: + self.certificate_path = create_file(self.certificate) + self.module.add_cleanup_file(self.certificate_path) + else: + self.certificate_path = module.params['certificate_path'] + + if module.params['private_key_path'] is None: + self.private_key_path = create_file(self.private_key) + self.module.add_cleanup_file(self.private_key_path) + else: + self.private_key_path = module.params['private_key_path'] + + def update_permissions(self): + file_args = self.module.load_file_common_arguments(self.module.params, path=self.keystore_path) + return self.module.set_fs_attributes_if_different(file_args, False) + + def read_certificate_fingerprint(self, cert_format='PEM'): + if self.ssl_backend == 'cryptography': + if cert_format == 'PEM': + cert_loader = load_pem_x509_certificate + else: + cert_loader = load_der_x509_certificate + + try: + with open(self.certificate_path, 'rb') as cert_file: + cert = cert_loader( + cert_file.read(), + backend=backend + ) + except (OSError, ValueError) as e: + self.module.fail_json(msg="Unable to read the provided certificate: %s" % to_native(e)) + + fp = hex_decode(cert.fingerprint(hashes.SHA256())).upper() + fingerprint = ':'.join([fp[i:i + 2] for i in range(0, len(fp), 2)]) + else: + current_certificate_fingerprint_cmd = [ + self.openssl_bin, "x509", "-noout", "-in", self.certificate_path, "-fingerprint", "-sha256" + ] + (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = self.module.run_command( + current_certificate_fingerprint_cmd, + environ_update=None, + check_rc=False + ) + if rc != 0: + return self.module.fail_json( + msg=current_certificate_fingerprint_out, + err=current_certificate_fingerprint_err, + cmd=current_certificate_fingerprint_cmd, + rc=rc + ) + + current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out) + if not current_certificate_match: + return self.module.fail_json( + msg="Unable to find the current certificate fingerprint in %s" % ( + current_certificate_fingerprint_out + ), + cmd=current_certificate_fingerprint_cmd, + rc=rc + ) + + fingerprint = current_certificate_match.group(1) + return fingerprint + + def read_stored_certificate_fingerprint(self): + stored_certificate_fingerprint_cmd = [ + self.keytool_bin, "-list", "-alias", self.name, + "-keystore", self.keystore_path, "-v" + ] + (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = self.module.run_command( + stored_certificate_fingerprint_cmd, data=self.password, check_rc=False) + if rc != 0: + if "keytool error: java.lang.Exception: Alias <%s> does not exist" % self.name \ + in stored_certificate_fingerprint_out: + return "alias mismatch" + if re.match( + r'keytool error: java\.io\.IOException: ' + + '[Kk]eystore( was tampered with, or)? 
password was incorrect', + stored_certificate_fingerprint_out + ): + return "password mismatch" + return self.module.fail_json( + msg=stored_certificate_fingerprint_out, + err=stored_certificate_fingerprint_err, + cmd=stored_certificate_fingerprint_cmd, + rc=rc + ) + + if self.keystore_type not in (None, self.current_type()): + return "keystore type mismatch" + + stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) + if not stored_certificate_match: + return self.module.fail_json( + msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, + cmd=stored_certificate_fingerprint_cmd, + rc=rc + ) + + return stored_certificate_match.group(1) + + def current_type(self): + magic_bytes = b'\xfe\xed\xfe\xed' + with open(self.keystore_path, 'rb') as fd: + header = fd.read(4) + if header == magic_bytes: + return 'jks' + return 'pkcs12' + + def cert_changed(self): + current_certificate_fingerprint = self.read_certificate_fingerprint() + stored_certificate_fingerprint = self.read_stored_certificate_fingerprint() + return current_certificate_fingerprint != stored_certificate_fingerprint + + def cryptography_create_pkcs12_bundle(self, keystore_p12_path, key_format='PEM', cert_format='PEM'): + if key_format == 'PEM': + key_loader = load_pem_private_key + else: + key_loader = load_der_private_key + + if cert_format == 'PEM': + cert_loader = load_pem_x509_certificate + else: + cert_loader = load_der_x509_certificate + + try: + with open(self.private_key_path, 'rb') as key_file: + private_key = key_loader( + key_file.read(), + password=to_bytes(self.keypass), + backend=backend + ) + except TypeError: + # Re-attempt with no password to match existing behavior + try: + with open(self.private_key_path, 'rb') as key_file: + private_key = key_loader( + key_file.read(), + password=None, + backend=backend + ) + except (OSError, TypeError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided private_key: %s" % to_native(e) + ) + except (OSError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided private_key: %s" % to_native(e) + ) + try: + with open(self.certificate_path, 'rb') as cert_file: + cert = cert_loader( + cert_file.read(), + backend=backend + ) + except (OSError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided certificate: %s" % to_native(e) + ) + + if self.password: + encryption = BestAvailableEncryption(to_bytes(self.password)) + else: + encryption = NoEncryption() + + pkcs12_bundle = serialize_key_and_certificates( + name=to_bytes(self.name), + key=private_key, + cert=cert, + cas=None, + encryption_algorithm=encryption + ) + + with open(keystore_p12_path, 'wb') as p12_file: + p12_file.write(pkcs12_bundle) + + self.result.update(msg="PKCS#12 bundle created by cryptography backend") + + def openssl_create_pkcs12_bundle(self, keystore_p12_path): + export_p12_cmd = [self.openssl_bin, "pkcs12", "-export", "-name", self.name, "-in", self.certificate_path, + "-inkey", self.private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] + + # when keypass is provided, add -passin + cmd_stdin = "" + if self.keypass: + export_p12_cmd.append("-passin") + export_p12_cmd.append("stdin") + cmd_stdin = "%s\n" % self.keypass + cmd_stdin += "%s\n%s" % (self.password, self.password) + + (rc, export_p12_out, 
export_p12_err) = self.module.run_command( + export_p12_cmd, data=cmd_stdin, environ_update=None, check_rc=False + ) + + self.result = dict(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) + if rc != 0: + self.result['err'] = export_p12_err + self.module.fail_json(**self.result) + + def create(self): + """Create the keystore, or replace it with a rollback in case of + keytool failure. + """ + if self.module.check_mode: + self.result['changed'] = True + return self.result + + keystore_p12_path = create_path() + self.module.add_cleanup_file(keystore_p12_path) + + if self.ssl_backend == 'cryptography': + self.cryptography_create_pkcs12_bundle(keystore_p12_path) + else: + self.openssl_create_pkcs12_bundle(keystore_p12_path) + + if self.keystore_type == 'pkcs12': + # Preserve properties of the destination file, if any. + self.module.atomic_move(keystore_p12_path, self.keystore_path) + self.update_permissions() + self.result['changed'] = True + return self.result + + import_keystore_cmd = [self.keytool_bin, "-importkeystore", + "-destkeystore", self.keystore_path, + "-srckeystore", keystore_p12_path, + "-srcstoretype", "pkcs12", + "-alias", self.name, + "-noprompt"] + + if self.keystore_type == 'jks': + keytool_help = self.module.run_command([self.keytool_bin, '-importkeystore', '-help']) + if '-deststoretype' in keytool_help[1] + keytool_help[2]: + import_keystore_cmd.insert(4, "-deststoretype") + import_keystore_cmd.insert(5, self.keystore_type) + + keystore_backup = None + if self.exists(): + keystore_backup = self.keystore_path + '.tmpbak' + # Preserve properties of the source file + self.module.preserved_copy(self.keystore_path, keystore_backup) + os.remove(self.keystore_path) + + (rc, import_keystore_out, import_keystore_err) = self.module.run_command( + import_keystore_cmd, data='%s\n%s\n%s' % (self.password, self.password, self.password), check_rc=False + ) + + self.result = dict(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) + + # keytool may return 0 whereas the keystore has not been created. 
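+ # In that case restore the backup (if any) before failing, so the original + # keystore is left exactly as it was before the module ran.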
+ if rc != 0 or not self.exists(): + if keystore_backup is not None: + self.module.preserved_copy(keystore_backup, self.keystore_path) + os.remove(keystore_backup) + self.result['err'] = import_keystore_err + return self.module.fail_json(**self.result) + + self.update_permissions() + if keystore_backup is not None: + os.remove(keystore_backup) + self.result['changed'] = True + return self.result + + def exists(self): + return os.path.exists(self.keystore_path) + + +# Utility functions +def create_path(): + dummy, tmpfile = tempfile.mkstemp() + os.remove(tmpfile) + return tmpfile + + +def create_file(content): + tmpfd, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfd, 'w') as f: + f.write(content) + return tmpfile + + +def hex_decode(s): + if PY2: + return s.decode('hex') + return s.hex() + + +def main(): + choose_between = (['certificate', 'certificate_path'], + ['private_key', 'private_key_path']) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + dest=dict(type='path', required=True), + certificate=dict(type='str', no_log=True), + certificate_path=dict(type='path'), + private_key=dict(type='str', no_log=True), + private_key_path=dict(type='path', no_log=False), + private_key_passphrase=dict(type='str', no_log=True), + password=dict(type='str', required=True, no_log=True), + ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), + keystore_type=dict(type='str', choices=['jks', 'pkcs12']), + force=dict(type='bool', default=False), + ), + required_one_of=choose_between, + mutually_exclusive=choose_between, + supports_check_mode=True, + add_file_common_args=True, + ) + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + + result = dict() + jks = JavaKeystore(module) + + if jks.exists(): + if module.params['force'] or jks.cert_changed(): + result = jks.create() + else: + result['changed'] = jks.update_permissions() + else: + result = jks.create() + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/jboss.py b/ansible_collections/community/general/plugins/modules/jboss.py new file mode 100644 index 000000000..b389e7e66 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/jboss.py @@ -0,0 +1,185 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Jeroen Hoekx +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +module: jboss +short_description: Deploy applications to JBoss +description: + - Deploy applications to JBoss standalone using the filesystem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + deployment: + required: true + description: + - The name of the deployment. + type: str + src: + description: + - The remote path of the application ear or war to deploy. + - Required when I(state=present). + - Ignored when I(state=absent). + type: path + deploy_path: + default: /var/lib/jbossas/standalone/deployments + description: + - The location in the filesystem where the deployment scanner listens. + type: path + state: + choices: [ present, absent ] + default: "present" + description: + - Whether the application should be deployed or undeployed. 
+ type: str +notes: + - The JBoss standalone deployment-scanner has to be enabled in standalone.xml + - The module can wait until I(deployment) file is deployed/undeployed by deployment-scanner. + Duration of waiting time depends on scan-interval parameter from standalone.xml. + - Ensure no identically named application is deployed through the JBoss CLI +seealso: +- name: WildFly reference + description: Complete reference of the WildFly documentation. + link: https://docs.wildfly.org +author: + - Jeroen Hoekx (@jhoekx) +''' + +EXAMPLES = r""" +- name: Deploy a hello world application to the default deploy_path + community.general.jboss: + src: /tmp/hello-1.0-SNAPSHOT.war + deployment: hello.war + state: present + +- name: Update the hello world application to the non-default deploy_path + community.general.jboss: + src: /tmp/hello-1.1-SNAPSHOT.war + deploy_path: /opt/wildfly/deployment + deployment: hello.war + state: present + +- name: Undeploy the hello world application from the default deploy_path + community.general.jboss: + deployment: hello.war + state: absent +""" + +RETURN = r""" # """ + +import os +import time +from ansible.module_utils.basic import AnsibleModule + + +DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments' + + +def is_deployed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment)) + + +def is_undeployed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment)) + + +def is_failed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + src=dict(type='path'), + deployment=dict(type='str', required=True), + deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH), + state=dict(type='str', choices=['absent', 'present'], default='present'), + ), + required_if=[('state', 'present', ('src',))], + supports_check_mode=True + ) + + result = dict(changed=False) + + src = module.params['src'] + deployment = module.params['deployment'] + deploy_path = module.params['deploy_path'] + state = module.params['state'] + + if not os.path.exists(deploy_path): + module.fail_json(msg="deploy_path does not exist.") + + if state == 'absent' and src: + module.warn('Parameter src is ignored when state=absent') + elif state == 'present' and not os.path.exists(src): + module.fail_json(msg='Source file %s does not exist.' % src) + + deployed = is_deployed(deploy_path, deployment) + + # === when check_mode === + if module.check_mode: + if state == 'present': + if not deployed: + result['changed'] = True + + elif deployed: + if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)): + result['changed'] = True + + elif state == 'absent' and deployed: + result['changed'] = True + + module.exit_json(**result) + # ======================= + + if state == 'present' and not deployed: + if is_failed(deploy_path, deployment): + # Clean up old failed deployment + os.remove(os.path.join(deploy_path, "%s.failed" % deployment)) + + module.preserved_copy(src, os.path.join(deploy_path, deployment)) + while not deployed: + deployed = is_deployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Deploying %s failed.' 
% deployment) + time.sleep(1) + result['changed'] = True + + if state == 'present' and deployed: + if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)): + os.remove(os.path.join(deploy_path, "%s.deployed" % deployment)) + module.preserved_copy(src, os.path.join(deploy_path, deployment)) + deployed = False + while not deployed: + deployed = is_deployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Deploying %s failed.' % deployment) + time.sleep(1) + result['changed'] = True + + if state == 'absent' and deployed: + os.remove(os.path.join(deploy_path, "%s.deployed" % deployment)) + while deployed: + deployed = not is_undeployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Undeploying %s failed.' % deployment) + time.sleep(1) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/jenkins_build.py b/ansible_collections/community/general/plugins/modules/jenkins_build.py new file mode 100644 index 000000000..4f9520224 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/jenkins_build.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: jenkins_build +short_description: Manage jenkins builds +version_added: 2.2.0 +description: + - Manage Jenkins builds with Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: + - Brett Milford (@brettmilford) + - Tong He (@unnecessary-username) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + args: + description: + - A list of parameters to pass to the build. + type: dict + name: + description: + - Name of the Jenkins job to build. + required: true + type: str + build_number: + description: + - An integer which specifies a build of a job. Is required to remove a build from the queue. + type: int + password: + description: + - Password to authenticate with the Jenkins server. + type: str + state: + description: + - Attribute that specifies if the build is to be created, deleted or stopped. + - The C(stopped) state has been added in community.general 3.3.0. + default: present + choices: ['present', 'absent', 'stopped'] + type: str + token: + description: + - API token used to authenticate with the Jenkins server. + type: str + url: + description: + - URL of the Jenkins server. + default: http://localhost:8080 + type: str + user: + description: + - User to authenticate with the Jenkins server. 
+ type: str +''' + +EXAMPLES = ''' +- name: Create a jenkins build using basic authentication + community.general.jenkins_build: + name: "test-check" + args: + cloud: "test" + availability_zone: "test_az" + state: present + user: admin + password: asdfg + url: http://localhost:8080 + +- name: Stop a running jenkins build anonymously + community.general.jenkins_build: + name: "stop-check" + build_number: 3 + state: stopped + url: http://localhost:8080 + +- name: Delete a jenkins build using token authentication + community.general.jenkins_build: + name: "delete-experiment" + build_number: 30 + state: absent + user: Jenkins + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 +''' + +RETURN = ''' +--- +name: + description: Name of the jenkins job. + returned: success + type: str + sample: "test-job" +state: + description: State of the jenkins job. + returned: success + type: str + sample: present +user: + description: User used for authentication. + returned: success + type: str + sample: admin +url: + description: Url to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +build_info: + description: Build info of the jenkins job. + returned: success + type: dict +''' + +import traceback +from time import sleep + +JENKINS_IMP_ERR = None +try: + import jenkins + python_jenkins_installed = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + python_jenkins_installed = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class JenkinsBuild: + + def __init__(self, module): + self.module = module + + self.name = module.params.get('name') + self.password = module.params.get('password') + self.args = module.params.get('args') + self.state = module.params.get('state') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.build_number = module.params.get('build_number') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + 'state': self.state, + } + + self.EXCL_STATE = "excluded state" + + def get_jenkins_connection(self): + try: + if (self.user and self.password): + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif (self.user and self.token): + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif (self.user and not (self.password or self.token)): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e)) + + def get_next_build(self): + try: + build_number = self.server.get_job_info(self.name)['nextBuildNumber'] + except Exception as e: + self.module.fail_json(msg='Unable to get job info from Jenkins server, %s' % to_native(e), + exception=traceback.format_exc()) + + return build_number + + def get_build_status(self): + try: + response = self.server.get_build_info(self.name, self.build_number) + return response + except jenkins.JenkinsException as e: + response = {} + response["result"] = "ABSENT" + return response + except Exception as e: + self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), + exception=traceback.format_exc()) + + def present_build(self): + self.build_number = self.get_next_build() + + try: + 
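+ # python-jenkins' build_job() takes an optional parameters dict as its + # second positional argument; pass it only when parameters were supplied.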
if self.args is None: + self.server.build_job(self.name) + else: + self.server.build_job(self.name, self.args) + except Exception as e: + self.module.fail_json(msg='Unable to create build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + + def stopped_build(self): + build_info = None + try: + build_info = self.server.get_build_info(self.name, self.build_number) + if build_info['building'] is True: + self.server.stop_build(self.name, self.build_number) + except Exception as e: + self.module.fail_json(msg='Unable to stop build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + else: + if build_info['building'] is False: + self.module.exit_json(**self.result) + + def absent_build(self): + try: + self.server.delete_build(self.name, self.build_number) + except Exception as e: + self.module.fail_json(msg='Unable to delete build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + + def get_result(self): + result = self.result + build_status = self.get_build_status() + + if build_status['result'] is None: + sleep(10) + self.get_result() + else: + if self.state == "stopped" and build_status['result'] == "ABORTED": + result['changed'] = True + result['build_info'] = build_status + elif self.state == "absent" and build_status['result'] == "ABSENT": + result['changed'] = True + result['build_info'] = build_status + elif self.state != "absent" and build_status['result'] == "SUCCESS": + result['changed'] = True + result['build_info'] = build_status + else: + result['failed'] = True + result['build_info'] = build_status + + return result + + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + args=dict(type='dict'), + build_number=dict(type='int'), + name=dict(required=True), + password=dict(no_log=True), + state=dict(choices=['present', 'absent', 'stopped'], default="present"), + token=dict(no_log=True), + url=dict(default="http://localhost:8080"), + user=dict(), + ), + mutually_exclusive=[['password', 'token']], + required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]], + ) + + test_dependencies(module) + jenkins_build = JenkinsBuild(module) + + if module.params.get('state') == "present": + jenkins_build.present_build() + elif module.params.get('state') == "stopped": + jenkins_build.stopped_build() + else: + jenkins_build.absent_build() + + sleep(10) + result = jenkins_build.get_result() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/jenkins_job.py b/ansible_collections/community/general/plugins/modules/jenkins_job.py new file mode 100644 index 000000000..09b006448 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/jenkins_job.py @@ -0,0 +1,386 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: jenkins_job +short_description: Manage jenkins jobs +description: + - Manage Jenkins jobs by 
using Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: "Sergio Millan Rodriguez (@sermilrod)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + config: + type: str + description: + - Config in XML format. + - Required if job does not yet exist. + - Mutually exclusive with I(enabled). + - Considered if I(state=present). + required: false + enabled: + description: + - Whether the job should be enabled or disabled. + - Mutually exclusive with I(config). + - Considered if I(state=present). + type: bool + required: false + name: + type: str + description: + - Name of the Jenkins job. + required: true + password: + type: str + description: + - Password to authenticate with the Jenkins server. + required: false + state: + type: str + description: + - Attribute that specifies if the job has to be created or deleted. + required: false + default: present + choices: ['present', 'absent'] + token: + type: str + description: + - API token used to authenticate with the Jenkins server, as an alternative to I(password). + required: false + url: + type: str + description: + - URL where the Jenkins server is accessible. + required: false + default: http://localhost:8080 + user: + type: str + description: + - User to authenticate with the Jenkins server. + required: false + validate_certs: + type: bool + default: true + description: + - If set to C(false), the SSL certificates will not be validated. + This should only be set to C(false) on personally controlled sites + using self-signed certificates, as it avoids verifying the source site. + - The C(python-jenkins) library only handles this by using the environment variable C(PYTHONHTTPSVERIFY). + version_added: 2.3.0 +''' + +EXAMPLES = ''' +- name: Create a jenkins job using basic authentication + community.general.jenkins_job: + config: "{{ lookup('file', 'templates/test.xml') }}" + name: test + password: admin + url: http://localhost:8080 + user: admin + +- name: Create a jenkins job using the token + community.general.jenkins_job: + config: "{{ lookup('template', 'templates/test.xml.j2') }}" + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + url: http://localhost:8080 + user: admin + +- name: Delete a jenkins job using basic authentication + community.general.jenkins_job: + name: test + password: admin + state: absent + url: http://localhost:8080 + user: admin + +- name: Delete a jenkins job using the token + community.general.jenkins_job: + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + state: absent + url: http://localhost:8080 + user: admin + +- name: Disable a jenkins job using basic authentication + community.general.jenkins_job: + name: test + password: admin + enabled: false + url: http://localhost:8080 + user: admin + +- name: Disable a jenkins job using the token + community.general.jenkins_job: + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + enabled: false + url: http://localhost:8080 + user: admin +''' + +RETURN = ''' +--- +name: + description: Name of the Jenkins job. + returned: success + type: str + sample: test-job +state: + description: State of the Jenkins job. + returned: success + type: str + sample: present +enabled: + description: Whether the Jenkins job is enabled or not. + returned: success + type: bool + sample: true +user: + description: User used for authentication. + returned: success + type: str + sample: admin +url: + description: URL used to connect to the Jenkins server.
+ returned: success + type: str + sample: https://jenkins.mydomain.com +''' + +import os +import traceback +import xml.etree.ElementTree as ET + +JENKINS_IMP_ERR = None +try: + import jenkins + python_jenkins_installed = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + python_jenkins_installed = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class JenkinsJob(object): + + def __init__(self, module): + self.module = module + + self.config = module.params.get('config') + self.name = module.params.get('name') + self.password = module.params.get('password') + self.state = module.params.get('state') + self.enabled = module.params.get('enabled') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + 'state': self.state, + 'diff': { + 'before': "", + 'after': "" + } + } + + self.EXCL_STATE = "excluded state" + if not module.params['validate_certs']: + os.environ['PYTHONHTTPSVERIFY'] = '0' + + def get_jenkins_connection(self): + try: + if self.user and self.password: + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif self.user and self.token: + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif self.user and not (self.password or self.token): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc()) + + def get_job_status(self): + try: + response = self.server.get_job_info(self.name) + if "color" not in response: + return self.EXCL_STATE + else: + return to_native(response['color']) + + except Exception as e: + self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc()) + + def job_exists(self): + try: + return bool(self.server.job_exists(self.name)) + except Exception as e: + self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def get_config(self): + return job_config_to_string(self.config) + + def get_current_config(self): + return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8')) + + def has_config_changed(self): + # config is optional, if not provided we keep the current config as is + if self.config is None: + return False + + config_file = self.get_config() + machine_file = self.get_current_config() + + self.result['diff']['after'] = config_file + self.result['diff']['before'] = machine_file + + if machine_file != config_file: + return True + return False + + def present_job(self): + if self.config is None and self.enabled is None: + self.module.fail_json(msg='one of the following params is required on state=present: config,enabled') + + if not self.job_exists(): + self.create_job() + else: + self.update_job() + + def has_state_changed(self, status): + # Keep in current state if enabled arg_spec is not given + if self.enabled is None: + return False + + return (self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled") + + def switch_state(self): + if self.enabled is False: + 
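+ # has_state_changed() already reported a mismatch, so enabled=false here + # means the job is currently enabled and must be disabled (and vice versa).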
self.server.disable_job(self.name) + else: + self.server.enable_job(self.name) + + def update_job(self): + try: + status = self.get_job_status() + + # Handle job config + if self.has_config_changed(): + self.result['changed'] = True + if not self.module.check_mode: + self.server.reconfig_job(self.name, self.get_config()) + + # Handle job disable/enable + elif status != self.EXCL_STATE and self.has_state_changed(status): + self.result['changed'] = True + if not self.module.check_mode: + self.switch_state() + + except Exception as e: + self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def create_job(self): + if self.config is None: + self.module.fail_json(msg='missing required param: config') + + self.result['changed'] = True + try: + config_file = self.get_config() + self.result['diff']['after'] = config_file + if not self.module.check_mode: + self.server.create_job(self.name, config_file) + except Exception as e: + self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def absent_job(self): + if self.job_exists(): + self.result['changed'] = True + self.result['diff']['before'] = self.get_current_config() + if not self.module.check_mode: + try: + self.server.delete_job(self.name) + except Exception as e: + self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url), + exception=traceback.format_exc()) + + def get_result(self): + result = self.result + if self.job_exists(): + result['enabled'] = self.get_job_status() != "disabled" + else: + result['enabled'] = None + return result + + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def job_config_to_string(xml_str): + return ET.tostring(ET.fromstring(xml_str)).decode('ascii') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + config=dict(type='str', required=False), + name=dict(type='str', required=True), + password=dict(type='str', required=False, no_log=True), + state=dict(type='str', required=False, choices=['present', 'absent'], default="present"), + enabled=dict(required=False, type='bool'), + token=dict(type='str', required=False, no_log=True), + url=dict(type='str', required=False, default="http://localhost:8080"), + user=dict(type='str', required=False), + validate_certs=dict(type='bool', default=True), + ), + mutually_exclusive=[ + ['password', 'token'], + ['config', 'enabled'], + ], + supports_check_mode=True, + ) + + test_dependencies(module) + jenkins_job = JenkinsJob(module) + + if module.params.get('state') == "present": + jenkins_job.present_job() + else: + jenkins_job.absent_job() + + result = jenkins_job.get_result() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/jenkins_job_info.py b/ansible_collections/community/general/plugins/modules/jenkins_job_info.py new file mode 100644 index 000000000..ba6a53117 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/jenkins_job_info.py @@ -0,0 +1,262 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) Ansible Project +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: jenkins_job_info +short_description: Get information about Jenkins jobs +description: + - This module can be used to query information about Jenkins jobs that already exist. + - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change. +requirements: + - "python-jenkins >= 0.4.12" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + name: + type: str + description: + - Exact name of the Jenkins job to fetch information about. + glob: + type: str + description: + - A shell glob of Jenkins job names to fetch information about. + color: + type: str + description: + - Only fetch jobs with the given status color. + password: + type: str + description: + - Password to authenticate with the Jenkins server. + - This is mutually exclusive with I(token). + token: + type: str + description: + - API token used to authenticate with the Jenkins server. + - This is mutually exclusive with I(password). + url: + type: str + description: + - URL where the Jenkins server is accessible. + default: http://localhost:8080 + user: + type: str + description: + - User to authenticate with the Jenkins server. + validate_certs: + description: + - If set to C(False), the SSL certificates will not be validated. + - This should only be set to C(False) on personally controlled sites using self-signed certificates. + default: true + type: bool +author: + - "Chris St. Pierre (@stpierre)" +''' + +EXAMPLES = ''' +# Get all Jenkins jobs anonymously +- community.general.jenkins_job_info: + user: admin + register: my_jenkins_job_info + +# Get all Jenkins jobs using basic auth +- community.general.jenkins_job_info: + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get all Jenkins jobs using the token +- community.general.jenkins_job_info: + user: admin + token: abcdefghijklmnop + register: my_jenkins_job_info + +# Get info about a single job using basic auth +- community.general.jenkins_job_info: + name: some-job-name + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about a single job in a folder using basic auth +- community.general.jenkins_job_info: + name: some-folder-name/some-job-name + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about jobs matching a shell glob using basic auth +- community.general.jenkins_job_info: + glob: some-job-* + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about all failing jobs using basic auth +- community.general.jenkins_job_info: + color: red + user: admin + password: hunter2 + register: my_jenkins_job_info + +# Get info about passing jobs matching a shell glob using basic auth +- community.general.jenkins_job_info: + name: some-job-* + color: blue + user: admin + password: hunter2 + register: my_jenkins_job_info + +- name: Get the info from custom URL with token and validate_certs=False + community.general.jenkins_job_info: + user: admin + token: 126df5c60d66c66e3b75b11104a16a8a + url: https://jenkins.example.com + validate_certs: false + register: my_jenkins_job_info +''' + +RETURN = ''' +--- +jobs: + description: All jobs found matching the specified criteria. + returned: success + type: list + sample: + [ + { + "name": "test-job", + "fullname": "test-folder/test-job", + "url":
"http://localhost:8080/job/test-job/", + "color": "blue" + }, + ] +''' + +import ssl +import fnmatch +import traceback + +JENKINS_IMP_ERR = None +try: + import jenkins + HAS_JENKINS = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + HAS_JENKINS = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def get_jenkins_connection(module): + url = module.params["url"] + username = module.params.get("user") + password = module.params.get("password") + token = module.params.get("token") + + validate_certs = module.params.get('validate_certs') + if not validate_certs and hasattr(ssl, 'SSLContext'): + ssl._create_default_https_context = ssl._create_unverified_context + if validate_certs and not hasattr(ssl, 'SSLContext'): + module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9." + " Either update Python or use validate_certs=false.") + + if username and (password or token): + return jenkins.Jenkins(url, username, password or token) + elif username: + return jenkins.Jenkins(url, username) + else: + return jenkins.Jenkins(url) + + +def test_dependencies(module): + if not HAS_JENKINS: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def get_jobs(module): + jenkins_conn = get_jenkins_connection(module) + jobs = [] + if module.params.get("name"): + try: + job_info = jenkins_conn.get_job_info(module.params.get("name")) + except jenkins.NotFoundException: + pass + else: + jobs.append({ + "name": job_info["name"], + "fullname": job_info["fullName"], + "url": job_info["url"], + "color": job_info["color"] + }) + + else: + all_jobs = jenkins_conn.get_all_jobs() + if module.params.get("glob"): + jobs.extend( + j for j in all_jobs + if fnmatch.fnmatch(j["fullname"], module.params.get("glob"))) + else: + jobs = all_jobs + # python-jenkins includes the internal Jenkins class used for each job + # in its return value; we strip that out because the leading underscore + # (and the fact that it's not documented in the python-jenkins docs) + # indicates that it's not part of the dependable public interface. 
+ for job in jobs: + if "_class" in job: + del job["_class"] + + if module.params.get("color"): + jobs = [j for j in jobs if j["color"] == module.params.get("color")] + + return jobs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str'), + glob=dict(type='str'), + color=dict(type='str'), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), + url=dict(type='str', default="http://localhost:8080"), + user=dict(type='str'), + validate_certs=dict(type='bool', default=True), + ), + mutually_exclusive=[ + ['password', 'token'], + ['name', 'glob'], + ], + supports_check_mode=True, + ) + + test_dependencies(module) + jobs = list() + + try: + jobs = get_jobs(module) + except jenkins.JenkinsException as err: + module.fail_json( + msg='Unable to connect to Jenkins server, %s' % to_native(err), + exception=traceback.format_exc()) + + module.exit_json(changed=False, jobs=jobs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/jenkins_plugin.py b/ansible_collections/community/general/plugins/modules/jenkins_plugin.py new file mode 100644 index 000000000..2fbc83e03 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/jenkins_plugin.py @@ -0,0 +1,854 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: jenkins_plugin +author: Jiri Tyr (@jtyr) +short_description: Add or remove Jenkins plugin +description: + - Ansible module which helps to manage Jenkins plugins. + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + group: + type: str + description: + - Name of the Jenkins group on the OS. + default: jenkins + jenkins_home: + type: path + description: + - Home directory of the Jenkins user. + default: /var/lib/jenkins + mode: + type: raw + description: + - File mode applied on versioned plugins. + default: '0644' + name: + type: str + description: + - Plugin name. + required: true + owner: + type: str + description: + - Name of the Jenkins user on the OS. + default: jenkins + state: + type: str + description: + - Desired plugin state. + - If the C(latest) is set, the check for new version will be performed + every time. This is suitable to keep the plugin up-to-date. + choices: [absent, present, pinned, unpinned, enabled, disabled, latest] + default: present + timeout: + type: int + description: + - Server connection timeout in secs. + default: 30 + updates_expiration: + type: int + description: + - Number of seconds after which a new copy of the I(update-center.json) + file is downloaded. This is used to avoid the need to download the + plugin to calculate its checksum when C(latest) is specified. + - Set it to C(0) if no cache file should be used. In that case, the + plugin file will always be downloaded to calculate its checksum when + C(latest) is specified. + default: 86400 + updates_url: + type: list + elements: str + description: + - A list of base URL(s) to retrieve I(update-center.json), and direct plugin files from. + - This can be a list since community.general 3.3.0. 
+ default: ['https://updates.jenkins.io', 'http://mirrors.jenkins.io'] + update_json_url_segment: + type: list + elements: str + description: + - A list of URL segment(s) to retrieve the update center JSON file from. + default: ['update-center.json', 'updates/update-center.json'] + version_added: 3.3.0 + latest_plugins_url_segments: + type: list + elements: str + description: + - Path inside the I(updates_url) to get the latest plugins from. + default: ['latest'] + version_added: 3.3.0 + versioned_plugins_url_segments: + type: list + elements: str + description: + - Path inside the I(updates_url) to get a specific version of plugins from. + default: ['download/plugins', 'plugins'] + version_added: 3.3.0 + url: + type: str + description: + - URL of the Jenkins server. + default: http://localhost:8080 + version: + type: str + description: + - Plugin version number. + - If this option is specified, all plugin dependencies must be installed + manually. + - It might take longer to verify that the correct version is installed. + This is especially true if a specific version number is specified. + - Quote the version to prevent the value from being interpreted as a float. For + example, if C(1.20) were left unquoted, it would become C(1.2). + with_dependencies: + description: + - Defines whether to install plugin dependencies. + - This option takes effect only if the I(version) is not defined. + type: bool + default: true + +notes: + - Plugin installation should be run under root or the same user who owns + the plugin files on the disk. The API installation, which requires only the + Web UI credentials, is performed only if the plugin is not installed yet and + no version is specified. + - It's necessary to notify the handler or call the I(service) module to + restart the Jenkins service after a new plugin has been installed. + - Pinning works only if the plugin is installed and the Jenkins service was + successfully restarted after the plugin installation. + - It is not possible to run the module remotely by changing the I(url) + parameter to point to the Jenkins server. The module must be used on the + host where Jenkins runs as it needs direct access to the plugin files.
+extends_documentation_fragment: + - ansible.builtin.url + - ansible.builtin.files + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Install plugin + community.general.jenkins_plugin: + name: build-pipeline-plugin + +- name: Install plugin without its dependencies + community.general.jenkins_plugin: + name: build-pipeline-plugin + with_dependencies: false + +- name: Make sure the plugin is always up-to-date + community.general.jenkins_plugin: + name: token-macro + state: latest + +- name: Install specific version of the plugin + community.general.jenkins_plugin: + name: token-macro + version: "1.15" + +- name: Pin the plugin + community.general.jenkins_plugin: + name: token-macro + state: pinned + +- name: Unpin the plugin + community.general.jenkins_plugin: + name: token-macro + state: unpinned + +- name: Enable the plugin + community.general.jenkins_plugin: + name: token-macro + state: enabled + +- name: Disable the plugin + community.general.jenkins_plugin: + name: token-macro + state: disabled + +- name: Uninstall plugin + community.general.jenkins_plugin: + name: build-pipeline-plugin + state: absent + +# +# Example of how to authenticate +# +- name: Install plugin + community.general.jenkins_plugin: + name: build-pipeline-plugin + url_username: admin + url_password: p4ssw0rd + url: http://localhost:8888 + +# +# Example of a Play which handles Jenkins restarts during the state changes +# +- name: Jenkins Master play + hosts: jenkins-master + vars: + my_jenkins_plugins: + token-macro: + enabled: true + build-pipeline-plugin: + version: "1.4.9" + pinned: false + enabled: true + tasks: + - name: Install plugins without a specific version + community.general.jenkins_plugin: + name: "{{ item.key }}" + register: my_jenkins_plugin_unversioned + when: > + 'version' not in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Install plugins with a specific version + community.general.jenkins_plugin: + name: "{{ item.key }}" + version: "{{ item.value['version'] }}" + register: my_jenkins_plugin_versioned + when: > + 'version' in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Initiate the fact + ansible.builtin.set_fact: + jenkins_restart_required: false + + - name: Check if restart is required by any of the versioned plugins + ansible.builtin.set_fact: + jenkins_restart_required: true + when: item.changed + with_items: "{{ my_jenkins_plugin_versioned.results }}" + + - name: Check if restart is required by any of the unversioned plugins + ansible.builtin.set_fact: + jenkins_restart_required: true + when: item.changed + with_items: "{{ my_jenkins_plugin_unversioned.results }}" + + - name: Restart Jenkins if required + ansible.builtin.service: + name: jenkins + state: restarted + when: jenkins_restart_required + + - name: Wait for Jenkins to start up + ansible.builtin.uri: + url: http://localhost:8080 + status_code: 200 + timeout: 5 + register: jenkins_service_status + # Keep trying for 5 mins in 5 sec intervals + retries: 60 + delay: 5 + until: > + 'status' in jenkins_service_status and + jenkins_service_status['status'] == 200 + when: jenkins_restart_required + + - name: Reset the fact + ansible.builtin.set_fact: + jenkins_restart_required: false + when: jenkins_restart_required + + - name: Plugin pinning + community.general.jenkins_plugin: + name: "{{ item.key }}" + state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}" + when: > + 'pinned' in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Plugin enabling + 
community.general.jenkins_plugin: + name: "{{ item.key }}" + state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}" + when: > + 'enabled' in item.value + with_dict: "{{ my_jenkins_plugins }}" +''' + +RETURN = ''' +plugin: + description: plugin name + returned: success + type: str + sample: build-pipeline-plugin +state: + description: state of the target, after execution + returned: success + type: str + sample: "present" +''' + +import hashlib +import io +import json +import os +import tempfile + +from ansible.module_utils.basic import AnsibleModule, to_bytes +from ansible.module_utils.six.moves import http_cookiejar as cookiejar +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url, url_argument_spec +from ansible.module_utils.six import text_type, binary_type +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.jenkins import download_updates_file + + +class FailedInstallingWithPluginManager(Exception): + pass + + +class JenkinsPlugin(object): + def __init__(self, module): + # To be able to call fail_json + self.module = module + + # Shortcuts for the params + self.params = self.module.params + self.url = self.params['url'] + self.timeout = self.params['timeout'] + + # Crumb + self.crumb = {} + # Cookie jar for crumb session + self.cookies = None + + if self._csrf_enabled(): + self.cookies = cookiejar.LWPCookieJar() + self.crumb = self._get_crumb() + + # Get list of installed plugins + self._get_installed_plugins() + + def _csrf_enabled(self): + csrf_data = self._get_json_data( + "%s/%s" % (self.url, "api/json"), 'CSRF') + + if 'useCrumbs' not in csrf_data: + self.module.fail_json( + msg="Required fields not found in the Crumbs response.", + details=csrf_data) + + return csrf_data['useCrumbs'] + + def _get_json_data(self, url, what, **kwargs): + # Get the JSON data + r = self._get_url_data(url, what, **kwargs) + + # Parse the JSON data + try: + json_data = json.loads(to_native(r.read())) + except Exception as e: + self.module.fail_json( + msg="Cannot parse %s JSON data." % what, + details=to_native(e)) + + return json_data + + def _get_urls_data(self, urls, what=None, msg_status=None, msg_exception=None, **kwargs): + # Compose default messages + if msg_status is None: + msg_status = "Cannot get %s" % what + + if msg_exception is None: + msg_exception = "Retrieval of %s failed." % what + + errors = {} + for url in urls: + err_msg = None + try: + self.module.debug("fetching url: %s" % url) + response, info = fetch_url( + self.module, url, timeout=self.timeout, cookies=self.cookies, + headers=self.crumb, **kwargs) + + if info['status'] == 200: + return response + else: + err_msg = ("%s. fetching url %s failed. response code: %s" % (msg_status, url, info['status'])) + if info['status'] > 400: # extend error message + err_msg = "%s. response body: %s" % (err_msg, info['body']) + except Exception as e: + err_msg = "%s. fetching url %s failed. error msg: %s" % (msg_status, url, to_native(e)) + finally: + if err_msg is not None: + self.module.debug(err_msg) + errors[url] = err_msg + + # failed on all urls + self.module.fail_json(msg=msg_exception, details=errors) + + def _get_url_data( + self, url, what=None, msg_status=None, msg_exception=None, + dont_fail=False, **kwargs): + # Compose default messages + if msg_status is None: + msg_status = "Cannot get %s" % what + + if msg_exception is None: + msg_exception = "Retrieval of %s failed." 
% what + + # Get the URL data + try: + response, info = fetch_url( + self.module, url, timeout=self.timeout, cookies=self.cookies, + headers=self.crumb, **kwargs) + + if info['status'] != 200: + if dont_fail: + raise FailedInstallingWithPluginManager(info['msg']) + else: + self.module.fail_json(msg=msg_status, details=info['msg']) + except Exception as e: + if dont_fail: + raise FailedInstallingWithPluginManager(e) + else: + self.module.fail_json(msg=msg_exception, details=to_native(e)) + + return response + + def _get_crumb(self): + crumb_data = self._get_json_data( + "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb') + + if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data: + ret = { + crumb_data['crumbRequestField']: crumb_data['crumb'] + } + else: + self.module.fail_json( + msg="Required fields not found in the Crum response.", + details=crumb_data) + + return ret + + def _get_installed_plugins(self): + plugins_data = self._get_json_data( + "%s/%s" % (self.url, "pluginManager/api/json?depth=1"), + 'list of plugins') + + # Check if we got valid data + if 'plugins' not in plugins_data: + self.module.fail_json(msg="No valid plugin data found.") + + # Create final list of installed/pined plugins + self.is_installed = False + self.is_pinned = False + self.is_enabled = False + + for p in plugins_data['plugins']: + if p['shortName'] == self.params['name']: + self.is_installed = True + + if p['pinned']: + self.is_pinned = True + + if p['enabled']: + self.is_enabled = True + + break + + def _install_with_plugin_manager(self): + if not self.module.check_mode: + # Install the plugin (with dependencies) + install_script = ( + 'd = Jenkins.instance.updateCenter.getPlugin("%s")' + '.deploy(); d.get();' % self.params['name']) + + if self.params['with_dependencies']: + install_script = ( + 'Jenkins.instance.updateCenter.getPlugin("%s")' + '.getNeededDependencies().each{it.deploy()}; %s' % ( + self.params['name'], install_script)) + + script_data = { + 'script': install_script + } + data = urlencode(script_data) + + # Send the installation request + r = self._get_url_data( + "%s/scriptText" % self.url, + msg_status="Cannot install plugin.", + msg_exception="Plugin installation has failed.", + data=data, + dont_fail=True) + + hpi_file = '%s/plugins/%s.hpi' % ( + self.params['jenkins_home'], + self.params['name']) + + if os.path.isfile(hpi_file): + os.remove(hpi_file) + + def install(self): + changed = False + plugin_file = ( + '%s/plugins/%s.jpi' % ( + self.params['jenkins_home'], + self.params['name'])) + + if not self.is_installed and self.params['version'] in [None, 'latest']: + try: + self._install_with_plugin_manager() + changed = True + except FailedInstallingWithPluginManager: # Fallback to manually downloading the plugin + pass + + if not changed: + # Check if the plugin directory exists + if not os.path.isdir(self.params['jenkins_home']): + self.module.fail_json( + msg="Jenkins home directory doesn't exist.") + + checksum_old = None + if os.path.isfile(plugin_file): + # Make the checksum of the currently installed plugin + with open(plugin_file, 'rb') as plugin_fh: + plugin_content = plugin_fh.read() + checksum_old = hashlib.sha1(plugin_content).hexdigest() + + if self.params['version'] in [None, 'latest']: + # Take latest version + plugin_urls = self._get_latest_plugin_urls() + else: + # Take specific version + plugin_urls = self._get_versioned_plugin_urls() + if ( + self.params['updates_expiration'] == 0 or + self.params['version'] not in [None, 'latest'] or + checksum_old is 
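+                    # That is: fetch the .hpi directly when the update-center
+                    # cache is disabled (updates_expiration == 0), a specific
+                    # version was requested, or no plugin file exists yet;
+                    # otherwise the cached update-center JSON is consulted
+                    # first (see the elif branch below).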
None): + + # Download the plugin file directly + r = self._download_plugin(plugin_urls) + + # Write downloaded plugin into file if checksums don't match + if checksum_old is None: + # No previously installed plugin + if not self.module.check_mode: + self._write_file(plugin_file, r) + + changed = True + else: + # Get data for the MD5 + data = r.read() + + # Make new checksum + checksum_new = hashlib.sha1(data).hexdigest() + + # If the checksum is different from the currently installed + # plugin, store the new plugin + if checksum_old != checksum_new: + if not self.module.check_mode: + self._write_file(plugin_file, data) + + changed = True + elif self.params['version'] == 'latest': + # Check for update from the updates JSON file + plugin_data = self._download_updates() + + # If the latest version changed, download it + if checksum_old != to_bytes(plugin_data['sha1']): + if not self.module.check_mode: + r = self._download_plugin(plugin_urls) + self._write_file(plugin_file, r) + + changed = True + + # Change file attributes if needed + if os.path.isfile(plugin_file): + params = { + 'dest': plugin_file + } + params.update(self.params) + file_args = self.module.load_file_common_arguments(params) + + if not self.module.check_mode: + # Not sure how to run this in the check mode + changed = self.module.set_fs_attributes_if_different( + file_args, changed) + else: + # See the comment above + changed = True + + return changed + + def _get_latest_plugin_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for update_segment in self.params['latest_plugins_url_segments']: + urls.append("{0}/{1}/{2}.hpi".format(base_url, update_segment, self.params['name'])) + return urls + + def _get_versioned_plugin_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for versioned_segment in self.params['versioned_plugins_url_segments']: + urls.append("{0}/{1}/{2}/{3}/{2}.hpi".format(base_url, versioned_segment, self.params['name'], self.params['version'])) + return urls + + def _get_update_center_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for update_json in self.params['update_json_url_segment']: + urls.append("{0}/{1}".format(base_url, update_json)) + return urls + + def _download_updates(self): + try: + updates_file, download_updates = download_updates_file(self.params['updates_expiration']) + except OSError as e: + self.module.fail_json( + msg="Cannot create temporal directory.", + details=to_native(e)) + + # Download the updates file if needed + if download_updates: + urls = self._get_update_center_urls() + + # Get the data + r = self._get_urls_data( + urls, + msg_status="Remote updates not found.", + msg_exception="Updates download failed.") + + # Write the updates file + tmp_update_fd, tmp_updates_file = tempfile.mkstemp() + os.write(tmp_update_fd, r.read()) + + try: + os.close(tmp_update_fd) + except IOError as e: + self.module.fail_json( + msg="Cannot close the tmp updates file %s." % tmp_updates_file, + details=to_native(e)) + else: + tmp_updates_file = updates_file + + # Open the updates file + try: + f = io.open(tmp_updates_file, encoding='utf-8') + + # Read only the second line + dummy = f.readline() + data = json.loads(f.readline()) + except IOError as e: + self.module.fail_json( + msg="Cannot open%s updates file." % (" temporary" if tmp_updates_file != updates_file else ""), + details=to_native(e)) + except Exception as e: + self.module.fail_json( + msg="Cannot load JSON data from the%s updates file." 
% (" temporary" if tmp_updates_file != updates_file else ""), + details=to_native(e)) + + # Move the updates file to the right place if we could read it + if tmp_updates_file != updates_file: + self.module.atomic_move(tmp_updates_file, updates_file) + + # Check if we have the plugin data available + if not data.get('plugins', {}).get(self.params['name']): + self.module.fail_json(msg="Cannot find plugin data in the updates file.") + + return data['plugins'][self.params['name']] + + def _download_plugin(self, plugin_urls): + # Download the plugin + + return self._get_urls_data( + plugin_urls, + msg_status="Plugin not found.", + msg_exception="Plugin download failed.") + + def _write_file(self, f, data): + # Store the plugin into a temp file and then move it + tmp_f_fd, tmp_f = tempfile.mkstemp() + + if isinstance(data, (text_type, binary_type)): + os.write(tmp_f_fd, data) + else: + os.write(tmp_f_fd, data.read()) + + try: + os.close(tmp_f_fd) + except IOError as e: + self.module.fail_json( + msg='Cannot close the temporal plugin file %s.' % tmp_f, + details=to_native(e)) + + # Move the file onto the right place + self.module.atomic_move(tmp_f, f) + + def uninstall(self): + changed = False + + # Perform the action + if self.is_installed: + if not self.module.check_mode: + self._pm_query('doUninstall', 'Uninstallation') + + changed = True + + return changed + + def pin(self): + return self._pinning('pin') + + def unpin(self): + return self._pinning('unpin') + + def _pinning(self, action): + changed = False + + # Check if the plugin is pinned/unpinned + if ( + action == 'pin' and not self.is_pinned or + action == 'unpin' and self.is_pinned): + + # Perform the action + if not self.module.check_mode: + self._pm_query(action, "%sning" % action.capitalize()) + + changed = True + + return changed + + def enable(self): + return self._enabling('enable') + + def disable(self): + return self._enabling('disable') + + def _enabling(self, action): + changed = False + + # Check if the plugin is pinned/unpinned + if ( + action == 'enable' and not self.is_enabled or + action == 'disable' and self.is_enabled): + + # Perform the action + if not self.module.check_mode: + self._pm_query( + "make%sd" % action.capitalize(), + "%sing" % action[:-1].capitalize()) + + changed = True + + return changed + + def _pm_query(self, action, msg): + url = "%s/pluginManager/plugin/%s/%s" % ( + self.params['url'], self.params['name'], action) + + # Send the request + self._get_url_data( + url, + msg_status="Plugin not found. %s" % url, + msg_exception="%s has failed." 
% msg, + method="POST") + + +def main(): + # Module arguments + argument_spec = url_argument_spec() + argument_spec.update( + group=dict(type='str', default='jenkins'), + jenkins_home=dict(type='path', default='/var/lib/jenkins'), + mode=dict(default='0644', type='raw'), + name=dict(type='str', required=True), + owner=dict(type='str', default='jenkins'), + state=dict( + choices=[ + 'present', + 'absent', + 'pinned', + 'unpinned', + 'enabled', + 'disabled', + 'latest'], + default='present'), + timeout=dict(default=30, type="int"), + updates_expiration=dict(default=86400, type="int"), + updates_url=dict(type="list", elements="str", default=['https://updates.jenkins.io', + 'http://mirrors.jenkins.io']), + update_json_url_segment=dict(type="list", elements="str", default=['update-center.json', + 'updates/update-center.json']), + latest_plugins_url_segments=dict(type="list", elements="str", default=['latest']), + versioned_plugins_url_segments=dict(type="list", elements="str", default=['download/plugins', 'plugins']), + url=dict(default='http://localhost:8080'), + url_password=dict(no_log=True), + version=dict(), + with_dependencies=dict(default=True, type='bool'), + ) + # Module settings + module = AnsibleModule( + argument_spec=argument_spec, + add_file_common_args=True, + supports_check_mode=True, + ) + + # Force basic authentication + module.params['force_basic_auth'] = True + + # Convert timeout to float + try: + module.params['timeout'] = float(module.params['timeout']) + except ValueError as e: + module.fail_json( + msg='Cannot convert %s to float.' % module.params['timeout'], + details=to_native(e)) + + # Set version to latest if state is latest + if module.params['state'] == 'latest': + module.params['state'] = 'present' + module.params['version'] = 'latest' + + # Create some shortcuts + name = module.params['name'] + state = module.params['state'] + + # Initial change state of the task + changed = False + + # Instantiate the JenkinsPlugin object + jp = JenkinsPlugin(module) + + # Perform action depending on the requested state + if state == 'present': + changed = jp.install() + elif state == 'absent': + changed = jp.uninstall() + elif state == 'pinned': + changed = jp.pin() + elif state == 'unpinned': + changed = jp.unpin() + elif state == 'enabled': + changed = jp.enable() + elif state == 'disabled': + changed = jp.disable() + + # Print status of the change + module.exit_json(changed=changed, plugin=name, state=state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/jenkins_script.py b/ansible_collections/community/general/plugins/modules/jenkins_script.py new file mode 100644 index 000000000..7f83ebcdb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/jenkins_script.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, James Hogarth +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +author: James Hogarth (@hogarthj) +module: jenkins_script +short_description: Executes a groovy script in the jenkins instance +description: + - The C(jenkins_script) module takes a script plus a dict of values + to use within the script and returns the result of the script being run. 
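A minimal sketch of the templating this refers to: when I(args) is defined, the module substitutes ${placeholders} in the script via string.Template (see the substitution in main() below; the script and values here are illustrative):

    from string import Template

    script = 'instance.setMode(${jenkins_mode})'
    args = {'jenkins_mode': 'Node.Mode.EXCLUSIVE'}
    print(Template(script).substitute(args))  # -> instance.setMode(Node.Mode.EXCLUSIVE)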
+
+extends_documentation_fragment:
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  script:
+    type: str
+    description:
+      - The Groovy script to be executed.
+        It is passed as a string.Template if I(args) is defined.
+    required: true
+  url:
+    type: str
+    description:
+      - The Jenkins server to execute the script against. The default is a local
+        Jenkins instance that is not being proxied through a webserver.
+    default: http://localhost:8080
+  validate_certs:
+    description:
+      - If set to C(false), the SSL certificates will not be validated.
+        This should only be set to C(false) on personally controlled sites
+        using self-signed certificates, as it avoids verifying the source site.
+    type: bool
+    default: true
+  user:
+    type: str
+    description:
+      - The username to connect to the Jenkins server with.
+  password:
+    type: str
+    description:
+      - The password to connect to the Jenkins server with.
+  timeout:
+    type: int
+    description:
+      - The request timeout in seconds.
+    default: 10
+  args:
+    type: dict
+    description:
+      - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
+
+notes:
+  - Since the script can do anything, this module does not report on changes.
+    As you know what the script does, it is important to set C(changed_when)
+    so that the Ansible output is clear about any alterations made.
+
+'''
+
+EXAMPLES = '''
+- name: Obtaining a list of plugins
+  community.general.jenkins_script:
+    script: 'println(Jenkins.instance.pluginManager.plugins)'
+    user: admin
+    password: admin
+
+- name: Setting master using a variable to hold a more complicated script
+  ansible.builtin.set_fact:
+    setmaster_mode: |
+        import jenkins.model.*
+        instance = Jenkins.getInstance()
+        instance.setMode(${jenkins_mode})
+        instance.save()
+
+- name: Use the variable as the script
+  community.general.jenkins_script:
+    script: "{{ setmaster_mode }}"
+    args:
+      jenkins_mode: Node.Mode.EXCLUSIVE
+
+- name: Interacting with an untrusted HTTPS connection
+  community.general.jenkins_script:
+    script: "println(Jenkins.instance.pluginManager.plugins)"
+    user: admin
+    password: admin
+    url: https://localhost
+    validate_certs: false
+'''
+
+RETURN = '''
+output:
+    description: Result of script
+    returned: success
+    type: str
+    sample: 'Result: true'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_native
+
+
+def is_csrf_protection_enabled(module):
+    resp, info = fetch_url(module,
+                           module.params['url'] + '/api/json',
+                           timeout=module.params['timeout'],
+                           method='GET')
+    if info["status"] != 200:
+        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+    content = to_native(resp.read())
+    return json.loads(content).get('useCrumbs', False)
+
+
+def get_crumb(module, cookies):
+    resp, info = fetch_url(module,
+                           module.params['url'] + '/crumbIssuer/api/json',
+                           method='GET',
+                           timeout=module.params['timeout'],
+                           cookies=cookies)
+    if info["status"] != 200:
+        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+    content = to_native(resp.read())
+    return json.loads(content)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
script=dict(required=True, type="str"), + url=dict(required=False, type="str", default="http://localhost:8080"), + validate_certs=dict(required=False, type="bool", default=True), + user=dict(required=False, type="str", default=None), + password=dict(required=False, no_log=True, type="str", default=None), + timeout=dict(required=False, type="int", default=10), + args=dict(required=False, type="dict", default=None) + ) + ) + + if module.params['user'] is not None: + if module.params['password'] is None: + module.fail_json(msg="password required when user provided", output='') + module.params['url_username'] = module.params['user'] + module.params['url_password'] = module.params['password'] + module.params['force_basic_auth'] = True + + if module.params['args'] is not None: + from string import Template + try: + script_contents = Template(module.params['script']).substitute(module.params['args']) + except KeyError as err: + module.fail_json(msg="Error with templating variable: %s" % err, output='') + else: + script_contents = module.params['script'] + + headers = {} + cookies = None + if is_csrf_protection_enabled(module): + cookies = cookiejar.LWPCookieJar() + crumb = get_crumb(module, cookies) + headers = {crumb['crumbRequestField']: crumb['crumb']} + + resp, info = fetch_url(module, + module.params['url'] + "/scriptText", + data=urlencode({'script': script_contents}), + headers=headers, + method="POST", + timeout=module.params['timeout'], + cookies=cookies) + + if info["status"] != 200: + module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='') + + result = to_native(resp.read()) + + if 'Exception:' in result and 'at java.lang.Thread' in result: + module.fail_json(msg="script failed with stacktrace:\n " + result, output='') + + module.exit_json( + output=result, + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/jira.py b/ansible_collections/community/general/plugins/modules/jira.py new file mode 100644 index 000000000..85097c4b7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/jira.py @@ -0,0 +1,828 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Steve Smith +# Atlassian open-source approval reference OSR-76. +# +# Copyright (c) 2020, Per Abildgaard Toft Search and update function +# Copyright (c) 2021, Brandon McNama Issue attachment functionality +# Copyright (c) 2022, Hugo Prudente Worklog functionality +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r""" +module: jira +short_description: Create and modify issues in a JIRA instance +description: + - Create and modify issues in a JIRA instance. + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + uri: + type: str + required: true + description: + - Base URI for the JIRA instance. + + operation: + type: str + required: true + aliases: [ command ] + choices: [ attach, comment, create, edit, fetch, link, search, transition, update, worklog ] + description: + - The operation to perform. + - C(worklog) was added in community.genereal 6.5.0. + + username: + type: str + description: + - The username to log-in with. + - Must be used with I(password). 
Mutually exclusive with I(token).
+
+  password:
+    type: str
+    description:
+      - The password to log-in with.
+      - Must be used with I(username). Mutually exclusive with I(token).
+
+  token:
+    type: str
+    description:
+      - The personal access token to log-in with.
+      - Mutually exclusive with I(username) and I(password).
+    version_added: 4.2.0
+
+  project:
+    type: str
+    required: false
+    description:
+      - The project for this operation. Required for issue creation.
+
+  summary:
+    type: str
+    required: false
+    description:
+      - The issue summary, where appropriate.
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+
+  description:
+    type: str
+    required: false
+    description:
+      - The issue description, where appropriate.
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+
+  issuetype:
+    type: str
+    required: false
+    description:
+      - The issue type, for issue creation.
+
+  issue:
+    type: str
+    required: false
+    description:
+      - An existing issue key to operate on.
+    aliases: ['ticket']
+
+  comment:
+    type: str
+    required: false
+    description:
+      - The comment text to add.
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+
+  comment_visibility:
+    type: dict
+    description:
+      - Used to specify comment visibility.
+      - See U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) for details.
+    suboptions:
+      type:
+        description:
+          - Use type to specify which of the JIRA visibility restriction types will be used.
+        type: str
+        required: true
+        choices: [group, role]
+      value:
+        description:
+          - Use value to specify the value corresponding to the type of visibility restriction, for example the name of the group or role.
+        type: str
+        required: true
+    version_added: '3.2.0'
+
+  status:
+    type: str
+    required: false
+    description:
+      - Only used when I(operation) is C(transition). The name is a bit of a misnomer, as it actually refers to the transition name.
+
+  assignee:
+    type: str
+    required: false
+    description:
+      - Sets the assignee when I(operation) is C(create), C(transition) or C(edit).
+      - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use I(account_id) instead.
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+
+  account_id:
+    type: str
+    description:
+      - Sets the account identifier for the assignee when I(operation) is C(create), C(transition) or C(edit).
+      - Note that JIRA may not allow changing field values on specific transitions or states.
+    version_added: 2.5.0
+
+  linktype:
+    type: str
+    required: false
+    description:
+      - Set the type of the link, when I(operation) is C(link).
+
+  inwardissue:
+    type: str
+    required: false
+    description:
+      - Set the issue from which the link will be created.
+
+  outwardissue:
+    type: str
+    required: false
+    description:
+      - Set the issue to which the link will be created.
+
+  fields:
+    type: dict
+    required: false
+    description:
+      - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API
+        (possibly after merging with other required data, as when passed to create). See examples for more information,
+        and the JIRA REST API for the structure required for various fields.
+      - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. This is useful, for example, to add JIRA properties.
+ - Note that JIRA may not allow changing field values on specific transitions or states. + default: {} + jql: + required: false + description: + - Query JIRA in JQL Syntax, e.g. 'CMDB Hostname'='test.example.com'. + type: str + version_added: '0.2.0' + + maxresults: + required: false + description: + - Limit the result of I(operation=search). If no value is specified, the default jira limit will be used. + - Used when I(operation=search) only, ignored otherwise. + type: int + version_added: '0.2.0' + + timeout: + type: float + required: false + description: + - Set timeout, in seconds, on requests to JIRA API. + default: 10 + + validate_certs: + required: false + description: + - Require valid SSL certificates (set to C(false) if you'd like to use self-signed certificates) + default: true + type: bool + + attachment: + type: dict + version_added: 2.5.0 + description: + - Information about the attachment being uploaded. + suboptions: + filename: + required: true + type: path + description: + - The path to the file to upload (from the remote node) or, if I(content) is specified, + the filename to use for the attachment. + content: + type: str + description: + - The Base64 encoded contents of the file to attach. If not specified, the contents of I(filename) will be + used instead. + mimetype: + type: str + description: + - The MIME type to supply for the upload. If not specified, best-effort detection will be + done. + +notes: + - "Currently this only works with basic-auth, or tokens." + - "To use with JIRA Cloud, pass the login e-mail as the I(username) and the API token as I(password)." + +author: +- "Steve Smith (@tarka)" +- "Per Abildgaard Toft (@pertoft)" +- "Brandon McNama (@DWSR)" +""" + +EXAMPLES = r""" +# Create a new issue and add a comment to it: +- name: Create an issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: create + summary: Example Issue + description: Created using Ansible + issuetype: Task + args: + fields: + customfield_13225: "test" + customfield_12931: {"value": "Test"} + register: issue + +- name: Comment on issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: comment + comment: A comment added by Ansible + +- name: Comment on issue with restricted visibility + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: comment + comment: A comment added by Ansible + comment_visibility: + type: role + value: Developers + +- name: Comment on issue with property to mark it internal + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: comment + comment: A comment added by Ansible + fields: + properties: + - key: 'sd.public.comment' + value: + internal: true + +# Add an workog to an existing issue +- name: Worklog on issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: worklog + comment: A worklog added by Ansible + fields: + timeSpentSeconds: 12000 + +- name: Workflow on issue with comment restricted visibility + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: worklog + comment: A worklog added by Ansible + comment_visibility: + type: role + 
value: Developers + fields: + timeSpentSeconds: 12000 + +- name: Workflow on issue with comment property to mark it internal + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: worklog + comment: A worklog added by Ansible + fields: + properties: + - key: 'sd.public.comment' + value: + internal: true + timeSpentSeconds: 12000 + +# Assign an existing issue using edit +- name: Assign an issue using free-form fields + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key}}' + operation: edit + assignee: ssmith + +# Create an issue with an existing assignee +- name: Create an assigned issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: create + summary: Assigned issue + description: Created and assigned using Ansible + issuetype: Task + assignee: ssmith + +# Edit an issue +- name: Set the labels on an issue using free-form fields + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: edit + args: + fields: + labels: + - autocreated + - ansible + +# Updating a field using operations: add, set & remove +- name: Change the value of a Select dropdown + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: update + args: + fields: + customfield_12931: [ {'set': {'value': 'Virtual'}} ] + customfield_13820: [ {'set': {'value':'Manually'}} ] + register: cmdb_issue + delegate_to: localhost + + +# Retrieve metadata for an issue and use it to create an account +- name: Get an issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: fetch + issue: ANS-63 + register: issue + +# Search for an issue +# You can limit the search for specific fields by adding optional args. Note! It must be a dict, hence, lastViewed: null +- name: Search for an issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: search + maxresults: 10 + jql: project=cmdb AND cf[13225]="test" + args: + fields: + lastViewed: null + register: issue + +- name: Create a unix account for the reporter + become: true + user: + name: '{{ issue.meta.fields.creator.name }}' + comment: '{{ issue.meta.fields.creator.displayName }}' + +# You can get list of valid linktypes at /rest/api/2/issueLinkType +# url of your jira installation. +- name: Create link from HSP-1 to MKY-1 + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + operation: link + linktype: Relates + inwardissue: HSP-1 + outwardissue: MKY-1 + +# Transition an issue +- name: Resolve the issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: transition + status: Resolve Issue + account_id: 112233445566778899aabbcc + fields: + resolution: + name: Done + description: I am done! This is the last description I will ever give you. 
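A minimal sketch of what I(operation=transition) does under the hood, mirroring operation_transition() further below: the transition id is resolved from its human-readable name via GET .../transitions and then POSTed back (the payload shape follows the JIRA REST API; the values are illustrative):

    def find_transition_id(transitions, name):
        for t in transitions:
            if t['name'] == name:
                return t['id']
        raise ValueError("Failed to find valid transition for '%s'" % name)

    # Shape of the JIRA GET /rest/api/2/issue/<KEY>/transitions response:
    tmeta = {'transitions': [{'id': '5', 'name': 'Resolve Issue'}]}
    assert find_transition_id(tmeta['transitions'], 'Resolve Issue') == '5'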
+ +# Attach a file to an issue +- name: Attach a file + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: HSP-1 + operation: attach + attachment: + filename: topsecretreport.xlsx +""" + +import base64 +import binascii +import json +import mimetypes +import os +import random +import string +import traceback + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper, cause_changes +from ansible.module_utils.six.moves.urllib.request import pathname2url +from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native +from ansible.module_utils.urls import fetch_url + + +class JIRA(StateModuleHelper): + module = dict( + argument_spec=dict( + attachment=dict(type='dict', options=dict( + content=dict(type='str'), + filename=dict(type='path', required=True), + mimetype=dict(type='str') + )), + uri=dict(type='str', required=True), + operation=dict( + type='str', + choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search', 'worklog'], + aliases=['command'], required=True + ), + username=dict(type='str'), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), + project=dict(type='str', ), + summary=dict(type='str', ), + description=dict(type='str', ), + issuetype=dict(type='str', ), + issue=dict(type='str', aliases=['ticket']), + comment=dict(type='str', ), + comment_visibility=dict(type='dict', options=dict( + type=dict(type='str', choices=['group', 'role'], required=True), + value=dict(type='str', required=True) + )), + status=dict(type='str', ), + assignee=dict(type='str', ), + fields=dict(default={}, type='dict'), + linktype=dict(type='str', ), + inwardissue=dict(type='str', ), + outwardissue=dict(type='str', ), + jql=dict(type='str', ), + maxresults=dict(type='int'), + timeout=dict(type='float', default=10), + validate_certs=dict(default=True, type='bool'), + account_id=dict(type='str'), + ), + mutually_exclusive=[ + ['username', 'token'], + ['password', 'token'], + ['assignee', 'account_id'], + ], + required_together=[ + ['username', 'password'], + ], + required_one_of=[ + ['username', 'token'], + ], + required_if=( + ('operation', 'attach', ['issue', 'attachment']), + ('operation', 'create', ['project', 'issuetype', 'summary']), + ('operation', 'comment', ['issue', 'comment']), + ('operation', 'workflow', ['issue', 'comment']), + ('operation', 'fetch', ['issue']), + ('operation', 'transition', ['issue', 'status']), + ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']), + ('operation', 'search', ['jql']), + ), + supports_check_mode=False + ) + + state_param = 'operation' + + def __init_module__(self): + if self.vars.fields is None: + self.vars.fields = {} + if self.vars.assignee: + self.vars.fields['assignee'] = {'name': self.vars.assignee} + if self.vars.account_id: + self.vars.fields['assignee'] = {'accountId': self.vars.account_id} + self.vars.uri = self.vars.uri.strip('/') + self.vars.set('restbase', self.vars.uri + '/rest/api/2') + + @cause_changes(on_success=True) + def operation_create(self): + createfields = { + 'project': {'key': self.vars.project}, + 'summary': self.vars.summary, + 'issuetype': {'name': self.vars.issuetype}} + + if self.vars.description: + createfields['description'] = self.vars.description + + # Merge in any additional or overridden fields + if self.vars.fields: + createfields.update(self.vars.fields) + + data = {'fields': createfields} + url = self.vars.restbase + 
'/issue/' + self.vars.meta = self.post(url, data) + + @cause_changes(on_success=True) + def operation_comment(self): + data = { + 'body': self.vars.comment + } + # if comment_visibility is specified restrict visibility + if self.vars.comment_visibility is not None: + data['visibility'] = self.vars.comment_visibility + + # Use 'fields' to merge in any additional data + if self.vars.fields: + data.update(self.vars.fields) + + url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment' + self.vars.meta = self.post(url, data) + + @cause_changes(on_success=True) + def operation_worklog(self): + data = { + 'comment': self.vars.comment + } + # if comment_visibility is specified restrict visibility + if self.vars.comment_visibility is not None: + data['visibility'] = self.vars.comment_visibility + + # Use 'fields' to merge in any additional data + if self.vars.fields: + data.update(self.vars.fields) + + url = self.vars.restbase + '/issue/' + self.vars.issue + '/worklog' + self.vars.meta = self.post(url, data) + + @cause_changes(on_success=True) + def operation_edit(self): + data = { + 'fields': self.vars.fields + } + url = self.vars.restbase + '/issue/' + self.vars.issue + self.vars.meta = self.put(url, data) + + @cause_changes(on_success=True) + def operation_update(self): + data = { + "update": self.vars.fields, + } + url = self.vars.restbase + '/issue/' + self.vars.issue + self.vars.meta = self.put(url, data) + + def operation_fetch(self): + url = self.vars.restbase + '/issue/' + self.vars.issue + self.vars.meta = self.get(url) + + def operation_search(self): + url = self.vars.restbase + '/search?jql=' + pathname2url(self.vars.jql) + if self.vars.fields: + fields = self.vars.fields.keys() + url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields]) + if self.vars.maxresults: + url = url + '&maxResults=' + str(self.vars.maxresults) + + self.vars.meta = self.get(url) + + @cause_changes(on_success=True) + def operation_transition(self): + # Find the transition id + turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" + tmeta = self.get(turl) + + target = self.vars.status + tid = None + for t in tmeta['transitions']: + if t['name'] == target: + tid = t['id'] + break + else: + raise ValueError("Failed find valid transition for '%s'" % target) + + fields = dict(self.vars.fields) + if self.vars.summary is not None: + fields.update({'summary': self.vars.summary}) + if self.vars.description is not None: + fields.update({'description': self.vars.description}) + + # Perform it + data = {'transition': {"id": tid}, + 'fields': fields} + if self.vars.comment is not None: + data.update({"update": { + "comment": [{ + "add": {"body": self.vars.comment} + }], + }}) + url = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" + self.vars.meta = self.post(url, data) + + @cause_changes(on_success=True) + def operation_link(self): + data = { + 'type': {'name': self.vars.linktype}, + 'inwardIssue': {'key': self.vars.inwardissue}, + 'outwardIssue': {'key': self.vars.outwardissue}, + } + url = self.vars.restbase + '/issueLink/' + self.vars.meta = self.post(url, data) + + @cause_changes(on_success=True) + def operation_attach(self): + v = self.vars + filename = v.attachment.get('filename') + content = v.attachment.get('content') + + if not any((filename, content)): + raise ValueError('at least one of filename or content must be provided') + mime = v.attachment.get('mimetype') + + if not os.path.isfile(filename): + raise ValueError('The provided filename does not 
exist: %s' % filename) + + content_type, data = self._prepare_attachment(filename, content, mime) + + url = v.restbase + '/issue/' + v.issue + '/attachments' + return True, self.post( + url, data, content_type=content_type, additional_headers={"X-Atlassian-Token": "no-check"} + ) + + # Ideally we'd just use prepare_multipart from ansible.module_utils.urls, but + # unfortunately it does not support specifying the encoding and also defaults to + # base64. Jira doesn't support base64 encoded attachments (and is therefore not + # spec compliant. Go figure). I originally wrote this function as an almost + # exact copypasta of prepare_multipart, but ran into some encoding issues when + # using the noop encoder. Hand rolling the entire message body seemed to work + # out much better. + # + # https://community.atlassian.com/t5/Jira-questions/Jira-dosen-t-decode-base64-attachment-request-REST-API/qaq-p/916427 + # + # content is expected to be a base64 encoded string since Ansible doesn't + # support passing raw bytes objects. + @staticmethod + def _prepare_attachment(filename, content=None, mime_type=None): + def escape_quotes(s): + return s.replace('"', '\\"') + + boundary = "".join(random.choice(string.digits + string.ascii_letters) for dummy in range(30)) + name = to_native(os.path.basename(filename)) + + if not mime_type: + try: + mime_type = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream' + except Exception: + mime_type = 'application/octet-stream' + main_type, sep, sub_type = mime_type.partition('/') + + if not content and filename: + with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f: + content = f.read() + else: + try: + content = base64.b64decode(content) + except binascii.Error as e: + raise Exception("Unable to base64 decode file content: %s" % e) + + lines = [ + "--{0}".format(boundary), + 'Content-Disposition: form-data; name="file"; filename={0}'.format(escape_quotes(name)), + "Content-Type: {0}".format("{0}/{1}".format(main_type, sub_type)), + '', + to_text(content), + "--{0}--".format(boundary), + "" + ] + + return ( + "multipart/form-data; boundary={0}".format(boundary), + "\r\n".join(lines) + ) + + def request( + self, + url, + data=None, + method=None, + content_type='application/json', + additional_headers=None + ): + if data and content_type == 'application/json': + data = json.dumps(data) + + headers = {} + if isinstance(additional_headers, dict): + headers = additional_headers.copy() + + # NOTE: fetch_url uses a password manager, which follows the + # standard request-then-challenge basic-auth semantics. However as + # JIRA allows some unauthorised operations it doesn't necessarily + # send the challenge, so the request occurs as the anonymous user, + # resulting in unexpected results. To work around this we manually + # inject the auth header up-front to ensure that JIRA treats + # the requests as authorized for this user. 
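+        # For illustration (not upstream code): the pre-emptive header is simply
+        # the base64 of "user:password", e.g.
+        #     base64.b64encode(b'admin:secret') -> b'YWRtaW46c2VjcmV0'
+        #     Authorization: Basic YWRtaW46c2VjcmV0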
+ + if self.vars.token is not None: + headers.update({ + "Content-Type": content_type, + "Authorization": "Bearer %s" % self.vars.token, + }) + else: + auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(self.vars.username, self.vars.password), + errors='surrogate_or_strict'))) + headers.update({ + "Content-Type": content_type, + "Authorization": "Basic %s" % auth, + }) + + response, info = fetch_url( + self.module, url, data=data, method=method, timeout=self.vars.timeout, headers=headers + ) + + if info['status'] not in (200, 201, 204): + error = None + try: + error = json.loads(info['body']) + except Exception: + msg = 'The request "{method} {url}" returned the unexpected status code {status} {msg}\n{body}'.format( + status=info['status'], + msg=info['msg'], + body=info.get('body'), + url=url, + method=method, + ) + self.module.fail_json(msg=to_native(msg), exception=traceback.format_exc()) + if error: + msg = [] + for key in ('errorMessages', 'errors'): + if error.get(key): + msg.append(to_native(error[key])) + if msg: + self.module.fail_json(msg=', '.join(msg)) + self.module.fail_json(msg=to_native(error)) + # Fallback print body, if it cant be decoded + self.module.fail_json(msg=to_native(info['body'])) + + body = response.read() + + if body: + return json.loads(to_text(body, errors='surrogate_or_strict')) + return {} + + def post(self, url, data, content_type='application/json', additional_headers=None): + return self.request(url, data=data, method='POST', content_type=content_type, + additional_headers=additional_headers) + + def put(self, url, data): + return self.request(url, data=data, method='PUT') + + def get(self, url): + return self.request(url) + + +def main(): + jira = JIRA() + jira.run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/kdeconfig.py b/ansible_collections/community/general/plugins/modules/kdeconfig.py new file mode 100644 index 000000000..42a08dd64 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/kdeconfig.py @@ -0,0 +1,277 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Salvatore Mesoraca +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: kdeconfig +short_description: Manage KDE configuration files +version_added: "6.5.0" +description: + - Add or change individual settings in KDE configuration files. + - It uses B(kwriteconfig) under the hood. + +options: + path: + description: + - Path to the config file. If the file does not exist it will be created. + type: path + required: true + kwriteconfig_path: + description: + - Path to the kwriteconfig executable. If not specified, Ansible will try + to discover it. + type: path + values: + description: + - List of values to set. + type: list + elements: dict + suboptions: + group: + description: + - The option's group. One between this and I(groups) is required. + type: str + groups: + description: + - List of the option's groups. One between this and I(group) is required. + type: list + elements: str + key: + description: + - The option's name. + type: str + required: true + value: + description: + - The option's value. One between this and I(bool_value) is required. + type: str + bool_value: + description: + - Boolean value. + - One between this and I(value) is required. 
+ type: bool + required: true + backup: + description: + - Create a backup file. + type: bool + default: false +extends_documentation_fragment: + - files + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +requirements: + - kwriteconfig +author: + - Salvatore Mesoraca (@smeso) +''' + +EXAMPLES = r''' +- name: Ensure "Homepage=https://www.ansible.com/" in group "Branding" + community.general.kdeconfig: + path: /etc/xdg/kickoffrc + values: + - group: Branding + key: Homepage + value: https://www.ansible.com/ + mode: '0644' + +- name: Ensure "KEY=true" in groups "Group" and "Subgroup", and "KEY=VALUE" in Group2 + community.general.kdeconfig: + path: /etc/xdg/someconfigrc + values: + - groups: [Group, Subgroup] + key: KEY + bool_value: true + - group: Group2 + key: KEY + value: VALUE + backup: true +''' + +RETURN = r''' # ''' + +import os +import shutil +import tempfile +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text + + +class TemporaryDirectory(object): + """Basic backport of tempfile.TemporaryDirectory""" + + def __init__(self, suffix="", prefix="tmp", dir=None): + self.name = None + self.name = tempfile.mkdtemp(suffix, prefix, dir) + + def __enter__(self): + return self.name + + def rm(self): + if self.name: + shutil.rmtree(self.name, ignore_errors=True) + self.name = None + + def __exit__(self, exc, value, tb): + self.rm() + + def __del__(self): + self.rm() + + +def run_kwriteconfig(module, cmd, path, groups, key, value): + """Invoke kwriteconfig with arguments""" + args = [cmd, '--file', path, '--key', key] + for group in groups: + args.extend(['--group', group]) + if isinstance(value, bool): + args.extend(['--type', 'bool']) + if value: + args.append('true') + else: + args.append('false') + else: + args.append(value) + module.run_command(args, check_rc=True) + + +def run_module(module, tmpdir, kwriteconfig): + result = dict(changed=False, msg='OK', path=module.params['path']) + b_path = to_bytes(module.params['path']) + tmpfile = os.path.join(tmpdir, 'file') + b_tmpfile = to_bytes(tmpfile) + diff = dict( + before='', + after='', + before_header=result['path'], + after_header=result['path'], + ) + try: + with open(b_tmpfile, 'wb') as dst: + try: + with open(b_path, 'rb') as src: + b_data = src.read() + except IOError: + result['changed'] = True + else: + dst.write(b_data) + try: + diff['before'] = to_text(b_data) + except UnicodeError: + diff['before'] = repr(b_data) + except IOError: + module.fail_json(msg='Unable to create temporary file', traceback=traceback.format_exc()) + + for row in module.params['values']: + groups = row['groups'] + if groups is None: + groups = [row['group']] + key = row['key'] + value = row['bool_value'] + if value is None: + value = row['value'] + run_kwriteconfig(module, kwriteconfig, tmpfile, groups, key, value) + + with open(b_tmpfile, 'rb') as tmpf: + b_data = tmpf.read() + try: + diff['after'] = to_text(b_data) + except UnicodeError: + diff['after'] = repr(b_data) + + result['changed'] = result['changed'] or diff['after'] != diff['before'] + + file_args = module.load_file_common_arguments(module.params) + + if module.check_mode: + if not result['changed']: + shutil.copystat(b_path, b_tmpfile) + uid, gid = module.user_and_group(b_path) + os.chown(b_tmpfile, uid, gid) + if module._diff: + diff = {} + else: + diff = None + result['changed'] = module.set_fs_attributes_if_different(file_args, 
result['changed'], diff=diff) + if module._diff: + result['diff'] = diff + module.exit_json(**result) + + if result['changed']: + if module.params['backup'] and os.path.exists(b_path): + result['backup_file'] = module.backup_local(result['path']) + try: + module.atomic_move(b_tmpfile, b_path) + except IOError: + module.ansible.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, result['path']), traceback=traceback.format_exc()) + + if result['changed']: + module.set_fs_attributes_if_different(file_args, result['changed']) + else: + if module._diff: + diff = {} + else: + diff = None + result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) + if module._diff: + result['diff'] = diff + module.exit_json(**result) + + +def main(): + single_value_arg = dict(group=dict(type='str'), + groups=dict(type='list', elements='str'), + key=dict(type='str', required=True, no_log=False), + value=dict(type='str'), + bool_value=dict(type='bool')) + required_alternatives = [('group', 'groups'), ('value', 'bool_value')] + module_args = dict( + values=dict(type='list', + elements='dict', + options=single_value_arg, + mutually_exclusive=required_alternatives, + required_one_of=required_alternatives, + required=True), + path=dict(type='path', required=True), + kwriteconfig_path=dict(type='path'), + backup=dict(type='bool', default=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + add_file_common_args=True, + supports_check_mode=True, + ) + + kwriteconfig = None + if module.params['kwriteconfig_path'] is not None: + kwriteconfig = module.get_bin_path(module.params['kwriteconfig_path'], required=True) + else: + for progname in ('kwriteconfig5', 'kwriteconfig', 'kwriteconfig4'): + kwriteconfig = module.get_bin_path(progname) + if kwriteconfig is not None: + break + if kwriteconfig is None: + module.fail_json(msg='kwriteconfig is not installed') + for v in module.params['values']: + if not v['key']: + module.fail_json(msg="'key' cannot be empty") + with TemporaryDirectory(dir=module.tmpdir) as tmpdir: + run_module(module, tmpdir, kwriteconfig) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/kernel_blacklist.py b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py new file mode 100644 index 000000000..1b40999ca --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py @@ -0,0 +1,126 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Alexei Znamensky (@russoz) +# Copyright (c) 2013, Matthias Vogelgesang +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: kernel_blacklist +author: + - Matthias Vogelgesang (@matze) +short_description: Blacklist kernel modules +description: + - Add or remove kernel modules from blacklist. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + name: + type: str + description: + - Name of kernel module to black- or whitelist. + required: true + state: + type: str + description: + - Whether the module should be present in the blacklist or absent. 
+ choices: [ absent, present ] + default: present + blacklist_file: + type: str + description: + - If specified, use this blacklist file instead of + C(/etc/modprobe.d/blacklist-ansible.conf). + default: /etc/modprobe.d/blacklist-ansible.conf +''' + +EXAMPLES = ''' +- name: Blacklist the nouveau driver module + community.general.kernel_blacklist: + name: nouveau + state: present +''' + +import os +import re +import tempfile + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + +class Blacklist(StateModuleHelper): + output_params = ('name', 'state') + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name))) + self.vars.filename = self.vars.blacklist_file + self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True) + if not self.vars.file_exists: + with open(self.vars.filename, 'a'): + pass + self.vars.file_exists = True + self.vars.set('lines', [], change=True, diff=True) + else: + with open(self.vars.filename) as fd: + self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True) + self.vars.set('is_blacklisted', self._is_module_blocked(), change=True) + + def _is_module_blocked(self): + for line in self.vars.lines: + stripped = line.strip() + if stripped.startswith('#'): + continue + if self.pattern.match(stripped): + return True + return False + + def state_absent(self): + if not self.vars.is_blacklisted: + return + self.vars.is_blacklisted = False + self.vars.lines = [line for line in self.vars.lines if not self.pattern.match(line.strip())] + + def state_present(self): + if self.vars.is_blacklisted: + return + self.vars.is_blacklisted = True + self.vars.lines = self.vars.lines + ['blacklist %s' % self.vars.name] + + def __quit_module__(self): + if self.has_changed() and not self.module.check_mode: + dummy, tmpfile = tempfile.mkstemp() + try: + os.remove(tmpfile) + self.module.preserved_copy(self.vars.filename, tmpfile) # ensure right perms/ownership + with open(tmpfile, 'w') as fd: + fd.writelines(["{0}\n".format(x) for x in self.vars.lines]) + self.module.atomic_move(tmpfile, self.vars.filename) + finally: + if os.path.exists(tmpfile): + os.remove(tmpfile) + + +def main(): + Blacklist.execute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authentication.py b/ansible_collections/community/general/plugins/modules/keycloak_authentication.py new file mode 100644 index 000000000..6143d9d5c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_authentication.py @@ -0,0 +1,483 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2019, INSPQ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_authentication + +short_description: Configure authentication in Keycloak + +description: + - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it. 
+    - It can also delete the flow.
+
+version_added: "3.3.0"
+
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: full
+
+options:
+    realm:
+        description:
+            - The name of the realm in which the authentication flow resides.
+        required: true
+        type: str
+    alias:
+        description:
+            - Alias for the authentication flow.
+        required: true
+        type: str
+    description:
+        description:
+            - Description of the flow.
+        type: str
+    providerId:
+        description:
+            - C(providerId) for the new flow when not copied from an existing flow.
+        type: str
+    copyFrom:
+        description:
+            - C(flowAlias) of the authentication flow to use for the copy.
+        type: str
+    authenticationExecutions:
+        description:
+            - Configuration structure for the executions.
+        type: list
+        elements: dict
+        suboptions:
+            providerId:
+                description:
+                    - C(providerId) for the new flow when not copied from an existing flow.
+                type: str
+            displayName:
+                description:
+                    - Name of the execution or subflow to create or update.
+                type: str
+            requirement:
+                description:
+                    - Control status of the subflow or execution.
+                choices: [ "REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL" ]
+                type: str
+            flowAlias:
+                description:
+                    - Alias of the parent flow.
+                type: str
+            authenticationConfig:
+                description:
+                    - Describes the config of the authentication.
+                type: dict
+            index:
+                description:
+                    - Priority order of the execution.
+                type: int
+            subFlowType:
+                description:
+                    - For new subflows, optionally specify the type.
+                    - Is only used at creation.
+                choices: ["basic-flow", "form-flow"]
+                default: "basic-flow"
+                type: str
+                version_added: 6.6.0
+    state:
+        description:
+            - Control whether the authentication flow must exist or not.
+        choices: [ "present", "absent" ]
+        default: present
+        type: str
+    force:
+        type: bool
+        default: false
+        description:
+            - If C(true), allows removing the authentication flow and recreating it.
+
+extends_documentation_fragment:
+    - community.general.keycloak
+    - community.general.attributes
+
+author:
+    - Philippe Gauthier (@elfelip)
+    - Gaëtan Daubresse (@Gaetan2907)
+'''
+
+EXAMPLES = '''
+    - name: Create an authentication flow from first broker login and add an execution to it.
+      community.general.keycloak_authentication:
+        auth_keycloak_url: http://localhost:8080/auth
+        auth_realm: master
+        auth_username: admin
+        auth_password: password
+        realm: master
+        alias: "Copy of first broker login"
+        copyFrom: "first broker login"
+        authenticationExecutions:
+          - providerId: "test-execution1"
+            requirement: "REQUIRED"
+            authenticationConfig:
+              alias: "test.execution1.property"
+              config:
+                test1.property: "value"
+          - providerId: "test-execution2"
+            requirement: "REQUIRED"
+            authenticationConfig:
+              alias: "test.execution2.property"
+              config:
+                test2.property: "value"
+        state: present
+
+    - name: Re-create the authentication flow
+      community.general.keycloak_authentication:
+        auth_keycloak_url: http://localhost:8080/auth
+        auth_realm: master
+        auth_username: admin
+        auth_password: password
+        realm: master
+        alias: "Copy of first broker login"
+        copyFrom: "first broker login"
+        authenticationExecutions:
+          - providerId: "test-provisioning"
+            requirement: "REQUIRED"
+            authenticationConfig:
+              alias: "test.provisioning.property"
+              config:
+                test.provisioning.property: "value"
+        state: present
+        force: true
+
+    - name: Create an authentication flow with subflow containing an execution.
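+    # The flowAlias of the third execution below must match the displayName of
+    # the subflow created just before it: executions are attached to their
+    # parent flow by alias.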
+      community.general.keycloak_authentication:
+        auth_keycloak_url: http://localhost:8080/auth
+        auth_realm: master
+        auth_username: admin
+        auth_password: password
+        realm: master
+        alias: "Copy of first broker login"
+        copyFrom: "first broker login"
+        authenticationExecutions:
+          - providerId: "test-execution1"
+            requirement: "REQUIRED"
+          - displayName: "New Subflow"
+            requirement: "REQUIRED"
+          - providerId: "auth-cookie"
+            requirement: "REQUIRED"
+            flowAlias: "New Subflow"
+        state: present
+
+    - name: Remove authentication.
+      community.general.keycloak_authentication:
+        auth_keycloak_url: http://localhost:8080/auth
+        auth_realm: master
+        auth_username: admin
+        auth_password: password
+        realm: master
+        alias: "Copy of first broker login"
+        state: absent
+'''
+
+RETURN = '''
+msg:
+    description: Message as to what action was taken.
+    returned: always
+    type: str
+
+end_state:
+    description: Representation of the authentication after module execution.
+    returned: on success
+    type: dict
+    sample: {
+      "alias": "Copy of first broker login",
+      "authenticationExecutions": [
+        {
+          "alias": "review profile config",
+          "authenticationConfig": {
+            "alias": "review profile config",
+            "config": { "update.profile.on.first.login": "missing" },
+            "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7"
+          },
+          "configurable": true,
+          "displayName": "Review Profile",
+          "id": "8f77dab8-2008-416f-989e-88b09ccf0b4c",
+          "index": 0,
+          "level": 0,
+          "providerId": "idp-review-profile",
+          "requirement": "REQUIRED",
+          "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ]
+        }
+      ],
+      "builtIn": false,
+      "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account",
+      "id": "bc228863-5887-4297-b898-4d988f8eaa5c",
+      "providerId": "basic-flow",
+      "topLevel": true
+    }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \
+    import KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, is_struct_included
+from ansible.module_utils.basic import AnsibleModule
+
+
+def find_exec_in_executions(searched_exec, executions):
+    """
+    Search whether exec is contained in the executions.
+    :param searched_exec: Execution to search for.
+    :param executions: List of executions.
+    :return: Index of the execution, -1 if not found.
+    """
+    for i, existing_exec in enumerate(executions, start=0):
+        if ("providerId" in existing_exec and "providerId" in searched_exec and
+                existing_exec["providerId"] == searched_exec["providerId"] or
+                "displayName" in existing_exec and "displayName" in searched_exec and
+                existing_exec["displayName"] == searched_exec["displayName"]):
+            return i
+    return -1
+
+
+def create_or_update_executions(kc, config, realm='master'):
+    """
+    Create or update executions for an authentication flow.
+    :param kc: Keycloak API access.
+    :param config: Representation of the authentication flow including its executions.
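+        For illustration only (hypothetical alias and provider; in practice the
+        module passes the full argument-spec structure, with unset suboptions
+        present as None):
+        {"alias": "My flow",
+         "authenticationExecutions": [{"providerId": "auth-cookie", "requirement": "REQUIRED"}]}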
+    :param realm: Realm.
+    :return: tuple (changed, dict(before, after))
+        WHERE
+        bool changed indicates if changes have been made
+        dict(str, str) shows state before and after creation/update
+    """
+    try:
+        changed = False
+        after = ""
+        before = ""
+        if "authenticationExecutions" in config:
+            # Get existing executions on the Keycloak server for this alias
+            existing_executions = kc.get_executions_representation(config, realm=realm)
+            for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0):
+                if new_exec["index"] is not None:
+                    new_exec_index = new_exec["index"]
+                exec_found = False
+                # Get the parent flow alias if given
+                if new_exec["flowAlias"] is not None:
+                    flow_alias_parent = new_exec["flowAlias"]
+                else:
+                    flow_alias_parent = config["alias"]
+                # Check whether an existing execution matches the new one on providerId or displayName
+                exec_index = find_exec_in_executions(new_exec, existing_executions)
+                if exec_index != -1:
+                    # Remove keys that do not need to be compared with existing_exec
+                    exclude_key = ["flowAlias", "subFlowType"]
+                    for index_key, key in enumerate(new_exec, start=0):
+                        if new_exec[key] is None:
+                            exclude_key.append(key)
+                    # Compare the executions to see if they need changes
+                    if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index:
+                        exec_found = True
+                        before += str(existing_executions[exec_index]) + '\n'
+                    id_to_update = existing_executions[exec_index]["id"]
+                    # Remove the execution from the list, in case two executions share the same name
+                    existing_executions[exec_index].clear()
+                elif new_exec["providerId"] is not None:
+                    kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm)
+                    exec_found = True
+                    exec_index = new_exec_index
+                    id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
+                    after += str(new_exec) + '\n'
+                elif new_exec["displayName"] is not None:
+                    kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"])
+                    exec_found = True
+                    exec_index = new_exec_index
+                    id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
+                    after += str(new_exec) + '\n'
+                if exec_found:
+                    changed = True
+                    if exec_index != -1:
+                        # Update the existing execution
+                        updated_exec = {
+                            "id": id_to_update
+                        }
+                        # Add the execution configuration
+                        if new_exec["authenticationConfig"] is not None:
+                            kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm)
+                        for key in new_exec:
+                            # Remove unwanted keys for the next API call
+                            if key not in ("flowAlias", "authenticationConfig", "subFlowType"):
+                                updated_exec[key] = new_exec[key]
+                        if new_exec["requirement"] is not None:
+                            kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm)
+                        diff = exec_index - new_exec_index
+                        kc.change_execution_priority(updated_exec["id"], diff, realm=realm)
+                        after += str(kc.get_executions_representation(config, realm=realm)[new_exec_index]) + '\n'
+        return changed, dict(before=before, after=after)
+    except Exception as e:
+        kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s'
+                                % (config["alias"], realm, str(e)))
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    meta_args = dict(
+        realm=dict(type='str', required=True),
+        alias=dict(type='str', required=True),
+        providerId=dict(type='str'),
+        description=dict(type='str'),
+        copyFrom=dict(type='str'),
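+        # The nested spec that follows mirrors the suboptions documented above;
+        # a play would pass, for example (hypothetical values):
+        #   authenticationExecutions:
+        #     - providerId: auth-cookie
+        #       requirement: REQUIRED
+        #       index: 0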
authenticationExecutions=dict(type='list', elements='dict', + options=dict( + providerId=dict(type='str'), + displayName=dict(type='str'), + requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type='str'), + flowAlias=dict(type='str'), + authenticationConfig=dict(type='dict'), + index=dict(type='int'), + subFlowType=dict(choices=["basic-flow", "form-flow"], default='basic-flow', type='str'), + )), + state=dict(choices=["absent", "present"], default='present'), + force=dict(type='bool', default=False), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']]) + ) + + result = dict(changed=False, msg='', flow={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + force = module.params.get('force') + + new_auth_repr = { + "alias": module.params.get("alias"), + "copyFrom": module.params.get("copyFrom"), + "providerId": module.params.get("providerId"), + "authenticationExecutions": module.params.get("authenticationExecutions"), + "description": module.params.get("description"), + "builtIn": module.params.get("builtIn"), + "subflow": module.params.get("subflow"), + } + + auth_repr = kc.get_authentication_flow_by_alias(alias=new_auth_repr["alias"], realm=realm) + + # Cater for when it doesn't exist (an empty dict) + if not auth_repr: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = new_auth_repr["alias"] + ' absent' + module.exit_json(**result) + + elif state == 'present': + # Process a creation + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=new_auth_repr) + + if module.check_mode: + module.exit_json(**result) + + # If copyFrom is defined, create authentication flow from a copy + if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: + auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) + else: # Create an empty authentication flow + auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) + + # If the authentication still not exist on the server, raise an exception. 
+ if auth_repr is None: + result['msg'] = "Authentication just created not found: " + str(new_auth_repr) + module.fail_json(**result) + + # Configure the executions for the flow + create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) + + # Get executions created + exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) + if exec_repr is not None: + auth_repr["authenticationExecutions"] = exec_repr + result['end_state'] = auth_repr + + else: + if state == 'present': + # Process an update + + if force: # If force option is true + # Delete the actual authentication flow + result['changed'] = True + if module._diff: + result['diff'] = dict(before=auth_repr, after=new_auth_repr) + if module.check_mode: + module.exit_json(**result) + kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) + # If copyFrom is defined, create authentication flow from a copy + if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: + auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) + else: # Create an empty authentication flow + auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) + # If the authentication still not exist on the server, raise an exception. + if auth_repr is None: + result['msg'] = "Authentication just created not found: " + str(new_auth_repr) + module.fail_json(**result) + # Configure the executions for the flow + + if module.check_mode: + module.exit_json(**result) + changed, diff = create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) + result['changed'] |= changed + + if module._diff: + result['diff'] = diff + + # Get executions created + exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) + if exec_repr is not None: + auth_repr["authenticationExecutions"] = exec_repr + result['end_state'] = auth_repr + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=auth_repr, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) + + result['msg'] = 'Authentication flow: {alias} id: {id} is deleted'.format(alias=new_auth_repr['alias'], + id=auth_repr["id"]) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py b/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py new file mode 100644 index 000000000..c451d3751 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_authz_authorization_scope + +short_description: Allows administration of Keycloak client authorization scopes via Keycloak API + +version_added: 6.6.0 + +description: + - This module allows the administration of Keycloak client Authorization Scopes via the Keycloak REST + API. Authorization Scopes are only available if a client has Authorization enabled. 
+ + - This module requires access to the REST API via OpenID Connect; the user connecting and the realm + being used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. + The Authorization Services paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/) + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the authorization scope. + - On C(present), the authorization scope will be created (or updated if it exists already). + - On C(absent), the authorization scope will be removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the authorization scope to create. + type: str + required: true + display_name: + description: + - The display name of the authorization scope. + type: str + required: false + icon_uri: + description: + - The icon URI for the authorization scope. + type: str + required: false + client_id: + description: + - The C(clientId) of the Keycloak client that should have the authorization scope. + - This is usually a human-readable name of the Keycloak client. + type: str + required: true + realm: + description: + - The name of the Keycloak realm the Keycloak client is in. + type: str + required: true + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Samuli Seppänen (@mattock) +''' + +EXAMPLES = ''' +- name: Manage Keycloak file:delete authorization scope + keycloak_authz_authorization_scope: + name: file:delete + state: present + display_name: File delete + client_id: myclient + realm: myrealm + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the authorization scope after module execution. + returned: on success + type: complex + contains: + id: + description: ID of the authorization scope. + type: str + returned: when I(state=present) + sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41 + name: + description: Name of the authorization scope. + type: str + returned: when I(state=present) + sample: file:delete + display_name: + description: Display name of the authorization scope. + type: str + returned: when I(state=present) + sample: File delete + icon_uri: + description: Icon URI for the authorization scope. 
+ type: str + returned: when I(state=present) + sample: http://localhost/icon.png + +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', + choices=['present', 'absent']), + name=dict(type='str', required=True), + display_name=dict(type='str', required=False), + icon_uri=dict(type='str', required=False), + client_id=dict(type='str', required=True), + realm=dict(type='str', required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Convenience variables + state = module.params.get('state') + name = module.params.get('name') + display_name = module.params.get('display_name') + icon_uri = module.params.get('icon_uri') + client_id = module.params.get('client_id') + realm = module.params.get('realm') + + # Get the "id" of the client based on the usually more human-readable + # "clientId" + cid = kc.get_client_id(client_id, realm=realm) + if not cid: + module.fail_json(msg='Invalid client %s for realm %s' % + (client_id, realm)) + + # Get current state of the Authorization Scope using its name as the search + # filter. This returns False if it is not found. + before_authz_scope = kc.get_authz_authorization_scope_by_name( + name=name, client_id=cid, realm=realm) + + # Generate a JSON payload for Keycloak Admin API. This is needed for + # "create" and "update" operations. + desired_authz_scope = {} + desired_authz_scope['name'] = name + desired_authz_scope['displayName'] = display_name + desired_authz_scope['iconUri'] = icon_uri + + # Add "id" to payload for modify operations + if before_authz_scope: + desired_authz_scope['id'] = before_authz_scope['id'] + + # Ensure that undefined (null) optional parameters are presented as empty + # strings in the desired state. This makes comparisons with current state + # much easier. + for k, v in desired_authz_scope.items(): + if not v: + desired_authz_scope[k] = '' + + # Do the above for the current state + if before_authz_scope: + for k in ['displayName', 'iconUri']: + if k not in before_authz_scope: + before_authz_scope[k] = '' + + if before_authz_scope and state == 'present': + changes = False + for k, v in desired_authz_scope.items(): + if before_authz_scope[k] != v: + changes = True + # At this point we know we have to update the object anyways, + # so there's no need to do more work. 
+ break + + if changes: + if module._diff: + result['diff'] = dict(before=before_authz_scope, after=desired_authz_scope) + + if module.check_mode: + result['changed'] = True + result['msg'] = 'Authorization scope would be updated' + module.exit_json(**result) + else: + kc.update_authz_authorization_scope( + payload=desired_authz_scope, id=before_authz_scope['id'], client_id=cid, realm=realm) + result['changed'] = True + result['msg'] = 'Authorization scope updated' + else: + result['changed'] = False + result['msg'] = 'Authorization scope not updated' + + result['end_state'] = desired_authz_scope + elif not before_authz_scope and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=desired_authz_scope) + + if module.check_mode: + result['changed'] = True + result['msg'] = 'Authorization scope would be created' + module.exit_json(**result) + else: + kc.create_authz_authorization_scope( + payload=desired_authz_scope, client_id=cid, realm=realm) + result['changed'] = True + result['msg'] = 'Authorization scope created' + result['end_state'] = desired_authz_scope + elif before_authz_scope and state == 'absent': + if module._diff: + result['diff'] = dict(before=before_authz_scope, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = 'Authorization scope would be removed' + module.exit_json(**result) + else: + kc.remove_authz_authorization_scope( + id=before_authz_scope['id'], client_id=cid, realm=realm) + result['changed'] = True + result['msg'] = 'Authorization scope removed' + elif not before_authz_scope and state == 'absent': + result['changed'] = False + else: + module.fail_json(msg='Unable to determine what to do with authorization scope %s of client %s in realm %s' % ( + name, client_id, realm)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client.py b/ansible_collections/community/general/plugins/modules/keycloak_client.py new file mode 100644 index 000000000..ee687fcb4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_client.py @@ -0,0 +1,984 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Eike Frost +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_client + +short_description: Allows administration of Keycloak clients via Keycloak API + + +description: + - This module allows the administration of Keycloak clients via the Keycloak REST API. It + requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + Aliases are provided so camelCased versions can be used as well. + + - The Keycloak API does not always sanity check inputs e.g. you can set + SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. 
+ If you do not specify a setting, usually a sensible default is chosen. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the client + - On C(present), the client will be created (or updated if it exists already). + - On C(absent), the client will be removed if it exists + choices: ['present', 'absent'] + default: 'present' + type: str + + realm: + description: + - The realm to create the client in. + type: str + default: master + + client_id: + description: + - Client id of client to be worked on. This is usually an alphanumeric name chosen by + you. Either this or I(id) is required. If you specify both, I(id) takes precedence. + This is 'clientId' in the Keycloak REST API. + aliases: + - clientId + type: str + + id: + description: + - Id of client to be worked on. This is usually an UUID. Either this or I(client_id) + is required. If you specify both, this takes precedence. + type: str + + name: + description: + - Name of the client (this is not the same as I(client_id)). + type: str + + description: + description: + - Description of the client in Keycloak. + type: str + + root_url: + description: + - Root URL appended to relative URLs for this client. + This is 'rootUrl' in the Keycloak REST API. + aliases: + - rootUrl + type: str + + admin_url: + description: + - URL to the admin interface of the client. + This is 'adminUrl' in the Keycloak REST API. + aliases: + - adminUrl + type: str + + base_url: + description: + - Default URL to use when the auth server needs to redirect or link back to the client + This is 'baseUrl' in the Keycloak REST API. + aliases: + - baseUrl + type: str + + enabled: + description: + - Is this client enabled or not? + type: bool + + client_authenticator_type: + description: + - How do clients authenticate with the auth server? Either C(client-secret) or + C(client-jwt) can be chosen. When using C(client-secret), the module parameter + I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url), + C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter + to configure its behavior. + This is 'clientAuthenticatorType' in the Keycloak REST API. + choices: ['client-secret', 'client-jwt'] + aliases: + - clientAuthenticatorType + type: str + + secret: + description: + - When using I(client_authenticator_type) C(client-secret) (the default), you can + specify a secret here (otherwise one will be generated if it does not exit). If + changing this secret, the module will not register a change currently (but the + changed secret will be saved). + type: str + + registration_access_token: + description: + - The registration access token provides access for clients to the client registration + service. + This is 'registrationAccessToken' in the Keycloak REST API. + aliases: + - registrationAccessToken + type: str + + default_roles: + description: + - list of default roles for this client. If the client roles referenced do not exist + yet, they will be created. + This is 'defaultRoles' in the Keycloak REST API. + aliases: + - defaultRoles + type: list + elements: str + + redirect_uris: + description: + - Acceptable redirect URIs for this client. + This is 'redirectUris' in the Keycloak REST API. + aliases: + - redirectUris + type: list + elements: str + + web_origins: + description: + - List of allowed CORS origins. + This is 'webOrigins' in the Keycloak REST API. 
+ aliases: + - webOrigins + type: list + elements: str + + not_before: + description: + - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). + This is 'notBefore' in the Keycloak REST API. + type: int + aliases: + - notBefore + + bearer_only: + description: + - The access type of this client is bearer-only. + This is 'bearerOnly' in the Keycloak REST API. + aliases: + - bearerOnly + type: bool + + consent_required: + description: + - If enabled, users have to consent to client access. + This is 'consentRequired' in the Keycloak REST API. + aliases: + - consentRequired + type: bool + + standard_flow_enabled: + description: + - Enable standard flow for this client or not (OpenID connect). + This is 'standardFlowEnabled' in the Keycloak REST API. + aliases: + - standardFlowEnabled + type: bool + + implicit_flow_enabled: + description: + - Enable implicit flow for this client or not (OpenID connect). + This is 'implicitFlowEnabled' in the Keycloak REST API. + aliases: + - implicitFlowEnabled + type: bool + + direct_access_grants_enabled: + description: + - Are direct access grants enabled for this client or not (OpenID connect). + This is 'directAccessGrantsEnabled' in the Keycloak REST API. + aliases: + - directAccessGrantsEnabled + type: bool + + service_accounts_enabled: + description: + - Are service accounts enabled for this client or not (OpenID connect). + This is 'serviceAccountsEnabled' in the Keycloak REST API. + aliases: + - serviceAccountsEnabled + type: bool + + authorization_services_enabled: + description: + - Are authorization services enabled for this client or not (OpenID connect). + This is 'authorizationServicesEnabled' in the Keycloak REST API. + aliases: + - authorizationServicesEnabled + type: bool + + public_client: + description: + - Is the access type for this client public or not. + This is 'publicClient' in the Keycloak REST API. + aliases: + - publicClient + type: bool + + frontchannel_logout: + description: + - Is frontchannel logout enabled for this client or not. + This is 'frontchannelLogout' in the Keycloak REST API. + aliases: + - frontchannelLogout + type: bool + + protocol: + description: + - Type of client (either C(openid-connect) or C(saml). + type: str + choices: ['openid-connect', 'saml'] + + full_scope_allowed: + description: + - Is the "Full Scope Allowed" feature set for this client or not. + This is 'fullScopeAllowed' in the Keycloak REST API. + aliases: + - fullScopeAllowed + type: bool + + node_re_registration_timeout: + description: + - Cluster node re-registration timeout for this client. + This is 'nodeReRegistrationTimeout' in the Keycloak REST API. + type: int + aliases: + - nodeReRegistrationTimeout + + registered_nodes: + description: + - dict of registered cluster nodes (with C(nodename) as the key and last registration + time as the value). + This is 'registeredNodes' in the Keycloak REST API. + type: dict + aliases: + - registeredNodes + + client_template: + description: + - Client template to use for this client. If it does not exist this field will silently + be dropped. + This is 'clientTemplate' in the Keycloak REST API. + type: str + aliases: + - clientTemplate + + use_template_config: + description: + - Whether or not to use configuration from the I(client_template). + This is 'useTemplateConfig' in the Keycloak REST API. + aliases: + - useTemplateConfig + type: bool + + use_template_scope: + description: + - Whether or not to use scope configuration from the I(client_template). 
+ This is 'useTemplateScope' in the Keycloak REST API. + aliases: + - useTemplateScope + type: bool + + use_template_mappers: + description: + - Whether or not to use mapper configuration from the I(client_template). + This is 'useTemplateMappers' in the Keycloak REST API. + aliases: + - useTemplateMappers + type: bool + + always_display_in_console: + description: + - Whether or not to display this client in account console, even if the + user does not have an active session. + aliases: + - alwaysDisplayInConsole + type: bool + version_added: 4.7.0 + + surrogate_auth_required: + description: + - Whether or not surrogate auth is required. + This is 'surrogateAuthRequired' in the Keycloak REST API. + aliases: + - surrogateAuthRequired + type: bool + + authorization_settings: + description: + - a data structure defining the authorization settings for this client. For reference, + please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). + This is 'authorizationSettings' in the Keycloak REST API. + type: dict + aliases: + - authorizationSettings + + authentication_flow_binding_overrides: + description: + - Override realm authentication flow bindings. + type: dict + aliases: + - authenticationFlowBindingOverrides + version_added: 3.4.0 + + default_client_scopes: + description: + - List of default client scopes. + aliases: + - defaultClientScopes + type: list + elements: str + version_added: 4.7.0 + + optional_client_scopes: + description: + - List of optional client scopes. + aliases: + - optionalClientScopes + type: list + elements: str + version_added: 4.7.0 + + protocol_mappers: + description: + - a list of dicts defining protocol mappers for this client. + This is 'protocolMappers' in the Keycloak REST API. + aliases: + - protocolMappers + type: list + elements: dict + suboptions: + consentRequired: + description: + - Specifies whether a user needs to provide consent to a client for this mapper to be active. + type: bool + + consentText: + description: + - The human-readable name of the consent the user is presented to accept. + type: str + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + protocol: + description: + - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper. + is active. + choices: ['openid-connect', 'saml'] + type: str + + protocolMapper: + description: + - The Keycloak-internal name of the type of this protocol-mapper. 
While an exhaustive list is + impossible to provide since this may be extended through SPIs by the user of Keycloak, + by default Keycloak as of 3.4 ships with at least + - C(docker-v2-allow-all-mapper) + - C(oidc-address-mapper) + - C(oidc-full-name-mapper) + - C(oidc-group-membership-mapper) + - C(oidc-hardcoded-claim-mapper) + - C(oidc-hardcoded-role-mapper) + - C(oidc-role-name-mapper) + - C(oidc-script-based-protocol-mapper) + - C(oidc-sha256-pairwise-sub-mapper) + - C(oidc-usermodel-attribute-mapper) + - C(oidc-usermodel-client-role-mapper) + - C(oidc-usermodel-property-mapper) + - C(oidc-usermodel-realm-role-mapper) + - C(oidc-usersessionmodel-note-mapper) + - C(saml-group-membership-mapper) + - C(saml-hardcode-attribute-mapper) + - C(saml-hardcode-role-mapper) + - C(saml-role-list-mapper) + - C(saml-role-name-mapper) + - C(saml-user-attribute-mapper) + - C(saml-user-property-mapper) + - C(saml-user-session-note-mapper) + - An exhaustive list of available mappers on your installation can be obtained on + the admin console by going to Server Info -> Providers and looking under + 'protocol-mapper'. + type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the + contents differ depending on the value of I(protocolMapper) and are not documented + other than by the source of the mappers and its parent class(es). An example is given + below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the I(existing) field. + type: dict + + attributes: + description: + - A dict of further attributes for this client. This can contain various configuration + settings; an example is given in the examples section. While an exhaustive list of + permissible options is not available; possible options as of Keycloak 3.4 are listed below. The Keycloak + API does not validate whether a given option is appropriate for the protocol used; if specified + anyway, Keycloak will simply not use it. + type: dict + suboptions: + saml.authnstatement: + description: + - For SAML clients, boolean specifying whether or not a statement containing method and timestamp + should be included in the login response. + + saml.client.signature: + description: + - For SAML clients, boolean specifying whether a client signature is required and validated. + + saml.encrypt: + description: + - Boolean specifying whether SAML assertions should be encrypted with the client's public key. + + saml.force.post.binding: + description: + - For SAML clients, boolean specifying whether always to use POST binding for responses. + + saml.onetimeuse.condition: + description: + - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses. + + saml.server.signature: + description: + - Boolean specifying whether SAML documents should be signed by the realm. + + saml.server.signature.keyinfo.ext: + description: + - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion + of the signing key id in the SAML Extensions element. + + saml.signature.algorithm: + description: + - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1). + + saml.signing.certificate: + description: + - SAML signing key certificate, base64-encoded. + + saml.signing.private.key: + description: + - SAML signing key private key, base64-encoded. 
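+      # As a sketch (values are illustrative), a realm-signed SAML client would
+      # set the dotted keys above through the I(attributes) option:
+      #   attributes:
+      #     saml.server.signature: true
+      #     saml.signature.algorithm: RSA_SHA256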
+ + saml_assertion_consumer_url_post: + description: + - SAML POST Binding URL for the client's assertion consumer service (login responses). + + saml_assertion_consumer_url_redirect: + description: + - SAML Redirect Binding URL for the client's assertion consumer service (login responses). + + + saml_force_name_id_format: + description: + - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured one instead. + + saml_name_id_format: + description: + - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)) + + saml_signature_canonicalization_method: + description: + - SAML signature canonicalization method. This is one of four values, namely + C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE, + C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS, + C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and + C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS. + + saml_single_logout_service_url_post: + description: + - SAML POST binding url for the client's single logout service. + + saml_single_logout_service_url_redirect: + description: + - SAML redirect binding url for the client's single logout service. + + user.info.response.signature.alg: + description: + - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned). + + request.object.signature.alg: + description: + - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending + OIDC request object. One of C(any), C(none), C(RS256). + + use.jwks.url: + description: + - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client + public keys. + + jwks.url: + description: + - For OpenID-Connect clients, URL where client keys in JWK are stored. + + jwt.credential.certificate: + description: + - For OpenID-Connect clients, client certificate for validating JWT issued by + client and signed by its key, base64-encoded. 
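+      # A minimal client-jwt sketch (hypothetical URL), combining the
+      # I(client_authenticator_type) option with the attribute keys above:
+      #   client_authenticator_type: client-jwt
+      #   attributes:
+      #     use.jwks.url: true
+      #     jwks.url: https://client.example.com/jwks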
+ +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Eike Frost (@eikef) +''' + +EXAMPLES = ''' +- name: Create or update Keycloak client (minimal example), authentication with credentials + community.general.keycloak_client: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + client_id: test + state: present + delegate_to: localhost + + +- name: Create or update Keycloak client (minimal example), authentication with token + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + token: TOKEN + client_id: test + state: present + delegate_to: localhost + + +- name: Delete a Keycloak client + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + client_id: test + state: absent + delegate_to: localhost + + +- name: Create or update a Keycloak client (with all the bells and whistles) + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + realm: master + client_id: test + id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95 + name: this_is_a_test + description: Description of this wonderful client + root_url: https://www.example.com/ + admin_url: https://www.example.com/admin_url + base_url: basepath + enabled: true + client_authenticator_type: client-secret + secret: REALLYWELLKEPTSECRET + redirect_uris: + - https://www.example.com/* + - http://localhost:8888/ + web_origins: + - https://www.example.com/* + not_before: 1507825725 + bearer_only: false + consent_required: false + standard_flow_enabled: true + implicit_flow_enabled: false + direct_access_grants_enabled: false + service_accounts_enabled: false + authorization_services_enabled: false + public_client: false + frontchannel_logout: false + protocol: openid-connect + full_scope_allowed: false + node_re_registration_timeout: -1 + client_template: test + use_template_config: false + use_template_scope: false + use_template_mappers: false + always_display_in_console: true + registered_nodes: + node01.example.com: 1507828202 + registration_access_token: eyJWT_TOKEN + surrogate_auth_required: false + default_roles: + - test01 + - test02 + authentication_flow_binding_overrides: + browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb + protocol_mappers: + - config: + access.token.claim: true + claim.name: "family_name" + id.token.claim: true + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: true + consentRequired: true + consentText: "${familyName}" + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + - config: + attribute.name: Role + attribute.nameformat: Basic + single: false + consentRequired: false + name: role list + protocol: saml + protocolMapper: saml-role-list-mapper + attributes: + saml.authnstatement: true + saml.client.signature: true + saml.force.post.binding: true + saml.server.signature: true + saml.signature.algorithm: RSA_SHA256 + saml.signing.certificate: CERTIFICATEHERE + saml.signing.private.key: PRIVATEKEYHERE + saml_force_name_id_format: false + saml_name_id_format: username + saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#" + 
user.info.response.signature.alg: RS256
+      request.object.signature.alg: RS256
+      use.jwks.url: true
+      jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
+      jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
+  delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+    description: Message as to what action was taken.
+    returned: always
+    type: str
+    sample: "Client testclient has been updated"
+
+proposed:
+    description: Representation of proposed client.
+    returned: always
+    type: dict
+    sample: {
+        clientId: "test"
+    }
+
+existing:
+    description: Representation of existing client (sample is truncated).
+    returned: always
+    type: dict
+    sample: {
+        "adminUrl": "http://www.example.com/admin_url",
+        "attributes": {
+            "request.object.signature.alg": "RS256",
+        }
+    }
+
+end_state:
+    description: Representation of client after module execution (sample is truncated).
+    returned: on success
+    type: dict
+    sample: {
+        "adminUrl": "http://www.example.com/admin_url",
+        "attributes": {
+            "request.object.signature.alg": "RS256",
+        }
+    }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+import copy
+
+
+def normalise_cr(clientrep, remove_ids=False):
+    """ Re-sorts any properties where the order does not matter, so that diffs are
+    minimised, and adds default values where appropriate, so that change detection
+    is more effective.
+
+    :param clientrep: the clientrep dict to be sanitized
+    :param remove_ids: If set to true, the unique IDs of objects are removed, so that
+        diffs and change checks do not trigger when object IDs are not usually known
+        (e.g. for protocol_mappers)
+    :return: normalised clientrep dict
+    """
+    # Avoid modifying the dict that was passed in
+    clientrep = clientrep.copy()
+
+    if 'attributes' in clientrep:
+        clientrep['attributes'] = list(sorted(clientrep['attributes']))
+
+    if 'redirectUris' in clientrep:
+        clientrep['redirectUris'] = list(sorted(clientrep['redirectUris']))
+
+    if 'protocolMappers' in clientrep:
+        clientrep['protocolMappers'] = sorted(clientrep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper')))
+        for mapper in clientrep['protocolMappers']:
+            if remove_ids:
+                mapper.pop('id', None)
+
+            # Set to a default value.
+            mapper['consentRequired'] = mapper.get('consentRequired', False)
+
+    return clientrep
+
+
+def sanitize_cr(clientrep):
+    """ Removes probably sensitive details from a client representation.
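+    A hypothetical before/after pair (note that normalise_cr() also reduces
+    'attributes' to a sorted list of its keys):
+        {'secret': 'hunter2', 'attributes': {'saml.signing.private.key': 'k'}}
+        -> {'secret': 'no_log', 'attributes': ['saml.signing.private.key']}
+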
+ + :param clientrep: the clientrep dict to be sanitized + :return: sanitized clientrep dict + """ + result = copy.deepcopy(clientrep) + if 'secret' in result: + result['secret'] = 'no_log' + if 'attributes' in result: + if 'saml.signing.private.key' in result['attributes']: + result['attributes']['saml.signing.private.key'] = 'no_log' + return normalise_cr(result) + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + protmapper_spec = dict( + consentRequired=dict(type='bool'), + consentText=dict(type='str'), + id=dict(type='str'), + name=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml']), + protocolMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + + id=dict(type='str'), + client_id=dict(type='str', aliases=['clientId']), + name=dict(type='str'), + description=dict(type='str'), + root_url=dict(type='str', aliases=['rootUrl']), + admin_url=dict(type='str', aliases=['adminUrl']), + base_url=dict(type='str', aliases=['baseUrl']), + surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']), + enabled=dict(type='bool'), + client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']), + secret=dict(type='str', no_log=True), + registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), + default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), + redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']), + web_origins=dict(type='list', elements='str', aliases=['webOrigins']), + not_before=dict(type='int', aliases=['notBefore']), + bearer_only=dict(type='bool', aliases=['bearerOnly']), + consent_required=dict(type='bool', aliases=['consentRequired']), + standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']), + implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']), + direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']), + service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']), + authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']), + public_client=dict(type='bool', aliases=['publicClient']), + frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']), + protocol=dict(type='str', choices=['openid-connect', 'saml']), + attributes=dict(type='dict'), + full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']), + node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']), + registered_nodes=dict(type='dict', aliases=['registeredNodes']), + client_template=dict(type='str', aliases=['clientTemplate']), + use_template_config=dict(type='bool', aliases=['useTemplateConfig']), + use_template_scope=dict(type='bool', aliases=['useTemplateScope']), + use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), + always_display_in_console=dict(type='bool', aliases=['alwaysDisplayInConsole']), + authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']), + protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), + authorization_settings=dict(type='dict', aliases=['authorizationSettings']), + default_client_scopes=dict(type='list', elements='str', 
aliases=['defaultClientScopes']), + optional_client_scopes=dict(type='list', elements='str', aliases=['optionalClientScopes']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['client_id', 'id'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + cid = module.params.get('id') + state = module.params.get('state') + + # Filter and map the parameters names that apply to the client + client_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm) + if before_client is not None: + cid = before_client['id'] + else: + before_client = kc.get_client_by_id(cid, realm=realm) + + if before_client is None: + before_client = {} + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for client_param in client_params: + new_param_value = module.params.get(client_param) + + # some lists in the Keycloak API are sorted, some are not. + if isinstance(new_param_value, list): + if client_param in ['attributes']: + try: + new_param_value = sorted(new_param_value) + except TypeError: + pass + # Unfortunately, the ansible argument spec checker introduces variables with null values when + # they are not specified + if client_param == 'protocol_mappers': + new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + + changeset[camel(client_param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) + desired_client = before_client.copy() + desired_client.update(changeset) + + result['proposed'] = sanitize_cr(changeset) + result['existing'] = sanitize_cr(before_client) + + # Cater for when it doesn't exist (an empty dict) + if not before_client: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Client does not exist; doing nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if 'clientId' not in desired_client: + module.fail_json(msg='client_id needs to be specified when creating a new client') + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(desired_client)) + + if module.check_mode: + module.exit_json(**result) + + # create it + kc.create_client(desired_client, realm=realm) + after_client = kc.get_client_by_clientid(desired_client['clientId'], realm=realm) + + result['end_state'] = sanitize_cr(after_client) + + result['msg'] = 'Client %s has been created.' 
% desired_client['clientId'] + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + result['changed'] = True + + if module.check_mode: + # We can only compare the current client with the proposed updates we have + before_norm = normalise_cr(before_client, remove_ids=True) + desired_norm = normalise_cr(desired_client, remove_ids=True) + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_norm), + after=sanitize_cr(desired_norm)) + result['changed'] = (before_norm != desired_norm) + + module.exit_json(**result) + + # do the update + kc.update_client(cid, desired_client, realm=realm) + + after_client = kc.get_client_by_id(cid, realm=realm) + if before_client == after_client: + result['changed'] = False + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_client), + after=sanitize_cr(after_client)) + + result['end_state'] = sanitize_cr(after_client) + + result['msg'] = 'Client %s has been updated.' % desired_client['clientId'] + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_client), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_client(cid, realm=realm) + result['proposed'] = {} + + result['end_state'] = {} + + result['msg'] = 'Client %s has been deleted.' % before_client['clientId'] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py b/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py new file mode 100644 index 000000000..57dcac48d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py @@ -0,0 +1,361 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_client_rolemapping + +short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API + +version_added: 3.5.0 + +description: + - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + + - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup + to the API to translate the name into the role ID. 
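+# With only role names given, the module performs one extra lookup per role to
+# resolve its ID; passing both name and id (as in the examples below) avoids
+# those calls.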
+ +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the client_rolemapping. + - On C(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the client_rolemapping will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - The Keycloak realm under which this role_representation resides. + default: 'master' + + group_name: + type: str + description: + - Name of the group to be mapped. + - This parameter is required unless I(gid) is provided; passing I(gid) instead saves an API call. + + gid: + type: str + description: + - Id of the group to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + client_id: + type: str + description: + - Name of the client to be mapped (different from I(cid)). + - This parameter is required unless I(cid) is provided; passing I(cid) instead saves an API call. + + cid: + type: str + description: + - Id of the client to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + roles: + description: + - Roles to be mapped to the group. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role_representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation. + - This parameter is not required for updating or deleting a role_representation but + providing it will reduce the number of API calls required. + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Gaëtan Daubresse (@Gaetan2907) +''' + +EXAMPLES = ''' +- name: Map a client role to a group, authentication with credentials + community.general.keycloak_client_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a group, authentication with token + community.general.keycloak_client_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap client role from a group + community.general.keycloak_client_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Role role1 assigned to group group1." + +proposed: + description: Representation of proposed client role mapping.
+ returned: always + type: dict + sample: { + clientId: "test" + } + +existing: + description: + - Representation of existing client role mapping. + - The sample is truncated. + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } + +end_state: + description: + - Representation of client role mapping after module execution. + - The sample is truncated. + returned: on success + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, +) +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + gid=dict(type='str'), + group_name=dict(type='str'), + cid=dict(type='str'), + client_id=dict(type='str'), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('cid') + client_id = module.params.get('client_id') + gid = module.params.get('gid') + group_name = module.params.get('group_name') + roles = module.params.get('roles') + + # Check the parameters + if cid is None and client_id is None: + module.fail_json(msg='Either the `client_id` or `cid` has to be specified.') + if gid is None and group_name is None: + module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') + + # Get the potential missing parameters + if gid is None: + group_rep = kc.get_group_by_name(group_name, realm=realm) + if group_rep is not None: + gid = group_rep['id'] + else: + module.fail_json(msg='Could not fetch group %s.' % group_name) + if cid is None: + cid = kc.get_client_id(client_id, realm=realm) + if cid is None: + module.fail_json(msg='Could not fetch client %s.' % client_id) + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role in roles: + if role['name'] is None and role['id'] is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role['id'] is None: + role_id = kc.get_client_role_id_by_name(cid, role['name'], realm=realm) + if role_id is not None: + role['id'] = role_id + else: + module.fail_json(msg='Could not fetch role %s.' % role['name']) + # Fetch missing role_name + else: + fetched_role = kc.get_client_group_rolemapping_by_id(gid, cid, role['id'], realm=realm) + role['name'] = fetched_role['name'] if fetched_role is not None else None + if role['name'] is None: +
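+ # The given role ID did not resolve to an existing client role mapping; abort with an error.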
module.fail_json(msg='Could not fetch role %s.' % role['id']) + + # Get effective client-level role mappings + available_roles_before = kc.get_client_group_available_rolemappings(gid, cid, realm=realm) + assigned_roles_before = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm) + + result['existing'] = assigned_roles_before + result['proposed'] = list(assigned_roles_before) if assigned_roles_before else [] + + update_roles = [] + for role in roles: + # Fetch roles to assign if state present + if state == 'present': + for available_role in available_roles_before: + if role['name'] == available_role['name']: + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + result['proposed'].append(available_role) + # Fetch roles to remove if state absent + else: + for assigned_role in assigned_roles_before: + if role['name'] == assigned_role['name']: + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + if assigned_role in result['proposed']: # Handle double removal + result['proposed'].remove(assigned_role) + + if update_roles: + if state == 'present': + # Assign roles + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=result['proposed']) + if module.check_mode: + module.exit_json(**result) + kc.add_group_rolemapping(gid, cid, update_roles, realm=realm) + result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name) + assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + else: + # Remove mapping of role + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=result['proposed']) + if module.check_mode: + module.exit_json(**result) + kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm) + result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name) + assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are %s with group %s.' % (roles, 'mapped' if state == 'present' else 'not mapped', group_name) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py b/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py new file mode 100644 index 000000000..a23d92867 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py @@ -0,0 +1,506 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_clientscope + +short_description: Allows administration of Keycloak client_scopes via Keycloak API + +version_added: 3.4.0 + +description: + - This module allows you to add, remove or modify Keycloak client_scopes via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights.
In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + + - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup + to the API to translate the name into the client_scope ID. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the client_scope. + - On C(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the client_scope will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + name: + type: str + description: + - Name of the client_scope. + - This parameter is required only when creating or updating the client_scope. + + realm: + type: str + description: + - The Keycloak realm under which this client_scope resides. + default: 'master' + + id: + type: str + description: + - The unique identifier for this client_scope. + - This parameter is not required for updating or deleting a client_scope but + providing it will reduce the number of API calls required. + + description: + type: str + description: + - Description for this client_scope. + - This parameter is not required for updating or deleting a client_scope. + + protocol: + description: + - Type of client scope. + choices: ['openid-connect', 'saml', 'wsfed'] + type: str + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client scope. + - This is 'protocolMappers' in the Keycloak REST API. + aliases: + - protocolMappers + type: list + elements: dict + suboptions: + protocol: + description: + - This specifies for which protocol this protocol mapper is active. + choices: ['openid-connect', 'saml', 'wsfed'] + type: str + + protocolMapper: + description: + - "The Keycloak-internal name of the type of this protocol-mapper.
While an exhaustive list is + impossible to provide since this may be extended through SPIs by the user of Keycloak, + by default Keycloak as of 3.4 ships with at least:" + - C(docker-v2-allow-all-mapper) + - C(oidc-address-mapper) + - C(oidc-full-name-mapper) + - C(oidc-group-membership-mapper) + - C(oidc-hardcoded-claim-mapper) + - C(oidc-hardcoded-role-mapper) + - C(oidc-role-name-mapper) + - C(oidc-script-based-protocol-mapper) + - C(oidc-sha256-pairwise-sub-mapper) + - C(oidc-usermodel-attribute-mapper) + - C(oidc-usermodel-client-role-mapper) + - C(oidc-usermodel-property-mapper) + - C(oidc-usermodel-realm-role-mapper) + - C(oidc-usersessionmodel-note-mapper) + - C(saml-group-membership-mapper) + - C(saml-hardcode-attribute-mapper) + - C(saml-hardcode-role-mapper) + - C(saml-role-list-mapper) + - C(saml-role-name-mapper) + - C(saml-user-attribute-mapper) + - C(saml-user-property-mapper) + - C(saml-user-session-note-mapper) + - An exhaustive list of available mappers on your installation can be obtained on + the admin console by going to Server Info -> Providers and looking under + 'protocol-mapper'. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the + contents differ depending on the value of I(protocolMapper) and are not documented + other than by the source of the mappers and its parent class(es). An example is given + below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the C(existing) return value. + type: dict + + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the client_scope. + - Values may be single values (for example a string) or a list of strings. 
+ +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Gaëtan Daubresse (@Gaetan2907) +''' + +EXAMPLES = ''' +- name: Create a Keycloak client_scope, authentication with credentials + community.general.keycloak_clientscope: + name: my-new-kc-clientscope + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak client_scope, authentication with token + community.general.keycloak_clientscope: + name: my-new-kc-clientscope + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + +- name: Delete a Keycloak client_scope + community.general.keycloak_clientscope: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + state: absent + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Delete a Keycloak client_scope based on name + community.general.keycloak_clientscope: + name: my-clientscope-for-deletion + state: absent + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Update the name of a Keycloak client_scope + community.general.keycloak_clientscope: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + name: an-updated-kc-clientscope-name + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak client_scope with some custom attributes + community.general.keycloak_clientscope: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + name: my-new_clientscope + description: description-of-clientscope + protocol: openid-connect + protocol_mappers: + - config: + access.token.claim: true + claim.name: "family_name" + id.token.claim: true + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: true + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + - config: + attribute.name: Role + attribute.nameformat: Basic + single: false + name: role list + protocol: saml + protocolMapper: saml-role-list-mapper + attributes: + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Client_scope testclientscope has been updated" + +proposed: + description: Representation of proposed client scope. + returned: always + type: dict + sample: { + clientId: "test" + } + +existing: + description: Representation of existing client scope (sample is truncated). + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } + +end_state: + description: Representation of client scope after module execution (sample is truncated).
+ returned: on success + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def sanitize_cr(clientscoperep): + """ Removes potentially sensitive details from a clientscoperep representation. + + :param clientscoperep: the clientscoperep dict to be sanitized + :return: sanitized clientscoperep dict + """ + result = clientscoperep.copy() + if 'secret' in result: + result['secret'] = 'no_log' + if 'attributes' in result: + if 'saml.signing.private.key' in result['attributes']: + result['attributes']['saml.signing.private.key'] = 'no_log' + return result + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + protmapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), + protocolMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + id=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), + attributes=dict(type='dict'), + protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('id') + name = module.params.get('name') + protocol_mappers = module.params.get('protocol_mappers') + + # Filter and map the parameter names that apply to the client scope + clientscope_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + before_clientscope = kc.get_clientscope_by_name(name, realm=realm) + else: + before_clientscope = kc.get_clientscope_by_clientscopeid(cid, realm=realm) + + if before_clientscope is None: + before_clientscope = {} + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for clientscope_param in clientscope_params: + new_param_value = module.params.get(clientscope_param) + + # some lists in the Keycloak API are sorted, some are not.
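+ # Keycloak itself returns such lists sorted, so sorting the desired value the same way + # keeps the later comparison between existing and desired representations free of spurious + # differences; values that cannot be ordered (mixed types) are left as given.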
+ if isinstance(new_param_value, list): + if clientscope_param in ['attributes']: + try: + new_param_value = sorted(new_param_value) + except TypeError: + pass + # Unfortunately, the Ansible argument spec checker introduces variables with null values when + # they are not specified + if clientscope_param == 'protocol_mappers': + new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + changeset[camel(clientscope_param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_clientscope = before_clientscope.copy() + desired_clientscope.update(changeset) + + # Cater for when it doesn't exist (an empty dict) + if not before_clientscope: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Clientscope does not exist; doing nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if name is None: + module.fail_json(msg='name must be specified when creating a new clientscope') + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(desired_clientscope)) + + if module.check_mode: + module.exit_json(**result) + + # create it + kc.create_clientscope(desired_clientscope, realm=realm) + after_clientscope = kc.get_clientscope_by_name(name, realm) + + result['end_state'] = sanitize_cr(after_clientscope) + + result['msg'] = 'Clientscope {name} has been created with ID {id}'.format(name=after_clientscope['name'], + id=after_clientscope['id']) + + else: + if state == 'present': + # Process an update + + # no changes + if desired_clientscope == before_clientscope: + result['changed'] = False + result['end_state'] = sanitize_cr(desired_clientscope) + result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name']) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + kc.update_clientscope(desired_clientscope, realm=realm) + + # do the protocolmappers update + if protocol_mappers is not None: + for protocol_mapper in protocol_mappers: + # update if protocolmapper exists + current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(desired_clientscope['id'], protocol_mapper['name'], realm=realm) + if current_protocolmapper is not None: + protocol_mapper['id'] = current_protocolmapper['id'] + kc.update_clientscope_protocolmappers(desired_clientscope['id'], protocol_mapper, realm=realm) + # create otherwise + else: + kc.create_clientscope_protocolmapper(desired_clientscope['id'], protocol_mapper, realm=realm) + + after_clientscope = kc.get_clientscope_by_clientscopeid(desired_clientscope['id'], realm=realm) + + result['end_state'] = after_clientscope + + result['msg'] = "Clientscope {id} has been updated".format(id=after_clientscope['id']) + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_clientscope), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + cid = before_clientscope['id'] + kc.delete_clientscope(cid=cid, realm=realm) + + result['end_state']
= {} + + result['msg'] = "Clientscope {name} has been deleted".format(name=before_clientscope['name']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py b/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py new file mode 100644 index 000000000..facf02aa4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py @@ -0,0 +1,285 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_clientscope_type + +short_description: Set the type of a clientscope in realm or client via Keycloak API + +version_added: 6.6.0 + +description: + - This module allows you to set the type (optional, default) of clientscopes + via the Keycloak REST API. It requires access to the REST API via OpenID + Connect; the user connecting and the client being used must have the + requisite access rights. In a default Keycloak installation, admin-cli and + an admin user would work, as would a separate client definition with the + scope tailored to your needs and a user having the expected roles. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + realm: + type: str + description: + - The Keycloak realm. + default: 'master' + + client_id: + description: + - The I(client_id) of the client. If not set, the clientscope types are set as a default for the realm. + aliases: + - clientId + type: str + + default_clientscopes: + description: + - Client scopes that should be of type default. + type: list + elements: str + + optional_clientscopes: + description: + - Client scopes that should be of type optional. + type: list + elements: str + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Simon Pahl (@simonpahl) +''' + +EXAMPLES = ''' +- name: Set default client scopes on realm level + community.general.keycloak_clientscope_type: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: "MyCustomRealm" + default_clientscopes: ['profile', 'roles'] + delegate_to: localhost + + +- name: Set default and optional client scopes on client level with token auth + community.general.keycloak_clientscope_type: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + realm: "MyCustomRealm" + client_id: "MyCustomClient" + default_clientscopes: ['profile', 'roles'] + optional_clientscopes: ['phone'] + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "" +proposed: + description: Representation of proposed client-scope types mapping. + returned: always + type: dict + sample: { + default_clientscopes: ["profile", "role"], + optional_clientscopes: [] + } +existing: + description: + - Representation of client scopes before module execution.
+ returned: always + type: dict + sample: { + default_clientscopes: ["profile", "role"], + optional_clientscopes: ["phone"] + } +end_state: + description: + - Representation of client scopes after module execution. + - The sample is truncated. + returned: on success + type: dict + sample: { + default_clientscopes: ["profile", "role"], + optional_clientscopes: [] + } +''' + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \ + keycloak_argument_spec + + +def keycloak_clientscope_type_module(): + """ + Builds and returns the AnsibleModule for this module. + + :return: initialized AnsibleModule + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(default='master'), + client_id=dict(type='str', aliases=['clientId']), + default_clientscopes=dict(type='list', elements='str'), + optional_clientscopes=dict(type='list', elements='str'), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([ + ['token', 'auth_realm', 'auth_username', 'auth_password'], + ['default_clientscopes', 'optional_clientscopes'] + ]), + required_together=([['auth_realm', 'auth_username', 'auth_password']]), + mutually_exclusive=[ + ['token', 'auth_realm'], + ['token', 'auth_username'], + ['token', 'auth_password'] + ]) + + return module + + +def clientscopes_to_add(existing, proposed): + to_add = [] + existing_clientscope_ids = extract_field(existing, 'id') + for clientscope in proposed: + if clientscope['id'] not in existing_clientscope_ids: + to_add.append(clientscope) + return to_add + + +def clientscopes_to_delete(existing, proposed): + to_delete = [] + proposed_clientscope_ids = extract_field(proposed, 'id') + for clientscope in existing: + if clientscope['id'] not in proposed_clientscope_ids: + to_delete.append(clientscope) + return to_delete + + +def extract_field(representations, field='name'): + """ Returns the value of the given field for every representation in the list. """ + return [cs[field] for cs in representations] + + +def main(): + """ + Module keycloak_clientscope_type + + :return: + """ + + module = keycloak_clientscope_type_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + client_id = module.params.get('client_id') + default_clientscopes = module.params.get('default_clientscopes') + optional_clientscopes = module.params.get('optional_clientscopes') + + result = dict(changed=False, msg='', proposed={}, existing={}, end_state={}) + + all_clientscopes = kc.get_clientscopes(realm) + default_clientscopes_real = [] + optional_clientscopes_real = [] + + for client_scope in all_clientscopes: + if default_clientscopes is not None and client_scope["name"] in default_clientscopes: + default_clientscopes_real.append(client_scope) + if optional_clientscopes is not None and client_scope["name"] in optional_clientscopes: + optional_clientscopes_real.append(client_scope) + + if default_clientscopes is not None and len(default_clientscopes_real) != len(default_clientscopes): + module.fail_json(msg='At least one of the default_clientscopes does not exist!') + + if optional_clientscopes is not None and len(optional_clientscopes_real) != len(optional_clientscopes): +
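+ # The names were matched against the realm's clientscopes above, so a length mismatch + # means at least one requested optional clientscope name did not resolve.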
module.fail_json(msg='At least one of the optional_clientscopes does not exist!') + + result['proposed'].update({ + 'default_clientscopes': 'no-change' if default_clientscopes is None else default_clientscopes, + 'optional_clientscopes': 'no-change' if optional_clientscopes is None else optional_clientscopes + }) + + default_clientscopes_existing = kc.get_default_clientscopes(realm, client_id) + optional_clientscopes_existing = kc.get_optional_clientscopes(realm, client_id) + + result['existing'].update({ + 'default_clientscopes': extract_field(default_clientscopes_existing), + 'optional_clientscopes': extract_field(optional_clientscopes_existing) + }) + + if module._diff: + result['diff'] = dict(before=result['existing'], after=result['proposed']) + + if module.check_mode: + module.exit_json(**result) + + default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real) + optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real) + + default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real) + optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real) + + # first delete so clientscopes can change type + for clientscope in default_clientscopes_delete: + kc.delete_default_clientscope(clientscope['id'], realm, client_id) + for clientscope in optional_clientscopes_delete: + kc.delete_optional_clientscope(clientscope['id'], realm, client_id) + + for clientscope in default_clientscopes_add: + kc.add_default_clientscope(clientscope['id'], realm, client_id) + for clientscope in optional_clientscopes_add: + kc.add_optional_clientscope(clientscope['id'], realm, client_id) + + result["changed"] = ( + len(default_clientscopes_add) > 0 + or len(optional_clientscopes_add) > 0 + or len(default_clientscopes_delete) > 0 + or len(optional_clientscopes_delete) > 0 + ) + + result['end_state'].update({ + 'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)), + 'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id)) + }) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py new file mode 100644 index 000000000..98a41ad20 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Fynn Chen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_clientsecret_info + +short_description: Retrieve client secret via Keycloak API + +version_added: 6.1.0 + +description: + - This module allows you to get a Keycloak client secret via the Keycloak + REST API. It requires access to the REST API via OpenID Connect; the user + connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, + as would a separate client definition with the scope tailored to your needs + and a user having the expected roles. 
+ + - When retrieving the client secret, where possible provide the client's + I(id) (not I(client_id)) to the module. This removes a lookup to the API to + translate the I(client_id) into the client ID. + + - "Note that this module returns the client secret. To avoid this showing up in the logs, + please add C(no_log: true) to the task." + +options: + realm: + type: str + description: + - The Keycloak realm under which this client resides. + default: 'master' + + id: + description: + - The unique identifier for this client. + - This parameter is not required for getting or generating a client secret but + providing it will reduce the number of API calls required. + type: str + + client_id: + description: + - The I(client_id) of the client. Passing this instead of I(id) results in an + extra API call. + aliases: + - clientId + type: str + + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Fynn Chen (@fynncfchen) + - John Cant (@johncant) +''' + +EXAMPLES = ''' +- name: Get a Keycloak client secret, authentication with credentials + community.general.keycloak_clientsecret_info: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + no_log: true + +- name: Get a Keycloak client secret, authentication with token + community.general.keycloak_clientsecret_info: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Get a Keycloak client secret, passing client_id instead of id + community.general.keycloak_clientsecret_info: + client_id: 'myClientId' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true +''' + +RETURN = ''' +msg: + description: Textual description of whether we succeeded or failed. + returned: always + type: str + +clientsecret_info: + description: Representation of the client secret. + returned: on success + type: complex + contains: + type: + description: Credential type. + type: str + returned: always + sample: secret + value: + description: Client secret.
+ type: str + returned: always + sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1 +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import ( + keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params) + + +def main(): + """ + Module keycloak_clientsecret_info + + :return: + """ + + module = keycloak_clientsecret_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + id, realm = keycloak_clientsecret_module_resolve_params(module, kc) + + clientsecret = kc.get_clientsecret(id=id, realm=realm) + + result = { + 'clientsecret_info': clientsecret, + 'msg': 'Client secret successfully retrieved for ID {id}'.format(id=id) + } + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_regenerate.py b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_regenerate.py new file mode 100644 index 000000000..7e8b29543 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_regenerate.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Fynn Chen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_clientsecret_regenerate + +short_description: Regenerate Keycloak client secret via Keycloak API + +version_added: 6.1.0 + +description: + - This module allows you to regenerate a Keycloak client secret via the + Keycloak REST API. It requires access to the REST API via OpenID Connect; + the user connecting and the client being used must have the requisite access + rights. In a default Keycloak installation, admin-cli and an admin user + would work, as would a separate client definition with the scope tailored to + your needs and a user having the expected roles. + + - When regenerating a client secret, where possible provide the client's I(id) + (not I(client_id)) to the module. This removes a lookup to the API to + translate the I(client_id) into the client ID. + + - "Note that this module returns the client secret. To avoid this showing up in the logs, + please add C(no_log: true) to the task." + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + realm: + type: str + description: + - The Keycloak realm under which this client resides. + default: 'master' + + id: + description: + - The unique identifier for this client. + - This parameter is not required for getting or generating a client secret but + providing it will reduce the number of API calls required. + type: str + + client_id: + description: + - The I(client_id) of the client. Passing this instead of I(id) results in an + extra API call.
+ aliases: + - clientId + type: str + + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Fynn Chen (@fynncfchen) + - John Cant (@johncant) +''' + +EXAMPLES = ''' +- name: Regenerate a Keycloak client secret, authentication with credentials + community.general.keycloak_clientsecret_regenerate: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + no_log: true + +- name: Regenerate a Keycloak client secret, authentication with token + community.general.keycloak_clientsecret_regenerate: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Regenerate a Keycloak client secret, passing client_id instead of id + community.general.keycloak_clientsecret_regenerate: + client_id: 'myClientId' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the client credential after module execution. + returned: on success + type: complex + contains: + type: + description: Credential type. + type: str + returned: always + sample: secret + value: + description: Client secret. + type: str + returned: always + sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1 + +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import ( + keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params) + + +def main(): + """ + Module keycloak_clientsecret_regenerate + + :return: + """ + + module = keycloak_clientsecret_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + id, realm = keycloak_clientsecret_module_resolve_params(module, kc) + + if module.check_mode: + dummy_result = { + "msg": 'No action taken while in check mode', + "end_state": {'type': 'secret', 'value': 'X' * 32} + } + module.exit_json(**dummy_result) + + # Create new secret + clientsecret = kc.create_clientsecret(id=id, realm=realm) + + result = { + "msg": 'New client secret has been generated for ID {id}'.format(id=id), + "end_state": clientsecret + } + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py b/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py new file mode 100644 index 000000000..d2555afc5 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py @@ -0,0 +1,456 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Eike Frost +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import,
division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_clienttemplate + +short_description: Allows administration of Keycloak client templates via Keycloak API + +description: + - This module allows the administration of Keycloak client templates via the Keycloak REST API. It + requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - The Keycloak API does not always enforce that only sensible settings are used -- you can set + SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. + If you do not specify a setting, usually a sensible default is chosen. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the client template. + - On C(present), the client template will be created (or updated if it exists already). + - On C(absent), the client template will be removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + + id: + description: + - Id of client template to be worked on. This is usually a UUID. + type: str + + realm: + description: + - Realm this client template is found in. + type: str + default: master + + name: + description: + - Name of the client template. + type: str + + description: + description: + - Description of the client template in Keycloak. + type: str + + protocol: + description: + - Type of client template (either C(openid-connect) or C(saml)). + choices: ['openid-connect', 'saml'] + type: str + + full_scope_allowed: + description: + - Is the "Full Scope Allowed" feature set for this client template or not. + This is 'fullScopeAllowed' in the Keycloak REST API. + type: bool + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client template. + This is 'protocolMappers' in the Keycloak REST API. + type: list + elements: dict + suboptions: + consentRequired: + description: + - Specifies whether a user needs to provide consent to a client for this mapper to be active. + type: bool + + consentText: + description: + - The human-readable name of the consent the user is presented to accept. + type: str + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + protocol: + description: + - This is either C(openid-connect) or C(saml); it specifies for which protocol this protocol mapper + is active. + choices: ['openid-connect', 'saml'] + type: str + + protocolMapper: + description: + - The Keycloak-internal name of the type of this protocol-mapper.
While an exhaustive list is + impossible to provide since this may be extended through SPIs by the user of Keycloak, + by default Keycloak as of 3.4 ships with at least + - C(docker-v2-allow-all-mapper) + - C(oidc-address-mapper) + - C(oidc-full-name-mapper) + - C(oidc-group-membership-mapper) + - C(oidc-hardcoded-claim-mapper) + - C(oidc-hardcoded-role-mapper) + - C(oidc-role-name-mapper) + - C(oidc-script-based-protocol-mapper) + - C(oidc-sha256-pairwise-sub-mapper) + - C(oidc-usermodel-attribute-mapper) + - C(oidc-usermodel-client-role-mapper) + - C(oidc-usermodel-property-mapper) + - C(oidc-usermodel-realm-role-mapper) + - C(oidc-usersessionmodel-note-mapper) + - C(saml-group-membership-mapper) + - C(saml-hardcode-attribute-mapper) + - C(saml-hardcode-role-mapper) + - C(saml-role-list-mapper) + - C(saml-role-name-mapper) + - C(saml-user-attribute-mapper) + - C(saml-user-property-mapper) + - C(saml-user-session-note-mapper) + - An exhaustive list of available mappers on your installation can be obtained on + the admin console by going to Server Info -> Providers and looking under + 'protocol-mapper'. + type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the + contents differ depending on the value of I(protocolMapper) and are not documented + other than by the source of the mappers and its parent class(es). An example is given + below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the I(existing) field. + type: dict + + attributes: + description: + - A dict of further attributes for this client template. This can contain various + configuration settings, though in the default installation of Keycloak as of 3.4, none + are documented or known, so this is usually empty. + type: dict + +notes: + - The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled), + I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and + I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on + Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such, + they are not available through this module. 
+ +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Eike Frost (@eikef) +''' + +EXAMPLES = ''' +- name: Create or update Keycloak client template (minimal), authentication with credentials + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + name: this_is_a_test + delegate_to: localhost + +- name: Create or update Keycloak client template (minimal), authentication with token + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + token: TOKEN + realm: master + name: this_is_a_test + delegate_to: localhost + +- name: Delete Keycloak client template + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + state: absent + name: test01 + delegate_to: localhost + +- name: Create or update Keycloak client template (with a protocol mapper) + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + name: this_is_a_test + protocol_mappers: + - config: + access.token.claim: true + claim.name: "family_name" + id.token.claim: true + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: true + consentRequired: true + consentText: "${familyName}" + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + full_scope_allowed: false + id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Client template testclient has been updated" + +proposed: + description: Representation of proposed client template. + returned: always + type: dict + sample: { + name: "test01" + } + +existing: + description: Representation of existing client template (sample is truncated). + returned: always + type: dict + sample: { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" + } + +end_state: + description: Representation of client template after module execution (sample is truncated).
+ returned: on success + type: dict + sample: { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + protmapper_spec = dict( + consentRequired=dict(type='bool'), + consentText=dict(type='str'), + id=dict(type='str'), + name=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml']), + protocolMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + realm=dict(type='str', default='master'), + state=dict(default='present', choices=['present', 'absent']), + + id=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml']), + attributes=dict(type='dict'), + full_scope_allowed=dict(type='bool'), + protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('id') + + # Filter and map the parameter names that apply to the client template + clientt_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm) + if before_clientt is not None: + cid = before_clientt['id'] + else: + before_clientt = kc.get_client_template_by_id(cid, realm=realm) + + if before_clientt is None: + before_clientt = {} + + result['existing'] = before_clientt + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for clientt_param in clientt_params: + # lists in the Keycloak API are sorted + new_param_value = module.params.get(clientt_param) + if isinstance(new_param_value, list): + try: + new_param_value = sorted(new_param_value) + except TypeError: + pass + changeset[camel(clientt_param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_clientt = before_clientt.copy() + desired_clientt.update(changeset) + + result['proposed'] = changeset + + # Cater for when it doesn't exist (an empty dict) + if not before_clientt: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Client template
does not exist, doing nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if 'name' not in desired_clientt: + module.fail_json(msg='name needs to be specified when creating a new client template') + + if module._diff: + result['diff'] = dict(before='', after=desired_clientt) + + if module.check_mode: + module.exit_json(**result) + + # create it + kc.create_client_template(desired_clientt, realm=realm) + after_clientt = kc.get_client_template_by_name(desired_clientt['name'], realm=realm) + + result['end_state'] = after_clientt + + result['msg'] = 'Client template %s has been created.' % desired_clientt['name'] + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + result['changed'] = True + if module.check_mode: + # We can only compare the current client template with the proposed updates we have + if module._diff: + result['diff'] = dict(before=before_clientt, + after=desired_clientt) + + module.exit_json(**result) + + # do the update + kc.update_client_template(cid, desired_clientt, realm=realm) + + after_clientt = kc.get_client_template_by_id(cid, realm=realm) + if before_clientt == after_clientt: + result['changed'] = False + + result['end_state'] = after_clientt + + if module._diff: + result['diff'] = dict(before=before_clientt, after=after_clientt) + + result['msg'] = 'Client template %s has been updated.' % desired_clientt['name'] + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_clientt, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_client_template(cid, realm=realm) + result['proposed'] = {} + + result['end_state'] = {} + + result['msg'] = 'Client template %s has been deleted.' % before_clientt['name'] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_group.py b/ansible_collections/community/general/plugins/modules/keycloak_group.py new file mode 100644 index 000000000..399bc5b4f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_group.py @@ -0,0 +1,496 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Adam Goossens +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_group + +short_description: Allows administration of Keycloak groups via Keycloak API + +description: + - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module.
+      You may pass single values for attributes when calling the module,
+      and this will be translated into a list suitable for the API.
+
+    - When updating a group, where possible provide the group ID to the module. This removes a lookup
+      to the API to translate the name into the group ID.
+
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: full
+
+options:
+    state:
+        description:
+            - State of the group.
+            - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
+            - >-
+              On C(absent), the group will be removed if it exists. Be aware that absenting
+              a group with subgroups will automatically delete all its subgroups too.
+        default: 'present'
+        type: str
+        choices:
+            - present
+            - absent
+
+    name:
+        type: str
+        description:
+            - Name of the group.
+            - This parameter is required only when creating or updating the group.
+
+    realm:
+        type: str
+        description:
+            - The Keycloak realm under which this group resides.
+        default: 'master'
+
+    id:
+        type: str
+        description:
+            - The unique identifier for this group.
+            - This parameter is not required for updating or deleting a group but
+              providing it will reduce the number of API calls required.
+
+    attributes:
+        type: dict
+        description:
+            - A dict of key/value pairs to set as custom attributes for the group.
+            - Values may be single values (e.g. a string) or a list of strings.
+
+    parents:
+        version_added: "6.4.0"
+        type: list
+        description:
+            - List of parent groups for the group to handle, sorted top to bottom.
+            - >-
+              Set this to create a group as a subgroup of another group or groups (parents) or
+              when accessing an existing subgroup by name.
+            - >-
+              Not necessary to set when accessing an existing subgroup by its C(ID) because in
+              that case the group can be directly queried without necessarily knowing its parent(s).
+        elements: dict
+        suboptions:
+            id:
+                type: str
+                description:
+                    - Identify parent by ID.
+                    - Needs fewer API calls than using I(name).
+                    - A deep parent chain can be started at any point when the first parent is given as an ID.
+                    - Note that in principle both ID and name can be specified at the same time,
+                      but the current implementation always uses only one of them, with ID
+                      being preferred.
+            name:
+                type: str
+                description:
+                    - Identify parent by name.
+                    - Needs more internal API calls than using I(id) to map names to IDs under the hood.
+                    - When giving a parent chain with only names, it must be complete up to the top.
+                    - Note that in principle both ID and name can be specified at the same time,
+                      but the current implementation always uses only one of them, with ID
+                      being preferred.
+
+notes:
+    - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
+      are read-only for groups. This limitation will be removed in a later version of this module.
+ +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Adam Goossens (@adamgoossens) +''' + +EXAMPLES = ''' +- name: Create a Keycloak group, authentication with credentials + community.general.keycloak_group: + name: my-new-kc-group + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + register: result_new_kcgrp + delegate_to: localhost + +- name: Create a Keycloak group, authentication with token + community.general.keycloak_group: + name: my-new-kc-group + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + +- name: Delete a keycloak group + community.general.keycloak_group: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + state: absent + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Delete a Keycloak group based on name + community.general.keycloak_group: + name: my-group-for-deletion + state: absent + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Update the name of a Keycloak group + community.general.keycloak_group: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + name: an-updated-kc-group-name + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a keycloak group with some custom attributes + community.general.keycloak_group: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + name: my-new_group + attributes: + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items + delegate_to: localhost + +- name: Create a Keycloak subgroup of a base group (using parent name) + community.general.keycloak_group: + name: my-new-kc-group-sub + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - name: my-new-kc-group + register: result_new_kcgrp_sub + delegate_to: localhost + +- name: Create a Keycloak subgroup of a base group (using parent id) + community.general.keycloak_group: + name: my-new-kc-group-sub2 + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - id: "{{ result_new_kcgrp.end_state.id }}" + delegate_to: localhost + +- name: Create a Keycloak subgroup of a subgroup (using parent names) + community.general.keycloak_group: + name: my-new-kc-group-sub-sub + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - name: my-new-kc-group + - name: my-new-kc-group-sub + delegate_to: localhost + +- name: Create a Keycloak subgroup of a subgroup (using 
direct parent id) + community.general.keycloak_group: + name: my-new-kc-group-sub-sub + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - id: "{{ result_new_kcgrp_sub.end_state.id }}" + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the group after module execution (sample is truncated). + returned: on success + type: complex + contains: + id: + description: GUID that identifies the group. + type: str + returned: always + sample: 23f38145-3195-462c-97e7-97041ccea73e + name: + description: Name of the group. + type: str + returned: always + sample: grp-test-123 + attributes: + description: Attributes applied to this group. + type: dict + returned: always + sample: + attr1: ["val1", "val2", "val3"] + path: + description: URI path to the group. + type: str + returned: always + sample: /grp-test-123 + realmRoles: + description: An array of the realm-level roles granted to this group. + type: list + returned: always + sample: [] + subGroups: + description: A list of groups that are children of this group. These groups will have the same parameters as + documented here. + type: list + returned: always + clientRoles: + description: A list of client-level roles granted to this group. + type: list + returned: always + sample: [] + access: + description: A dict describing the accesses you have to this group based on the credentials used. + type: dict + returned: always + sample: + manage: true + manageMembership: true + view: true +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + id=dict(type='str'), + name=dict(type='str'), + attributes=dict(type='dict'), + parents=dict( + type='list', elements='dict', + options=dict( + id=dict(type='str'), + name=dict(type='str') + ), + ), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, group='') + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + gid = module.params.get('id') + name = module.params.get('name') + attributes = module.params.get('attributes') + + parents = module.params.get('parents') + + # attributes in Keycloak have their values returned as lists + # via the API. attributes is a dict, so we'll transparently convert + # the values to lists. 
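+    # (e.g. attributes={'attr1': 'value1'} becomes {'attr1': ['value1']};
+    # values that are already lists are passed through unchanged)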
+    if attributes is not None:
+        for key, val in module.params['attributes'].items():
+            module.params['attributes'][key] = [val] if not isinstance(val, list) else val
+
+    # Filter and map the parameter names that apply to the group
+    group_params = [x for x in module.params
+                    if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'parents'] and
+                    module.params.get(x) is not None]
+
+    # See if it already exists in Keycloak
+    if gid is None:
+        before_group = kc.get_group_by_name(name, realm=realm, parents=parents)
+    else:
+        before_group = kc.get_group_by_groupid(gid, realm=realm)
+
+    if before_group is None:
+        before_group = {}
+
+    # Build a proposed changeset from parameters given to this module
+    changeset = {}
+
+    for param in group_params:
+        new_param_value = module.params.get(param)
+        old_value = before_group[param] if param in before_group else None
+        if new_param_value != old_value:
+            changeset[camel(param)] = new_param_value
+
+    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+    desired_group = before_group.copy()
+    desired_group.update(changeset)
+
+    # Cater for when it doesn't exist (an empty dict)
+    if not before_group:
+        if state == 'absent':
+            # Do nothing and exit
+            if module._diff:
+                result['diff'] = dict(before='', after='')
+            result['changed'] = False
+            result['end_state'] = {}
+            result['msg'] = 'Group does not exist; doing nothing.'
+            module.exit_json(**result)
+
+        # Process a creation
+        result['changed'] = True
+
+        if name is None:
+            module.fail_json(msg='name must be specified when creating a new group')
+
+        if module._diff:
+            result['diff'] = dict(before='', after=desired_group)
+
+        if module.check_mode:
+            module.exit_json(**result)
+
+        # create it ...
+        if parents:
+            # ... as subgroup of another parent group
+            kc.create_subgroup(parents, desired_group, realm=realm)
+        else:
+            # ...
as toplvl base group + kc.create_group(desired_group, realm=realm) + + after_group = kc.get_group_by_name(name, realm, parents=parents) + + result['end_state'] = after_group + + result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'], + id=after_group['id']) + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # no changes + if desired_group == before_group: + result['changed'] = False + result['end_state'] = desired_group + result['msg'] = "No changes required to group {name}.".format(name=before_group['name']) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_group, after=desired_group) + + if module.check_mode: + module.exit_json(**result) + + # do the update + kc.update_group(desired_group, realm=realm) + + after_group = kc.get_group_by_groupid(desired_group['id'], realm=realm) + + result['end_state'] = after_group + + result['msg'] = "Group {id} has been updated".format(id=after_group['id']) + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_group, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + gid = before_group['id'] + kc.delete_group(groupid=gid, realm=realm) + + result['end_state'] = {} + + result['msg'] = "Group {name} has been deleted".format(name=before_group['name']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py new file mode 100644 index 000000000..0d12ae03a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py @@ -0,0 +1,654 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_identity_provider + +short_description: Allows administration of Keycloak identity providers via Keycloak API + +version_added: 3.6.0 + +description: + - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the identity provider. + - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the identity provider will be removed if it exists. 
+ default: 'present' + type: str + choices: + - present + - absent + + realm: + description: + - The Keycloak realm under which this identity provider resides. + default: 'master' + type: str + + alias: + description: + - The alias uniquely identifies an identity provider and it is also used to build the redirect URI. + required: true + type: str + + display_name: + description: + - Friendly name for identity provider. + aliases: + - displayName + type: str + + enabled: + description: + - Enable/disable this identity provider. + type: bool + + store_token: + description: + - Enable/disable whether tokens must be stored after authenticating users. + aliases: + - storeToken + type: bool + + add_read_token_role_on_create: + description: + - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role. + aliases: + - addReadTokenRoleOnCreate + type: bool + + trust_email: + description: + - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. + aliases: + - trustEmail + type: bool + + link_only: + description: + - If true, users cannot log in through this provider. They can only link to this provider. + This is useful if you don't want to allow login from the provider, but want to integrate with a provider. + aliases: + - linkOnly + type: bool + + first_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after first login with this identity provider. + aliases: + - firstBrokerLoginFlowAlias + type: str + + post_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after each login with this identity provider. + aliases: + - postBrokerLoginFlowAlias + type: str + + authenticate_by_default: + description: + - Specifies if this identity provider should be used by default for authentication even before displaying login screen. + aliases: + - authenticateByDefault + type: bool + + provider_id: + description: + - Protocol used by this provider (supported values are C(oidc) or C(saml)). + aliases: + - providerId + type: str + + config: + description: + - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId). + Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing + identity provider configuration through check-mode in the I(existing) field. + type: dict + suboptions: + hide_on_login_page: + description: + - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter. + aliases: + - hideOnLoginPage + type: bool + + gui_order: + description: + - Number defining order of the provider in GUI (for example, on Login page). + aliases: + - guiOrder + type: int + + sync_mode: + description: + - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers. + aliases: + - syncMode + type: str + + issuer: + description: + - The issuer identifier for the issuer of the response. If not provided, no validation will be performed. + type: str + + authorizationUrl: + description: + - The Authorization URL. + type: str + + tokenUrl: + description: + - The Token URL. + type: str + + logoutUrl: + description: + - End session endpoint to use to logout user from external IDP. + type: str + + userInfoUrl: + description: + - The User Info URL. 
+ type: str + + clientAuthMethod: + description: + - The client authentication method. + type: str + + clientId: + description: + - The client or client identifier registered within the identity provider. + type: str + + clientSecret: + description: + - The client or client secret registered within the identity provider. + type: str + + defaultScope: + description: + - The scopes to be sent when asking for authorization. + type: str + + validateSignature: + description: + - Enable/disable signature validation of external IDP signatures. + type: bool + + useJwksUrl: + description: + - If the switch is on, identity provider public keys will be downloaded from given JWKS URL. + type: bool + + jwksUrl: + description: + - URL where identity provider keys in JWK format are stored. See JWK specification for more details. + type: str + + entityId: + description: + - The Entity ID that will be used to uniquely identify this SAML Service Provider. + type: str + + singleSignOnServiceUrl: + description: + - The URL that must be used to send authentication requests (SAML AuthnRequest). + type: str + + singleLogoutServiceUrl: + description: + - The URL that must be used to send logout requests. + type: str + + backchannelSupported: + description: + - Does the external IDP support backchannel logout? + type: str + + nameIDPolicyFormat: + description: + - Specifies the URI reference corresponding to a name identifier format. + type: str + + principalType: + description: + - Way to identify and track external users from the assertion. + type: str + + mappers: + description: + - A list of dicts defining mappers associated with this Identity Provider. + type: list + elements: dict + suboptions: + id: + description: + - Unique ID of this mapper. + type: str + + name: + description: + - Name of the mapper. + type: str + + identityProviderAlias: + description: + - Alias of the identity provider for this mapper. + type: str + + identityProviderMapper: + description: + - Type of mapper. + type: str + + config: + description: + - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper). 
+ type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Laurent Paumier (@laurpaum) +''' + +EXAMPLES = ''' +- name: Create OIDC identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: oidc-idp + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: my-client + clientSecret: secret + syncMode: FORCE + mappers: + - name: first_name + identityProviderMapper: oidc-user-attribute-idp-mapper + config: + claim: first_name + user.attribute: first_name + syncMode: INHERIT + - name: last_name + identityProviderMapper: oidc-user-attribute-idp-mapper + config: + claim: last_name + user.attribute: last_name + syncMode: INHERIT + +- name: Create SAML identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: saml-idp + display_name: SAML IdP + enabled: true + provider_id: saml + config: + entityId: https://auth.example.com/auth/realms/myrealm + singleSignOnServiceUrl: https://idp.example.com/login + wantAuthnRequestsSigned: true + wantAssertionsSigned: true + mappers: + - name: roles + identityProviderMapper: saml-user-attribute-idp-mapper + config: + user.attribute: roles + attribute.friendly.name: User Roles + attribute.name: roles + syncMode: INHERIT +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Identity provider my-idp has been created" + +proposed: + description: Representation of proposed identity provider. + returned: always + type: dict + sample: { + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "providerId": "oidc" + } + +existing: + description: Representation of existing identity provider. + returned: always + type: dict + sample: { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://old.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://old.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://old.example.com/token", + "userInfoUrl": "https://old.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false, + } + +end_state: + description: Representation of identity provider after module execution. 
+  returned: on success
+  type: dict
+  sample: {
+    "addReadTokenRoleOnCreate": false,
+    "alias": "my-idp",
+    "authenticateByDefault": false,
+    "config": {
+        "authorizationUrl": "https://idp.example.com/auth",
+        "clientAuthMethod": "client_secret_post",
+        "clientId": "my-client",
+        "clientSecret": "**********",
+        "issuer": "https://idp.example.com",
+        "tokenUrl": "https://idp.example.com/token",
+        "userInfoUrl": "https://idp.example.com/userinfo"
+    },
+    "displayName": "OpenID Connect IdP",
+    "enabled": true,
+    "firstBrokerLoginFlowAlias": "first broker login",
+    "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
+    "linkOnly": false,
+    "providerId": "oidc",
+    "storeToken": false,
+    "trustEmail": false,
+  }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+    keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+from copy import deepcopy
+
+
+def sanitize(idp):
+    idpcopy = deepcopy(idp)
+    if 'config' in idpcopy:
+        if 'clientSecret' in idpcopy['config']:
+            idpcopy['config']['clientSecret'] = '**********'
+    return idpcopy
+
+
+def get_identity_provider_with_mappers(kc, alias, realm):
+    idp = kc.get_identity_provider(alias, realm)
+    if idp is not None:
+        idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name'))
+    if idp is None:
+        idp = {}
+    return idp
+
+
+def main():
+    """
+    Module execution
+
+    :return:
+    """
+    argument_spec = keycloak_argument_spec()
+
+    mapper_spec = dict(
+        id=dict(type='str'),
+        name=dict(type='str'),
+        identityProviderAlias=dict(type='str'),
+        identityProviderMapper=dict(type='str'),
+        config=dict(type='dict'),
+    )
+
+    meta_args = dict(
+        state=dict(type='str', default='present', choices=['present', 'absent']),
+        realm=dict(type='str', default='master'),
+        alias=dict(type='str', required=True),
+        add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']),
+        authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']),
+        config=dict(type='dict'),
+        display_name=dict(type='str', aliases=['displayName']),
+        enabled=dict(type='bool'),
+        first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']),
+        link_only=dict(type='bool', aliases=['linkOnly']),
+        post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']),
+        provider_id=dict(type='str', aliases=['providerId']),
+        store_token=dict(type='bool', aliases=['storeToken']),
+        trust_email=dict(type='bool', aliases=['trustEmail']),
+        mappers=dict(type='list', elements='dict', options=mapper_spec),
+    )
+
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+    # Obtain access token, initialize API
+    try:
+        connection_header = get_token(module.params)
+    except KeycloakError as e:
+        module.fail_json(msg=str(e))
+
+    kc = KeycloakAPI(module, connection_header)
+
+    realm = module.params.get('realm')
+    alias = module.params.get('alias')
+    state = module.params.get('state')
+
+    # Filter and map the parameter names that apply to the identity provider.
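+    # (the camel() helper imported above maps these snake_case option names to the
+    # camelCase names the Keycloak API expects, e.g. provider_id -> providerId)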
+    idp_params = [x for x in module.params
+                  if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
+                  module.params.get(x) is not None]
+
+    # See if it already exists in Keycloak
+    before_idp = get_identity_provider_with_mappers(kc, alias, realm)
+
+    # Build a proposed changeset from parameters given to this module
+    changeset = {}
+
+    for param in idp_params:
+        new_param_value = module.params.get(param)
+        old_value = before_idp[camel(param)] if camel(param) in before_idp else None
+        if new_param_value != old_value:
+            changeset[camel(param)] = new_param_value
+
+    # special handling of mappers list to allow change detection
+    if module.params.get('mappers') is not None:
+        for change in module.params['mappers']:
+            change = dict((k, v) for k, v in change.items() if change[k] is not None)
+            if change.get('id') is None and change.get('name') is None:
+                module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
+            if before_idp == dict():
+                old_mapper = dict()
+            elif change.get('id') is not None:
+                old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm)
+                if old_mapper is None:
+                    old_mapper = dict()
+            else:
+                found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']]
+                if len(found) == 1:
+                    old_mapper = found[0]
+                else:
+                    old_mapper = dict()
+            new_mapper = old_mapper.copy()
+            new_mapper.update(change)
+            if new_mapper != old_mapper:
+                if changeset.get('mappers') is None:
+                    changeset['mappers'] = list()
+                changeset['mappers'].append(new_mapper)
+
+    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+    desired_idp = before_idp.copy()
+    desired_idp.update(changeset)
+
+    result['proposed'] = sanitize(changeset)
+    result['existing'] = sanitize(before_idp)
+
+    # Cater for when it doesn't exist (an empty dict)
+    if not before_idp:
+        if state == 'absent':
+            # Do nothing and exit
+            if module._diff:
+                result['diff'] = dict(before='', after='')
+            result['changed'] = False
+            result['end_state'] = {}
+            result['msg'] = 'Identity provider does not exist; doing nothing.'
+ module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=sanitize(desired_idp)) + + if module.check_mode: + module.exit_json(**result) + + # create it + desired_idp = desired_idp.copy() + mappers = desired_idp.pop('mappers', []) + kc.create_identity_provider(desired_idp, realm) + for mapper in mappers: + if mapper.get('identityProviderAlias') is None: + mapper['identityProviderAlias'] = alias + kc.create_identity_provider_mapper(mapper, alias, realm) + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + result['end_state'] = sanitize(after_idp) + + result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias) + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # no changes + if desired_idp == before_idp: + result['changed'] = False + result['end_state'] = sanitize(desired_idp) + result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after=sanitize(desired_idp)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + desired_idp = desired_idp.copy() + updated_mappers = desired_idp.pop('mappers', []) + kc.update_identity_provider(desired_idp, realm) + for mapper in updated_mappers: + if mapper.get('id') is not None: + kc.update_identity_provider_mapper(mapper, alias, realm) + else: + if mapper.get('identityProviderAlias') is None: + mapper['identityProviderAlias'] = alias + kc.create_identity_provider_mapper(mapper, alias, realm) + for mapper in [x for x in before_idp['mappers'] + if [y for y in updated_mappers if y["name"] == x['name']] == []]: + kc.delete_identity_provider_mapper(mapper['id'], alias, realm) + + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + result['end_state'] = sanitize(after_idp) + + result['msg'] = "Identity provider {alias} has been updated".format(alias=alias) + module.exit_json(**result) + + elif state == 'absent': + # Process a deletion + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_identity_provider(alias, realm) + + result['end_state'] = {} + + result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm.py b/ansible_collections/community/general/plugins/modules/keycloak_realm.py new file mode 100644 index 000000000..53f81be48 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_realm.py @@ -0,0 +1,826 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_realm + +short_description: Allows administration of Keycloak realm via Keycloak API + +version_added: 3.0.0 + +description: + - This module allows the administration of Keycloak realm via the Keycloak REST API. 
It + requires access to the REST API via OpenID Connect; the user connecting and the realm being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + Aliases are provided so camelCased versions can be used as well. + + - The Keycloak API does not always sanity check inputs e.g. you can set + SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. + If you do not specify a setting, usually a sensible default is chosen. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the realm. + - On C(present), the realm will be created (or updated if it exists already). + - On C(absent), the realm will be removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + + id: + description: + - The realm to create. + type: str + realm: + description: + - The realm name. + type: str + access_code_lifespan: + description: + - The realm access code lifespan. + aliases: + - accessCodeLifespan + type: int + access_code_lifespan_login: + description: + - The realm access code lifespan login. + aliases: + - accessCodeLifespanLogin + type: int + access_code_lifespan_user_action: + description: + - The realm access code lifespan user action. + aliases: + - accessCodeLifespanUserAction + type: int + access_token_lifespan: + description: + - The realm access token lifespan. + aliases: + - accessTokenLifespan + type: int + access_token_lifespan_for_implicit_flow: + description: + - The realm access token lifespan for implicit flow. + aliases: + - accessTokenLifespanForImplicitFlow + type: int + account_theme: + description: + - The realm account theme. + aliases: + - accountTheme + type: str + action_token_generated_by_admin_lifespan: + description: + - The realm action token generated by admin lifespan. + aliases: + - actionTokenGeneratedByAdminLifespan + type: int + action_token_generated_by_user_lifespan: + description: + - The realm action token generated by user lifespan. + aliases: + - actionTokenGeneratedByUserLifespan + type: int + admin_events_details_enabled: + description: + - The realm admin events details enabled. + aliases: + - adminEventsDetailsEnabled + type: bool + admin_events_enabled: + description: + - The realm admin events enabled. + aliases: + - adminEventsEnabled + type: bool + admin_theme: + description: + - The realm admin theme. + aliases: + - adminTheme + type: str + attributes: + description: + - The realm attributes. + type: dict + browser_flow: + description: + - The realm browser flow. + aliases: + - browserFlow + type: str + browser_security_headers: + description: + - The realm browser security headers. + aliases: + - browserSecurityHeaders + type: dict + brute_force_protected: + description: + - The realm brute force protected. + aliases: + - bruteForceProtected + type: bool + client_authentication_flow: + description: + - The realm client authentication flow. + aliases: + - clientAuthenticationFlow + type: str + client_scope_mappings: + description: + - The realm client scope mappings. 
+ aliases: + - clientScopeMappings + type: dict + default_default_client_scopes: + description: + - The realm default default client scopes. + aliases: + - defaultDefaultClientScopes + type: list + elements: str + default_groups: + description: + - The realm default groups. + aliases: + - defaultGroups + type: list + elements: str + default_locale: + description: + - The realm default locale. + aliases: + - defaultLocale + type: str + default_optional_client_scopes: + description: + - The realm default optional client scopes. + aliases: + - defaultOptionalClientScopes + type: list + elements: str + default_roles: + description: + - The realm default roles. + aliases: + - defaultRoles + type: list + elements: str + default_signature_algorithm: + description: + - The realm default signature algorithm. + aliases: + - defaultSignatureAlgorithm + type: str + direct_grant_flow: + description: + - The realm direct grant flow. + aliases: + - directGrantFlow + type: str + display_name: + description: + - The realm display name. + aliases: + - displayName + type: str + display_name_html: + description: + - The realm display name HTML. + aliases: + - displayNameHtml + type: str + docker_authentication_flow: + description: + - The realm docker authentication flow. + aliases: + - dockerAuthenticationFlow + type: str + duplicate_emails_allowed: + description: + - The realm duplicate emails allowed option. + aliases: + - duplicateEmailsAllowed + type: bool + edit_username_allowed: + description: + - The realm edit username allowed option. + aliases: + - editUsernameAllowed + type: bool + email_theme: + description: + - The realm email theme. + aliases: + - emailTheme + type: str + enabled: + description: + - The realm enabled option. + type: bool + enabled_event_types: + description: + - The realm enabled event types. + aliases: + - enabledEventTypes + type: list + elements: str + events_enabled: + description: + - Enables or disables login events for this realm. + aliases: + - eventsEnabled + type: bool + version_added: 3.6.0 + events_expiration: + description: + - The realm events expiration. + aliases: + - eventsExpiration + type: int + events_listeners: + description: + - The realm events listeners. + aliases: + - eventsListeners + type: list + elements: str + failure_factor: + description: + - The realm failure factor. + aliases: + - failureFactor + type: int + internationalization_enabled: + description: + - The realm internationalization enabled option. + aliases: + - internationalizationEnabled + type: bool + login_theme: + description: + - The realm login theme. + aliases: + - loginTheme + type: str + login_with_email_allowed: + description: + - The realm login with email allowed option. + aliases: + - loginWithEmailAllowed + type: bool + max_delta_time_seconds: + description: + - The realm max delta time in seconds. + aliases: + - maxDeltaTimeSeconds + type: int + max_failure_wait_seconds: + description: + - The realm max failure wait in seconds. + aliases: + - maxFailureWaitSeconds + type: int + minimum_quick_login_wait_seconds: + description: + - The realm minimum quick login wait in seconds. + aliases: + - minimumQuickLoginWaitSeconds + type: int + not_before: + description: + - The realm not before. + aliases: + - notBefore + type: int + offline_session_idle_timeout: + description: + - The realm offline session idle timeout. + aliases: + - offlineSessionIdleTimeout + type: int + offline_session_max_lifespan: + description: + - The realm offline session max lifespan. 
+ aliases: + - offlineSessionMaxLifespan + type: int + offline_session_max_lifespan_enabled: + description: + - The realm offline session max lifespan enabled option. + aliases: + - offlineSessionMaxLifespanEnabled + type: bool + otp_policy_algorithm: + description: + - The realm otp policy algorithm. + aliases: + - otpPolicyAlgorithm + type: str + otp_policy_digits: + description: + - The realm otp policy digits. + aliases: + - otpPolicyDigits + type: int + otp_policy_initial_counter: + description: + - The realm otp policy initial counter. + aliases: + - otpPolicyInitialCounter + type: int + otp_policy_look_ahead_window: + description: + - The realm otp policy look ahead window. + aliases: + - otpPolicyLookAheadWindow + type: int + otp_policy_period: + description: + - The realm otp policy period. + aliases: + - otpPolicyPeriod + type: int + otp_policy_type: + description: + - The realm otp policy type. + aliases: + - otpPolicyType + type: str + otp_supported_applications: + description: + - The realm otp supported applications. + aliases: + - otpSupportedApplications + type: list + elements: str + password_policy: + description: + - The realm password policy. + aliases: + - passwordPolicy + type: str + permanent_lockout: + description: + - The realm permanent lockout. + aliases: + - permanentLockout + type: bool + quick_login_check_milli_seconds: + description: + - The realm quick login check in milliseconds. + aliases: + - quickLoginCheckMilliSeconds + type: int + refresh_token_max_reuse: + description: + - The realm refresh token max reuse. + aliases: + - refreshTokenMaxReuse + type: int + registration_allowed: + description: + - The realm registration allowed option. + aliases: + - registrationAllowed + type: bool + registration_email_as_username: + description: + - The realm registration email as username option. + aliases: + - registrationEmailAsUsername + type: bool + registration_flow: + description: + - The realm registration flow. + aliases: + - registrationFlow + type: str + remember_me: + description: + - The realm remember me option. + aliases: + - rememberMe + type: bool + reset_credentials_flow: + description: + - The realm reset credentials flow. + aliases: + - resetCredentialsFlow + type: str + reset_password_allowed: + description: + - The realm reset password allowed option. + aliases: + - resetPasswordAllowed + type: bool + revoke_refresh_token: + description: + - The realm revoke refresh token option. + aliases: + - revokeRefreshToken + type: bool + smtp_server: + description: + - The realm smtp server. + aliases: + - smtpServer + type: dict + ssl_required: + description: + - The realm ssl required option. + choices: ['all', 'external', 'none'] + aliases: + - sslRequired + type: str + sso_session_idle_timeout: + description: + - The realm sso session idle timeout. + aliases: + - ssoSessionIdleTimeout + type: int + sso_session_idle_timeout_remember_me: + description: + - The realm sso session idle timeout remember me. + aliases: + - ssoSessionIdleTimeoutRememberMe + type: int + sso_session_max_lifespan: + description: + - The realm sso session max lifespan. + aliases: + - ssoSessionMaxLifespan + type: int + sso_session_max_lifespan_remember_me: + description: + - The realm sso session max lifespan remember me. + aliases: + - ssoSessionMaxLifespanRememberMe + type: int + supported_locales: + description: + - The realm supported locales. 
+ aliases: + - supportedLocales + type: list + elements: str + user_managed_access_allowed: + description: + - The realm user managed access allowed option. + aliases: + - userManagedAccessAllowed + type: bool + verify_email: + description: + - The realm verify email option. + aliases: + - verifyEmail + type: bool + wait_increment_seconds: + description: + - The realm wait increment in seconds. + aliases: + - waitIncrementSeconds + type: int + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Christophe Gilles (@kris2kris) +''' + +EXAMPLES = ''' +- name: Create or update Keycloak realm (minimal example) + community.general.keycloak_realm: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + id: realm + realm: realm + state: present + +- name: Delete a Keycloak realm + community.general.keycloak_realm: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + id: test + state: absent + +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Realm testrealm has been updated" + +proposed: + description: Representation of proposed realm. + returned: always + type: dict + sample: { + id: "test" + } + +existing: + description: Representation of existing realm (sample is truncated). + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } + +end_state: + description: Representation of realm after module execution (sample is truncated). + returned: on success + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def sanitize_cr(realmrep): + """ Removes probably sensitive details from a realm representation. 
+ + :param realmrep: the realmrep dict to be sanitized + :return: sanitized realmrep dict + """ + result = realmrep.copy() + if 'secret' in result: + result['secret'] = '********' + if 'attributes' in result: + if 'saml.signing.private.key' in result['attributes']: + result['attributes'] = result['attributes'].copy() + result['attributes']['saml.signing.private.key'] = '********' + return result + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + + id=dict(type='str'), + realm=dict(type='str'), + access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']), + access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']), + access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']), + access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False), + access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False), + account_theme=dict(type='str', aliases=['accountTheme']), + action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False), + action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False), + admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']), + admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']), + admin_theme=dict(type='str', aliases=['adminTheme']), + attributes=dict(type='dict'), + browser_flow=dict(type='str', aliases=['browserFlow']), + browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']), + brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']), + client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']), + client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']), + default_default_client_scopes=dict(type='list', elements='str', aliases=['defaultDefaultClientScopes']), + default_groups=dict(type='list', elements='str', aliases=['defaultGroups']), + default_locale=dict(type='str', aliases=['defaultLocale']), + default_optional_client_scopes=dict(type='list', elements='str', aliases=['defaultOptionalClientScopes']), + default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), + default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']), + direct_grant_flow=dict(type='str', aliases=['directGrantFlow']), + display_name=dict(type='str', aliases=['displayName']), + display_name_html=dict(type='str', aliases=['displayNameHtml']), + docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']), + duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']), + edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']), + email_theme=dict(type='str', aliases=['emailTheme']), + enabled=dict(type='bool'), + enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), + events_enabled=dict(type='bool', aliases=['eventsEnabled']), + events_expiration=dict(type='int', aliases=['eventsExpiration']), + events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), + failure_factor=dict(type='int', aliases=['failureFactor']), + internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), + 
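+        # no_log=False explicitly marks these options as non-sensitive: without it,
+        # Ansible's heuristic emits a "did not set no_log" warning for any option
+        # whose name contains a word such as 'token' or 'password'.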
login_theme=dict(type='str', aliases=['loginTheme']), + login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']), + max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']), + max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']), + minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']), + not_before=dict(type='int', aliases=['notBefore']), + offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']), + offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']), + offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']), + otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']), + otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']), + otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']), + otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']), + otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']), + otp_policy_type=dict(type='str', aliases=['otpPolicyType']), + otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), + password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), + permanent_lockout=dict(type='bool', aliases=['permanentLockout']), + quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), + refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), + registration_allowed=dict(type='bool', aliases=['registrationAllowed']), + registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']), + registration_flow=dict(type='str', aliases=['registrationFlow']), + remember_me=dict(type='bool', aliases=['rememberMe']), + reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), + reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), + revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), + smtp_server=dict(type='dict', aliases=['smtpServer']), + ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), + sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), + sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), + sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), + sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']), + supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']), + user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']), + verify_email=dict(type='bool', aliases=['verifyEmail']), + wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'realm', 'enabled'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + 
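+    # For this module the realm name itself identifies the resource being managed;
+    # unlike the client, group or role modules there is no separate parent realm.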
realm = module.params.get('realm')
+    state = module.params.get('state')
+
+    # convert module parameters to realm representation parameters (if they belong in there)
+    params_to_ignore = list(keycloak_argument_spec().keys()) + ['state']
+
+    # Filter and map the parameter names that apply to the realm
+    realm_params = [x for x in module.params
+                    if x not in params_to_ignore and
+                    module.params.get(x) is not None]
+
+    # See whether the realm already exists in Keycloak
+    before_realm = kc.get_realm_by_id(realm=realm)
+
+    if before_realm is None:
+        before_realm = {}
+
+    # Build a proposed changeset from parameters given to this module
+    changeset = {}
+
+    for realm_param in realm_params:
+        new_param_value = module.params.get(realm_param)
+        changeset[camel(realm_param)] = new_param_value
+
+    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+    desired_realm = before_realm.copy()
+    desired_realm.update(changeset)
+
+    result['proposed'] = sanitize_cr(changeset)
+    before_realm_sanitized = sanitize_cr(before_realm)
+    result['existing'] = before_realm_sanitized
+
+    # Cater for when it doesn't exist (an empty dict)
+    if not before_realm:
+        if state == 'absent':
+            # Do nothing and exit
+            if module._diff:
+                result['diff'] = dict(before='', after='')
+            result['changed'] = False
+            result['end_state'] = {}
+            result['msg'] = 'Realm does not exist, doing nothing.'
+            module.exit_json(**result)
+
+        # Process a creation
+        result['changed'] = True
+
+        if 'id' not in desired_realm:
+            module.fail_json(msg='id needs to be specified when creating a new realm')
+
+        if module._diff:
+            result['diff'] = dict(before='', after=sanitize_cr(desired_realm))
+
+        if module.check_mode:
+            module.exit_json(**result)
+
+        # create it
+        kc.create_realm(desired_realm)
+        after_realm = kc.get_realm_by_id(desired_realm['id'])
+
+        result['end_state'] = sanitize_cr(after_realm)
+
+        result['msg'] = 'Realm %s has been created.' % desired_realm['id']
+        module.exit_json(**result)
+
+    else:
+        if state == 'present':
+            # Process an update
+
+            # doing an update
+            result['changed'] = True
+            if module.check_mode:
+                # We can only compare the current realm with the proposed updates we have
+                if module._diff:
+                    result['diff'] = dict(before=before_realm_sanitized,
+                                          after=sanitize_cr(desired_realm))
+                result['changed'] = (before_realm != desired_realm)
+
+                module.exit_json(**result)
+
+            # do the update
+            kc.update_realm(desired_realm, realm=realm)
+
+            after_realm = kc.get_realm_by_id(realm=realm)
+
+            if before_realm == after_realm:
+                result['changed'] = False
+
+            result['end_state'] = sanitize_cr(after_realm)
+
+            if module._diff:
+                result['diff'] = dict(before=before_realm_sanitized,
+                                      after=sanitize_cr(after_realm))
+
+            result['msg'] = 'Realm %s has been updated.' % desired_realm['id']
+            module.exit_json(**result)
+
+        else:
+            # Process a deletion (because state was not 'present')
+            result['changed'] = True
+
+            if module._diff:
+                result['diff'] = dict(before=before_realm_sanitized, after='')
+
+            if module.check_mode:
+                module.exit_json(**result)
+
+            # delete it
+            kc.delete_realm(realm=realm)
+
+            result['proposed'] = {}
+            result['end_state'] = {}
+
+            result['msg'] = 'Realm %s has been deleted.' % before_realm['id']
+
+            module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py b/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py
new file mode 100644
index 000000000..5c2ebb4c9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_realm_info
+
+short_description: Allows obtaining Keycloak realm public information via Keycloak API
+
+version_added: 4.3.0
+
+description:
+    - This module allows you to get Keycloak realm public information via the Keycloak REST API.
+
+    - The names of module options are snake_cased versions of the camelCase ones found in the
+      Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+    - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+      be returned that way by this module. You may pass single values for attributes when calling the module,
+      and this will be translated into a list suitable for the API.
+
+extends_documentation_fragment:
+    - community.general.attributes
+    - community.general.attributes.info_module
+
+options:
+    auth_keycloak_url:
+        description:
+            - URL to the Keycloak instance.
+        type: str
+        required: true
+        aliases:
+            - url
+    validate_certs:
+        description:
+            - Verify TLS certificates (do not disable this in production).
+        type: bool
+        default: true
+
+    realm:
+        type: str
+        description:
+            - The Keycloak realm ID.
+        default: 'master'
+
+author:
+    - Fynn Chen (@fynncfchen)
+'''
+
+EXAMPLES = '''
+- name: Get a Keycloak public key
+  community.general.keycloak_realm_info:
+    realm: MyCustomRealm
+    auth_keycloak_url: https://auth.example.com/auth
+  delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+  description: Message as to what action was taken.
+  returned: always
+  type: str
+
+realm_info:
+  description:
+    - Representation of the realm public information.
+  returned: always
+  type: dict
+  contains:
+    realm:
+      description: Realm ID.
+      type: str
+      returned: always
+      sample: MyRealm
+    public_key:
+      description: Public key of the realm.
+      type: str
+      returned: always
+      sample: MIIBIjANBgkqhkiG9w0BAQEFAAO...
+    token-service:
+      description: Token endpoint URL.
+      type: str
+      returned: always
+      sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect
+    account-service:
+      description: Account console URL.
+      type: str
+      returned: always
+      sample: https://auth.example.com/auth/realms/MyRealm/account
+    tokens-not-before:
+      description: The realm not-before timestamp, in seconds since the epoch; tokens issued before it are rejected.
+ type: int + returned: always + sample: 0 +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = dict( + auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), + validate_certs=dict(type='bool', default=True), + + realm=dict(default='master'), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = dict(changed=False, msg='', realm_info='') + + kc = KeycloakAPI(module, {}) + + realm = module.params.get('realm') + + realm_info = kc.get_realm_info_by_id(realm=realm) + + result['realm_info'] = realm_info + result['msg'] = 'Get realm public info successful for ID {realm}'.format(realm=realm) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_role.py b/ansible_collections/community/general/plugins/modules/keycloak_role.py new file mode 100644 index 000000000..bbec5f591 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_role.py @@ -0,0 +1,374 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Adam Goossens +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_role + +short_description: Allows administration of Keycloak roles via Keycloak API + +version_added: 3.4.0 + +description: + - This module allows you to add, remove or modify Keycloak roles via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the role. + - On C(present), the role will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the role will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + name: + type: str + required: true + description: + - Name of the role. + - This parameter is required. + + description: + type: str + description: + - The role description. + + realm: + type: str + description: + - The Keycloak realm under which this role resides. + default: 'master' + + client_id: + type: str + description: + - If the role is a client role, the client id under which it resides. + - If this parameter is absent, the role is considered a realm role. 
+ + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the role. + - Values may be single values (e.g. a string) or a list of strings. + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Laurent Paumier (@laurpaum) +''' + +EXAMPLES = ''' +- name: Create a Keycloak realm role, authentication with credentials + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak realm role, authentication with token + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + +- name: Create a Keycloak client role + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + client_id: MyClient + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Delete a Keycloak role + community.general.keycloak_role: + name: my-role-for-deletion + state: absent + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a keycloak role with some custom attributes + community.general.keycloak_role: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + name: my-new-role + attributes: + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Role myrole has been updated" + +proposed: + description: Representation of proposed role. + returned: always + type: dict + sample: { + "description": "My updated test description" + } + +existing: + description: Representation of existing role. + returned: always + type: dict + sample: { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } + +end_state: + description: Representation of role after module execution (sample is truncated). 
+ returned: on success + type: dict + sample: { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My updated client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + description=dict(type='str'), + realm=dict(type='str', default='master'), + client_id=dict(type='str'), + attributes=dict(type='dict'), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + clientid = module.params.get('client_id') + name = module.params.get('name') + state = module.params.get('state') + + # attributes in Keycloak have their values returned as lists + # via the API. attributes is a dict, so we'll transparently convert + # the values to lists. + if module.params.get('attributes') is not None: + for key, val in module.params['attributes'].items(): + module.params['attributes'][key] = [val] if not isinstance(val, list) else val + + # Filter and map the parameters names that apply to the role + role_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id', 'composites'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + if clientid is None: + before_role = kc.get_realm_role(name, realm) + else: + before_role = kc.get_client_role(name, clientid, realm) + + if before_role is None: + before_role = {} + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for param in role_params: + new_param_value = module.params.get(param) + old_value = before_role[param] if param in before_role else None + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) + desired_role = before_role.copy() + desired_role.update(changeset) + + result['proposed'] = changeset + result['existing'] = before_role + + # Cater for when it doesn't exist (an empty dict) + if not before_role: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Role does not exist, doing nothing.' 
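+            # state=absent on a role that does not exist is a no-op; exit without flagging a change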
+ module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if name is None: + module.fail_json(msg='name must be specified when creating a new role') + + if module._diff: + result['diff'] = dict(before='', after=desired_role) + + if module.check_mode: + module.exit_json(**result) + + # create it + if clientid is None: + kc.create_realm_role(desired_role, realm) + after_role = kc.get_realm_role(name, realm) + else: + kc.create_client_role(desired_role, clientid, realm) + after_role = kc.get_client_role(name, clientid, realm) + + result['end_state'] = after_role + + result['msg'] = 'Role {name} has been created'.format(name=name) + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # no changes + if desired_role == before_role: + result['changed'] = False + result['end_state'] = desired_role + result['msg'] = "No changes required to role {name}.".format(name=name) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_role, after=desired_role) + + if module.check_mode: + module.exit_json(**result) + + # do the update + if clientid is None: + kc.update_realm_role(desired_role, realm) + after_role = kc.get_realm_role(name, realm) + else: + kc.update_client_role(desired_role, clientid, realm) + after_role = kc.get_client_role(name, clientid, realm) + + result['end_state'] = after_role + + result['msg'] = "Role {name} has been updated".format(name=name) + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_role, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + if clientid is None: + kc.delete_realm_role(name, realm) + else: + kc.delete_client_role(name, clientid, realm) + + result['end_state'] = {} + + result['msg'] = "Role {name} has been deleted".format(name=name) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py new file mode 100644 index 000000000..c0dc5d271 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py @@ -0,0 +1,1021 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_user_federation + +short_description: Allows administration of Keycloak user federations via Keycloak API + +version_added: 3.7.0 + +description: + - This module allows you to add, remove or modify Keycloak user federations via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. 
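+
+    - The I(bindCredential) of an LDAP provider is sensitive, so it is masked as
+      C(**********) in all data returned by this module.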
+ + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the user federation. + - On C(present), the user federation will be created if it does not yet exist, or updated with + the parameters you provide. + - On C(absent), the user federation will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + description: + - The Keycloak realm under which this user federation resides. + default: 'master' + type: str + + id: + description: + - The unique ID for this user federation. If left empty, the user federation will be searched + by its I(name). + type: str + + name: + description: + - Display name of provider when linked in admin console. + type: str + + provider_id: + description: + - Provider for this user federation. + aliases: + - providerId + type: str + choices: + - ldap + - kerberos + - sssd + + provider_type: + description: + - Component type for user federation (only supported value is C(org.keycloak.storage.UserStorageProvider)). + aliases: + - providerType + default: org.keycloak.storage.UserStorageProvider + type: str + + parent_id: + description: + - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank. + aliases: + - parentId + type: str + + config: + description: + - Dict specifying the configuration options for the provider; the contents differ depending on + the value of I(provider_id). Examples are given below for C(ldap), C(kerberos) and C(sssd). + It is easiest to obtain valid config values by dumping an already-existing user federation + configuration through check-mode in the I(existing) field. + - The value C(sssd) has been supported since community.general 4.2.0. + type: dict + suboptions: + enabled: + description: + - Enable/disable this user federation. + default: true + type: bool + + priority: + description: + - Priority of provider when doing a user lookup. Lowest first. + default: 0 + type: int + + importEnabled: + description: + - If C(true), LDAP users will be imported into Keycloak DB and synced by the configured + sync policies. + default: true + type: bool + + editMode: + description: + - C(READ_ONLY) is a read-only LDAP store. C(WRITABLE) means data will be synced back to LDAP + on demand. C(UNSYNCED) means user data will be imported, but not synced back to LDAP. + type: str + choices: + - READ_ONLY + - WRITABLE + - UNSYNCED + + syncRegistrations: + description: + - Should newly created users be created within LDAP store? Priority effects which + provider is chosen to sync the new user. + default: false + type: bool + + vendor: + description: + - LDAP vendor (provider). + - Use short name. For instance, write C(rhds) for "Red Hat Directory Server". + type: str + + usernameLDAPAttribute: + description: + - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server + vendors it can be C(uid). For Active directory it can be C(sAMAccountName) or C(cn). + The attribute should be filled for all LDAP user records you want to import from + LDAP to Keycloak. + type: str + + rdnLDAPAttribute: + description: + - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN. + Usually it's the same as Username LDAP attribute, however it is not required. 
For + example for Active directory, it is common to use C(cn) as RDN attribute when + username attribute might be C(sAMAccountName). + type: str + + uuidLDAPAttribute: + description: + - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects + in LDAP. For many LDAP server vendors, it is C(entryUUID); however some are different. + For example for Active directory it should be C(objectGUID). If your LDAP server does + not support the notion of UUID, you can use any other attribute that is supposed to + be unique among LDAP users in tree. + type: str + + userObjectClasses: + description: + - All values of LDAP objectClass attribute for users in LDAP divided by comma. + For example C(inetOrgPerson, organizationalPerson). Newly created Keycloak users + will be written to LDAP with all those object classes and existing LDAP user records + are found just if they contain all those object classes. + type: str + + connectionUrl: + description: + - Connection URL to your LDAP server. + type: str + + usersDn: + description: + - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users. + type: str + + customUserSearchFilter: + description: + - Additional LDAP Filter for filtering searched users. Leave this empty if you don't + need additional filter. + type: str + + searchScope: + description: + - For one level, the search applies only for users in the DNs specified by User DNs. + For subtree, the search applies to the whole subtree. See LDAP documentation for + more details. + default: '1' + type: str + choices: + - '1' + - '2' + + authType: + description: + - Type of the Authentication method used during LDAP Bind operation. It is used in + most of the requests sent to the LDAP server. + default: 'none' + type: str + choices: + - none + - simple + + bindDn: + description: + - DN of LDAP user which will be used by Keycloak to access LDAP server. + type: str + + bindCredential: + description: + - Password of LDAP admin. + type: str + + startTls: + description: + - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling. + default: false + type: bool + + usePasswordModifyExtendedOp: + description: + - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify + extended operation usually requires that LDAP user already has password in the LDAP + server. So when this is used with 'Sync Registrations', it can be good to add also + 'Hardcoded LDAP attribute mapper' with randomly generated initial password. + default: false + type: bool + + validatePasswordPolicy: + description: + - Determines if Keycloak should validate the password with the realm password policy + before updating it. + default: false + type: bool + + trustEmail: + description: + - If enabled, email provided by this provider is not verified even if verification is + enabled for the realm. + default: false + type: bool + + useTruststoreSpi: + description: + - Specifies whether LDAP connection will use the truststore SPI with the truststore + configured in standalone.xml/domain.xml. C(Always) means that it will always use it. + C(Never) means that it will not use it. C(Only for ldaps) means that it will use if + your connection URL use ldaps. Note even if standalone.xml/domain.xml is not + configured, the default Java cacerts or certificate specified by + C(javax.net.ssl.trustStore) property will be used. 
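+                # the labels C(Always), C(Never) and C(Only for ldaps) used above correspond
+                # to the choice values C(always), C(never) and C(ldapsOnly) below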
+ default: ldapsOnly + type: str + choices: + - always + - ldapsOnly + - never + + connectionTimeout: + description: + - LDAP Connection Timeout in milliseconds. + type: int + + readTimeout: + description: + - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations. + type: int + + pagination: + description: + - Does the LDAP server support pagination. + default: true + type: bool + + connectionPooling: + description: + - Determines if Keycloak should use connection pooling for accessing LDAP server. + default: true + type: bool + + connectionPoolingAuthentication: + description: + - A list of space-separated authentication types of connections that may be pooled. + type: str + choices: + - none + - simple + - DIGEST-MD5 + + connectionPoolingDebug: + description: + - A string that indicates the level of debug output to produce. Example valid values are + C(fine) (trace connection creation and removal) and C(all) (all debugging information). + type: str + + connectionPoolingInitSize: + description: + - The number of connections per connection identity to create when initially creating a + connection for the identity. + type: int + + connectionPoolingMaxSize: + description: + - The maximum number of connections per connection identity that can be maintained + concurrently. + type: int + + connectionPoolingPrefSize: + description: + - The preferred number of connections per connection identity that should be maintained + concurrently. + type: int + + connectionPoolingProtocol: + description: + - A list of space-separated protocol types of connections that may be pooled. + Valid types are C(plain) and C(ssl). + type: str + + connectionPoolingTimeout: + description: + - The number of milliseconds that an idle connection may remain in the pool without + being closed and removed from the pool. + type: int + + allowKerberosAuthentication: + description: + - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data + about authenticated users will be provisioned from this LDAP server. + default: false + type: bool + + kerberosRealm: + description: + - Name of kerberos realm. + type: str + + serverPrincipal: + description: + - Full name of server principal for HTTP service including server and domain name. For + example C(HTTP/host.foo.org@FOO.ORG). Use C(*) to accept any service principal in the + KeyTab file. + type: str + + keyTab: + description: + - Location of Kerberos KeyTab file containing the credentials of server principal. For + example C(/etc/krb5.keytab). + type: str + + debug: + description: + - Enable/disable debug logging to standard output for Krb5LoginModule. + type: bool + + useKerberosForPasswordAuthentication: + description: + - Use Kerberos login module for authenticate username/password against Kerberos server + instead of authenticating against LDAP server with Directory Service API. + default: false + type: bool + + allowPasswordAuthentication: + description: + - Enable/disable possibility of username/password authentication against Kerberos database. + type: bool + + batchSizeForSync: + description: + - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction. + default: 1000 + type: int + + fullSyncPeriod: + description: + - Period for full synchronization in seconds. + default: -1 + type: int + + changedSyncPeriod: + description: + - Period for synchronization of changed or newly created LDAP users in seconds. 
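+                # as with I(fullSyncPeriod), the value C(-1) disables this periodic sync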
+ default: -1 + type: int + + updateProfileFirstLogin: + description: + - Update profile on first login. + type: bool + + cachePolicy: + description: + - Cache Policy for this storage provider. + type: str + default: 'DEFAULT' + choices: + - DEFAULT + - EVICT_DAILY + - EVICT_WEEKLY + - MAX_LIFESPAN + - NO_CACHE + + evictionDay: + description: + - Day of the week the entry will become invalid on. + type: str + + evictionHour: + description: + - Hour of day the entry will become invalid on. + type: str + + evictionMinute: + description: + - Minute of day the entry will become invalid on. + type: str + + maxLifespan: + description: + - Max lifespan of cache entry in milliseconds. + type: int + + mappers: + description: + - A list of dicts defining mappers associated with this Identity Provider. + type: list + elements: dict + suboptions: + id: + description: + - Unique ID of this mapper. + type: str + + name: + description: + - Name of the mapper. If no ID is given, the mapper will be searched by name. + type: str + + parentId: + description: + - Unique ID for the parent of this mapper. ID of the user federation will automatically + be used if left blank. + type: str + + providerId: + description: + - The mapper type for this mapper (for instance C(user-attribute-ldap-mapper)). + type: str + + providerType: + description: + - Component type for this mapper. + type: str + default: org.keycloak.storage.ldap.mappers.LDAPStorageMapper + + config: + description: + - Dict specifying the configuration options for the mapper; the contents differ + depending on the value of I(identityProviderMapper). + type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Laurent Paumier (@laurpaum) +''' + +EXAMPLES = ''' + - name: Create LDAP user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-ldap + state: present + provider_id: ldap + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + batchSizeForSync: 1000 + editMode: READ_ONLY + importEnabled: true + syncRegistrations: false + vendor: other + usernameLDAPAttribute: uid + rdnLDAPAttribute: uid + uuidLDAPAttribute: entryUUID + userObjectClasses: inetOrgPerson, organizationalPerson + connectionUrl: ldaps://ldap.example.com:636 + usersDn: ou=Users,dc=example,dc=com + authType: simple + bindDn: cn=directory reader + bindCredential: password + searchScope: 1 + validatePasswordPolicy: false + trustEmail: false + useTruststoreSpi: ldapsOnly + connectionPooling: true + pagination: true + allowKerberosAuthentication: false + debug: false + useKerberosForPasswordAuthentication: false + mappers: + - name: "full name" + providerId: "full-name-ldap-mapper" + providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + config: + ldap.full.name.attribute: cn + read.only: true + write.only: false + + - name: Create Kerberos user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-kerberos + state: present + provider_id: kerberos + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + kerberosRealm: EXAMPLE.COM + serverPrincipal: 
HTTP/host.example.com@EXAMPLE.COM + keyTab: keytab + allowPasswordAuthentication: false + updateProfileFirstLogin: false + + - name: Create sssd user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-sssd + state: present + provider_id: sssd + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + + - name: Delete user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-federation + state: absent + +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799." + +proposed: + description: Representation of proposed user federation. + returned: always + type: dict + sample: { + "config": { + "allowKerberosAuthentication": "false", + "authType": "simple", + "batchSizeForSync": "1000", + "bindCredential": "**********", + "bindDn": "cn=directory reader", + "cachePolicy": "DEFAULT", + "connectionPooling": "true", + "connectionUrl": "ldaps://ldap.example.com:636", + "debug": "false", + "editMode": "READ_ONLY", + "enabled": "true", + "importEnabled": "true", + "pagination": "true", + "priority": "0", + "rdnLDAPAttribute": "uid", + "searchScope": "1", + "syncRegistrations": "false", + "trustEmail": "false", + "useKerberosForPasswordAuthentication": "false", + "useTruststoreSpi": "ldapsOnly", + "userObjectClasses": "inetOrgPerson, organizationalPerson", + "usernameLDAPAttribute": "uid", + "usersDn": "ou=Users,dc=example,dc=com", + "uuidLDAPAttribute": "entryUUID", + "validatePasswordPolicy": "false", + "vendor": "other" + }, + "name": "ldap", + "providerId": "ldap", + "providerType": "org.keycloak.storage.UserStorageProvider" + } + +existing: + description: Representation of existing user federation. 
+ returned: always + type: dict + sample: { + "config": { + "allowKerberosAuthentication": "false", + "authType": "simple", + "batchSizeForSync": "1000", + "bindCredential": "**********", + "bindDn": "cn=directory reader", + "cachePolicy": "DEFAULT", + "changedSyncPeriod": "-1", + "connectionPooling": "true", + "connectionUrl": "ldaps://ldap.example.com:636", + "debug": "false", + "editMode": "READ_ONLY", + "enabled": "true", + "fullSyncPeriod": "-1", + "importEnabled": "true", + "pagination": "true", + "priority": "0", + "rdnLDAPAttribute": "uid", + "searchScope": "1", + "syncRegistrations": "false", + "trustEmail": "false", + "useKerberosForPasswordAuthentication": "false", + "useTruststoreSpi": "ldapsOnly", + "userObjectClasses": "inetOrgPerson, organizationalPerson", + "usernameLDAPAttribute": "uid", + "usersDn": "ou=Users,dc=example,dc=com", + "uuidLDAPAttribute": "entryUUID", + "validatePasswordPolicy": "false", + "vendor": "other" + }, + "id": "01122837-9047-4ae4-8ca0-6e2e891a765f", + "mappers": [ + { + "config": { + "always.read.value.from.ldap": "false", + "is.mandatory.in.ldap": "false", + "ldap.attribute": "mail", + "read.only": "true", + "user.model.attribute": "email" + }, + "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f", + "name": "email", + "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f", + "providerId": "user-attribute-ldap-mapper", + "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + } + ], + "name": "myfed", + "parentId": "myrealm", + "providerId": "ldap", + "providerType": "org.keycloak.storage.UserStorageProvider" + } + +end_state: + description: Representation of user federation after module execution. + returned: on success + type: dict + sample: { + "config": { + "allowPasswordAuthentication": "false", + "cachePolicy": "DEFAULT", + "enabled": "true", + "kerberosRealm": "EXAMPLE.COM", + "keyTab": "/etc/krb5.keytab", + "priority": "0", + "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM", + "updateProfileFirstLogin": "false" + }, + "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122", + "mappers": [], + "name": "kerberos", + "parentId": "myrealm", + "providerId": "kerberos", + "providerType": "org.keycloak.storage.UserStorageProvider" + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from copy import deepcopy + + +def sanitize(comp): + compcopy = deepcopy(comp) + if 'config' in compcopy: + compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items()) + if 'bindCredential' in compcopy['config']: + compcopy['config']['bindCredential'] = '**********' + if 'mappers' in compcopy: + for mapper in compcopy['mappers']: + if 'config' in mapper: + mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items()) + return compcopy + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + config_spec = dict( + allowKerberosAuthentication=dict(type='bool', default=False), + allowPasswordAuthentication=dict(type='bool'), + authType=dict(type='str', choices=['none', 'simple'], default='none'), + batchSizeForSync=dict(type='int', default=1000), + bindCredential=dict(type='str', no_log=True), + bindDn=dict(type='str'), + cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'), + 
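# the eviction*/maxLifespan entries below are only consulted by the matching
+        # EVICT_DAILY/EVICT_WEEKLY/MAX_LIFESPAN cache policies
+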
changedSyncPeriod=dict(type='int', default=-1), + connectionPooling=dict(type='bool', default=True), + connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']), + connectionPoolingDebug=dict(type='str'), + connectionPoolingInitSize=dict(type='int'), + connectionPoolingMaxSize=dict(type='int'), + connectionPoolingPrefSize=dict(type='int'), + connectionPoolingProtocol=dict(type='str'), + connectionPoolingTimeout=dict(type='int'), + connectionTimeout=dict(type='int'), + connectionUrl=dict(type='str'), + customUserSearchFilter=dict(type='str'), + debug=dict(type='bool'), + editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']), + enabled=dict(type='bool', default=True), + evictionDay=dict(type='str'), + evictionHour=dict(type='str'), + evictionMinute=dict(type='str'), + fullSyncPeriod=dict(type='int', default=-1), + importEnabled=dict(type='bool', default=True), + kerberosRealm=dict(type='str'), + keyTab=dict(type='str', no_log=False), + maxLifespan=dict(type='int'), + pagination=dict(type='bool', default=True), + priority=dict(type='int', default=0), + rdnLDAPAttribute=dict(type='str'), + readTimeout=dict(type='int'), + searchScope=dict(type='str', choices=['1', '2'], default='1'), + serverPrincipal=dict(type='str'), + startTls=dict(type='bool', default=False), + syncRegistrations=dict(type='bool', default=False), + trustEmail=dict(type='bool', default=False), + updateProfileFirstLogin=dict(type='bool'), + useKerberosForPasswordAuthentication=dict(type='bool', default=False), + usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False), + useTruststoreSpi=dict(type='str', choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'), + userObjectClasses=dict(type='str'), + usernameLDAPAttribute=dict(type='str'), + usersDn=dict(type='str'), + uuidLDAPAttribute=dict(type='str'), + validatePasswordPolicy=dict(type='bool', default=False), + vendor=dict(type='str'), + ) + + mapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + parentId=dict(type='str'), + providerId=dict(type='str'), + providerType=dict(type='str', default='org.keycloak.storage.ldap.mappers.LDAPStorageMapper'), + config=dict(type='dict'), + ) + + meta_args = dict( + config=dict(type='dict', options=config_spec), + state=dict(type='str', default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + id=dict(type='str'), + name=dict(type='str'), + provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos', 'sssd']), + provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), + parent_id=dict(type='str', aliases=['parentId']), + mappers=dict(type='list', elements='dict', options=mapper_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + config = module.params.get('config') + mappers = module.params.get('mappers') + cid = 
module.params.get('id') + name = module.params.get('name') + + # Keycloak API expects config parameters to be arrays containing a single string element + if config is not None: + module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) + for k, v in config.items() if config[k] is not None) + + if mappers is not None: + for mapper in mappers: + if mapper.get('config') is not None: + mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) + for k, v in mapper['config'].items() if mapper['config'][k] is not None) + + # Filter and map the parameters names that apply + comp_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', name=name)), realm) + if len(found) > 1: + module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. Cannot continue.'.format(name=name)) + before_comp = next(iter(found), None) + if before_comp is not None: + cid = before_comp['id'] + else: + before_comp = kc.get_component(cid, realm) + + if before_comp is None: + before_comp = {} + + # if user federation exists, get associated mappers + if cid is not None and before_comp: + before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name')) + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for param in comp_params: + new_param_value = module.params.get(param) + old_value = before_comp[camel(param)] if camel(param) in before_comp else None + if param == 'mappers': + new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # special handling of mappers list to allow change detection + if module.params.get('mappers') is not None: + if module.params['provider_id'] in ['kerberos', 'sssd']: + module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id'])) + for change in module.params['mappers']: + change = dict((k, v) for k, v in change.items() if change[k] is not None) + if change.get('id') is None and change.get('name') is None: + module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') + if cid is None: + old_mapper = {} + elif change.get('id') is not None: + old_mapper = kc.get_component(change['id'], realm) + if old_mapper is None: + old_mapper = {} + else: + found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm) + if len(found) > 1: + module.fail_json(msg='Found multiple mappers with name `{name}`. 
Cannot continue.'.format(name=change['name'])) + if len(found) == 1: + old_mapper = found[0] + else: + old_mapper = {} + new_mapper = old_mapper.copy() + new_mapper.update(change) + if new_mapper != old_mapper: + if changeset.get('mappers') is None: + changeset['mappers'] = list() + changeset['mappers'].append(new_mapper) + + # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) + desired_comp = before_comp.copy() + desired_comp.update(changeset) + + result['proposed'] = sanitize(changeset) + result['existing'] = sanitize(before_comp) + + # Cater for when it doesn't exist (an empty dict) + if not before_comp: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'User federation does not exist; doing nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=sanitize(desired_comp)) + + if module.check_mode: + module.exit_json(**result) + + # create it + desired_comp = desired_comp.copy() + updated_mappers = desired_comp.pop('mappers', []) + after_comp = kc.create_component(desired_comp, realm) + + cid = after_comp['id'] + + for mapper in updated_mappers: + found = kc.get_components(urlencode(dict(parent=cid, name=mapper['name'])), realm) + if len(found) > 1: + module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=mapper['name'])) + if len(found) == 1: + old_mapper = found[0] + else: + old_mapper = {} + + new_mapper = old_mapper.copy() + new_mapper.update(mapper) + + if new_mapper.get('id') is not None: + kc.update_component(new_mapper, realm) + else: + if new_mapper.get('parentId') is None: + new_mapper['parentId'] = after_comp['id'] + mapper = kc.create_component(new_mapper, realm) + + after_comp['mappers'] = updated_mappers + result['end_state'] = sanitize(after_comp) + + result['msg'] = "User federation {id} has been created".format(id=after_comp['id']) + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # no changes + if desired_comp == before_comp: + result['changed'] = False + result['end_state'] = sanitize(desired_comp) + result['msg'] = "No changes required to user federation {id}.".format(id=cid) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_comp), after=sanitize(desired_comp)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + desired_comp = desired_comp.copy() + updated_mappers = desired_comp.pop('mappers', []) + kc.update_component(desired_comp, realm) + after_comp = kc.get_component(cid, realm) + + for mapper in updated_mappers: + if mapper.get('id') is not None: + kc.update_component(mapper, realm) + else: + if mapper.get('parentId') is None: + mapper['parentId'] = desired_comp['id'] + mapper = kc.create_component(mapper, realm) + + after_comp['mappers'] = updated_mappers + result['end_state'] = sanitize(after_comp) + + result['msg'] = "User federation {id} has been updated".format(id=cid) + module.exit_json(**result) + + elif state == 'absent': + # Process a deletion + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_comp), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_component(cid, realm) + + 
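# Keycloak removes a component's mapper sub-components together with the
+            # component itself, so the mappers need no separate cleanup here
+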
result['end_state'] = {} + + result['msg'] = "User federation {id} has been deleted".format(id=cid) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py b/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py new file mode 100644 index 000000000..d754e313a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py @@ -0,0 +1,406 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Dušan Marković (@bratwurzt) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_user_rolemapping + +short_description: Allows administration of Keycloak user_rolemapping with the Keycloak API + +version_added: 5.7.0 + +description: + - This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + + - When updating a user_rolemapping, where possible provide the role ID to the module. This removes a lookup + to the API to translate the name into the role ID. + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + state: + description: + - State of the user_rolemapping. + - On C(present), the user_rolemapping will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the user_rolemapping will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - They Keycloak realm under which this role_representation resides. + default: 'master' + + target_username: + type: str + description: + - Username of the user roles are mapped to. + - This parameter is not required (can be replaced by uid for less API call). + + uid: + type: str + description: + - ID of the user to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + service_account_user_client_id: + type: str + description: + - Client ID of the service-account-user to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + client_id: + type: str + description: + - Name of the client to be mapped (different than I(cid)). + - This parameter is required if I(cid) is not provided (can be replaced by I(cid) + to reduce the number of API calls that must be made). 
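+
+  # when neither I(cid) nor I(client_id) is supplied, the roles are treated as realm roles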
+ + cid: + type: str + description: + - ID of the client to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + roles: + description: + - Roles to be mapped to the user. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation. + - This parameter is not required for updating or deleting a role_representation but + providing it will reduce the number of API calls required. + +extends_documentation_fragment: + - community.general.keycloak + - community.general.attributes + +author: + - Dušan Marković (@bratwurzt) +''' + +EXAMPLES = ''' +- name: Map a client role to a user, authentication with credentials + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + user_id: user1Id + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a service account user for a client, authentication with credentials + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + service_account_user_client_id: clientIdOfServiceAccount + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a user, authentication with token + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + client_id: client1 + target_username: user1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap client role from a user + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + client_id: client1 + uid: 70e3ae72-96b6-11e6-9056-9737fd4d0764 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Role role1 assigned to user user1." + +proposed: + description: Representation of proposed client role mapping. + returned: always + type: dict + sample: { + clientId: "test" + } + +existing: + description: + - Representation of existing client role mapping. + - The sample is truncated. + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } + +end_state: + description: + - Representation of client role mapping after module execution. + - The sample is truncated. 
+ returned: on success + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + uid=dict(type='str'), + target_username=dict(type='str'), + service_account_user_client_id=dict(type='str'), + cid=dict(type='str'), + client_id=dict(type='str'), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password'], + ['uid', 'target_username', 'service_account_user_client_id']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('cid') + client_id = module.params.get('client_id') + uid = module.params.get('uid') + target_username = module.params.get('target_username') + service_account_user_client_id = module.params.get('service_account_user_client_id') + roles = module.params.get('roles') + + # Check the parameters + if uid is None and target_username is None and service_account_user_client_id is None: + module.fail_json(msg='Either the `target_username`, `uid` or `service_account_user_client_id` has to be specified.') + + # Get the potential missing parameters + if uid is None and service_account_user_client_id is None: + user_rep = kc.get_user_by_username(username=target_username, realm=realm) + if user_rep is not None: + uid = user_rep.get('id') + else: + module.fail_json(msg='Could not fetch user for username %s:' % target_username) + else: + if uid is None and target_username is None: + user_rep = kc.get_service_account_user_by_client_id(client_id=service_account_user_client_id, realm=realm) + if user_rep is not None: + uid = user_rep['id'] + else: + module.fail_json(msg='Could not fetch service-account-user for client_id %s:' % target_username) + + if cid is None and client_id is not None: + cid = kc.get_client_id(client_id=client_id, realm=realm) + if cid is None: + module.fail_json(msg='Could not fetch client %s:' % client_id) + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role_index, role in enumerate(roles, start=0): + if role.get('name') is None and role.get('id') is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role.get('id') is None: + if cid is None: + role_id = kc.get_realm_role(name=role.get('name'), realm=realm)['id'] + else: + role_id = kc.get_client_role_id_by_name(cid=cid, name=role.get('name'), realm=realm) + if role_id is not None: + 
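# cache the resolved ID on the role entry so later mapping calls can use it directly
+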
role['id'] = role_id + else: + module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('name'), client_id, realm)) + # Fetch missing role_name + else: + if cid is None: + role['name'] = kc.get_realm_user_rolemapping_by_id(uid=uid, rid=role.get('id'), realm=realm)['name'] + else: + role['name'] = kc.get_client_user_rolemapping_by_id(uid=uid, cid=cid, rid=role.get('id'), realm=realm)['name'] + if role.get('name') is None: + module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('id'), client_id, realm)) + + # Get effective role mappings + if cid is None: + available_roles_before = kc.get_realm_user_available_rolemappings(uid=uid, realm=realm) + assigned_roles_before = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + available_roles_before = kc.get_client_user_available_rolemappings(uid=uid, cid=cid, realm=realm) + assigned_roles_before = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + + result['existing'] = assigned_roles_before + result['proposed'] = roles + + update_roles = [] + for role_index, role in enumerate(roles, start=0): + # Fetch roles to assign if state present + if state == 'present': + for available_role in available_roles_before: + if role.get('name') == available_role.get('name'): + update_roles.append({ + 'id': role.get('id'), + 'name': role.get('name'), + }) + # Fetch roles to remove if state absent + else: + for assigned_role in assigned_roles_before: + if role.get('name') == assigned_role.get('name'): + update_roles.append({ + 'id': role.get('id'), + 'name': role.get('name'), + }) + + if len(update_roles): + if state == 'present': + # Assign roles + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=update_roles) + if module.check_mode: + module.exit_json(**result) + kc.add_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) + result['msg'] = 'Roles %s assigned to userId %s.' % (update_roles, uid) + if cid is None: + assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + else: + # Remove mapping of role + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=update_roles) + if module.check_mode: + module.exit_json(**result) + kc.delete_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) + result['msg'] = 'Roles %s removed from userId %s.' % (update_roles, uid) + if cid is None: + assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are correctly mapped to user for username %s.' 
% (roles, target_username) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/keyring.py b/ansible_collections/community/general/plugins/modules/keyring.py new file mode 100644 index 000000000..ada22ed58 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keyring.py @@ -0,0 +1,279 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Alexander Hussey +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +""" +Ansible Module - community.general.keyring +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: keyring +version_added: 5.2.0 +author: + - Alexander Hussey (@ahussey-redhat) +short_description: Set or delete a passphrase using the Operating System's native keyring +description: >- + This module uses the L(keyring Python library, https://pypi.org/project/keyring/) + to set or delete passphrases for a given service and username from the OS' native keyring. +requirements: + - keyring (Python library) + - gnome-keyring (application - required for headless Gnome keyring access) + - dbus-run-session (application - required for headless Gnome keyring access) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + service: + description: The name of the service. + required: true + type: str + username: + description: The user belonging to the service. + required: true + type: str + user_password: + description: The password to set. + required: false + type: str + aliases: + - password + keyring_password: + description: Password to unlock keyring. + required: true + type: str + state: + description: Whether the password should exist. + required: false + default: present + type: str + choices: + - present + - absent +""" + +EXAMPLES = r""" +- name: Set a password for test/test1 + community.general.keyring: + service: test + username: test1 + user_password: "{{ user_password }}" + keyring_password: "{{ keyring_password }}" + +- name: Delete the password for test/test1 + community.general.keyring: + service: test + username: test1 + user_password: "{{ user_password }}" + keyring_password: "{{ keyring_password }}" + state: absent +""" + +try: + from shlex import quote +except ImportError: + from pipes import quote +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + import keyring + + HAS_KEYRING = True + KEYRING_IMP_ERR = None +except ImportError: + HAS_KEYRING = False + KEYRING_IMP_ERR = traceback.format_exc() + + +def del_passphrase(module): + """ + Attempt to delete a passphrase in the keyring using the Python API and fallback to using a shell. 
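+    Returns None on success, or the stderr output of the shell fallback on failure.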
+ """ + if module.check_mode: + return None + try: + keyring.delete_password(module.params["service"], module.params["username"]) + return None + except keyring.errors.KeyringLocked as keyring_locked_err: # pylint: disable=unused-variable + delete_argument = ( + 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring del %s %s\n' + % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + ) + dummy, dummy, stderr = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=delete_argument, + encoding=None, + ) + + if not stderr.decode("UTF-8"): + return None + return stderr.decode("UTF-8") + + +def set_passphrase(module): + """ + Attempt to set passphrase in the keyring using the Python API and fallback to using a shell. + """ + if module.check_mode: + return None + try: + keyring.set_password( + module.params["service"], + module.params["username"], + module.params["user_password"], + ) + return None + except keyring.errors.KeyringLocked as keyring_locked_err: # pylint: disable=unused-variable + set_argument = ( + 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring set %s %s\n%s\n' + % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + quote(module.params["user_password"]), + ) + ) + dummy, dummy, stderr = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=set_argument, + encoding=None, + ) + if not stderr.decode("UTF-8"): + return None + return stderr.decode("UTF-8") + + +def get_passphrase(module): + """ + Attempt to retrieve passphrase from keyring using the Python API and fallback to using a shell. + """ + try: + passphrase = keyring.get_password( + module.params["service"], module.params["username"] + ) + return passphrase + except keyring.errors.KeyringLocked: + pass + except keyring.errors.InitError: + pass + except AttributeError: + pass + get_argument = 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring get %s %s\n' % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + dummy, stdout, dummy = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=get_argument, + encoding=None, + ) + try: + return stdout.decode("UTF-8").splitlines()[1] # Only return the line containing the password + except IndexError: + return None + + +def run_module(): + """ + Attempts to retrieve a passphrase from a keyring. 
+ """ + result = dict( + changed=False, + msg="", + ) + + module_args = dict( + service=dict(type="str", required=True), + username=dict(type="str", required=True), + keyring_password=dict(type="str", required=True, no_log=True), + user_password=dict( + type="str", required=False, no_log=True, aliases=["password"] + ), + state=dict( + type="str", required=False, default="present", choices=["absent", "present"] + ), + ) + + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + if not HAS_KEYRING: + module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR) + + passphrase = get_passphrase(module) + if module.params["state"] == "present": + if passphrase is not None: + if passphrase == module.params["user_password"]: + result["msg"] = "Passphrase already set for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if passphrase != module.params["user_password"]: + set_result = set_passphrase(module) + if set_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been updated for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if set_result is not None: + module.fail_json(msg=set_result) + if passphrase is None: + set_result = set_passphrase(module) + if set_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been updated for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if set_result is not None: + module.fail_json(msg=set_result) + + if module.params["state"] == "absent": + if not passphrase: + result["result"] = "Passphrase already absent for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if passphrase: + del_result = del_passphrase(module) + if del_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been removed for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if del_result is not None: + module.fail_json(msg=del_result) + + module.exit_json(**result) + + +def main(): + """ + main module loop + """ + run_module() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/keyring_info.py b/ansible_collections/community/general/plugins/modules/keyring_info.py new file mode 100644 index 000000000..5c41ecc4d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/keyring_info.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Alexander Hussey +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +""" +Ansible Module - community.general.keyring_info +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: keyring_info +version_added: 5.2.0 +author: + - Alexander Hussey (@ahussey-redhat) +short_description: Get a passphrase using the Operating System's native keyring +description: >- + This module uses the L(keyring Python library, https://pypi.org/project/keyring/) + to retrieve passphrases for a given service and username from the OS' native keyring. 
+requirements: + - keyring (Python library) + - gnome-keyring (application - required for headless Linux keyring access) + - dbus-run-session (application - required for headless Linux keyring access) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + service: + description: The name of the service. + required: true + type: str + username: + description: The user belonging to the service. + required: true + type: str + keyring_password: + description: Password to unlock keyring. + required: true + type: str +""" + +EXAMPLES = r""" + - name: Retrieve password for service_name/user_name + community.general.keyring_info: + service: test + username: test1 + keyring_password: "{{ keyring_password }}" + register: test_password + + - name: Display password + ansible.builtin.debug: + msg: "{{ test_password.passphrase }}" +""" + +RETURN = r""" + passphrase: + description: A string containing the password. + returned: success and the password exists + type: str + sample: Password123 +""" + +try: + from shlex import quote +except ImportError: + from pipes import quote +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + import keyring + + HAS_KEYRING = True + KEYRING_IMP_ERR = None +except ImportError: + HAS_KEYRING = False + KEYRING_IMP_ERR = traceback.format_exc() + + +def _alternate_retrieval_method(module): + get_argument = 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring get %s %s\n' % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + dummy, stdout, dummy = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=get_argument, + encoding=None, + ) + try: + return stdout.decode("UTF-8").splitlines()[1] + except IndexError: + return None + + +def run_module(): + """ + Attempts to retrieve a passphrase from a keyring. + """ + result = dict(changed=False, msg="") + + module_args = dict( + service=dict(type="str", required=True), + username=dict(type="str", required=True), + keyring_password=dict(type="str", required=True, no_log=True), + ) + + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + if not HAS_KEYRING: + module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR) + try: + passphrase = keyring.get_password( + module.params["service"], module.params["username"] + ) + except keyring.errors.KeyringLocked: + pass + except keyring.errors.InitError: + pass + except AttributeError: + pass + + if passphrase is None: + passphrase = _alternate_retrieval_method(module) + + if passphrase is not None: + result["msg"] = "Successfully retrieved password for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + result["passphrase"] = passphrase + if passphrase is None: + result["msg"] = "Password for %s@%s does not exist." % ( + module.params["service"], + module.params["username"], + ) + module.exit_json(**result) + + +def main(): + """ + main module loop + """ + run_module() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/kibana_plugin.py b/ansible_collections/community/general/plugins/modules/kibana_plugin.py new file mode 100644 index 000000000..a52eda2fd --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/kibana_plugin.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Thierno IB. 
BARRY @barryib
+# Sponsored by Polyconseil http://polyconseil.fr.
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+  - This module can be used to manage Kibana plugins.
+author: Thierno IB. BARRY (@barryib)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name of the plugin to install.
+    required: true
+    type: str
+  state:
+    description:
+      - Desired state of a plugin.
+    choices: ["present", "absent"]
+    default: present
+    type: str
+  url:
+    description:
+      - Set exact URL to download the plugin from.
+      - For a local file, prefix its absolute path with C(file://).
+    type: str
+  timeout:
+    description:
+      - "Timeout setting: 30s, 1m, 1h etc."
+    default: 1m
+    type: str
+  plugin_bin:
+    description:
+      - Location of the Kibana binary.
+    default: /opt/kibana/bin/kibana
+    type: path
+  plugin_dir:
+    description:
+      - Your configured plugin directory specified in Kibana.
+    default: /opt/kibana/installedPlugins/
+    type: path
+  version:
+    description:
+      - Version of the plugin to be installed.
+      - If the plugin is already installed with a previous version, it will NOT be updated unless C(force) is set to C(true).
+    type: str
+  force:
+    description:
+      - Delete and re-install the plugin. Can be useful for plugin updates.
+    type: bool
+    default: false
+  allow_root:
+    description:
+      - Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands.
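+      - Kibana itself refuses to run as root unless this flag is given, so it is typically only needed when the task runs with C(become: true).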
+ type: bool + default: false + version_added: 2.3.0 +''' + +EXAMPLES = ''' +- name: Install Elasticsearch head plugin + community.general.kibana_plugin: + state: present + name: elasticsearch/marvel + +- name: Install specific version of a plugin + community.general.kibana_plugin: + state: present + name: elasticsearch/marvel + version: '2.3.3' + +- name: Uninstall Elasticsearch head plugin + community.general.kibana_plugin: + state: absent + name: elasticsearch/marvel +''' + +RETURN = ''' +cmd: + description: the launched command during plugin management (install / remove) + returned: success + type: str +name: + description: the plugin name to install or remove + returned: success + type: str +url: + description: the url from where the plugin is installed from + returned: success + type: str +timeout: + description: the timeout for plugin download + returned: success + type: str +stdout: + description: the command stdout + returned: success + type: str +stderr: + description: the command stderr + returned: success + type: str +state: + description: the state for the managed plugin + returned: success + type: str +''' + +import os +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +PACKAGE_STATE_MAP = dict( + present="--install", + absent="--remove" +) + + +def parse_plugin_repo(string): + elements = string.split("/") + + # We first consider the simplest form: pluginname + repo = elements[0] + + # We consider the form: username/pluginname + if len(elements) > 1: + repo = elements[1] + + # remove elasticsearch- prefix + # remove es- prefix + for string in ("elasticsearch-", "es-"): + if repo.startswith(string): + return repo[len(string):] + + return repo + + +def is_plugin_present(plugin_dir, working_dir): + return os.path.isdir(os.path.join(working_dir, plugin_dir)) + + +def parse_error(string): + reason = "reason: " + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string + + +def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version='4.6'): + if LooseVersion(kibana_version) > LooseVersion('4.6'): + kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') + cmd_args = [kibana_plugin_bin, "install"] + if url: + cmd_args.append(url) + else: + cmd_args.append(plugin_name) + else: + cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name] + + if url: + cmd_args.extend(["--url", url]) + + if timeout: + cmd_args.extend(["--timeout", timeout]) + + if allow_root: + cmd_args.append('--allow-root') + + if module.check_mode: + return True, " ".join(cmd_args), "check mode", "" + + rc, out, err = module.run_command(cmd_args) + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, " ".join(cmd_args), out, err + + +def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'): + if LooseVersion(kibana_version) > LooseVersion('4.6'): + kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') + cmd_args = [kibana_plugin_bin, "remove", plugin_name] + else: + cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] + + if allow_root: + cmd_args.append('--allow-root') + + if module.check_mode: + return True, " ".join(cmd_args), "check mode", "" + + rc, out, err = module.run_command(cmd_args) + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, " 
".join(cmd_args), out, err + + +def get_kibana_version(module, plugin_bin, allow_root): + cmd_args = [plugin_bin, '--version'] + + if allow_root: + cmd_args.append('--allow-root') + + rc, out, err = module.run_command(cmd_args) + if rc != 0: + module.fail_json(msg="Failed to get Kibana version : %s" % err) + + return out.strip() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), + url=dict(default=None), + timeout=dict(default="1m"), + plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), + plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), + version=dict(default=None), + force=dict(default=False, type="bool"), + allow_root=dict(default=False, type="bool"), + ), + supports_check_mode=True, + ) + + name = module.params["name"] + state = module.params["state"] + url = module.params["url"] + timeout = module.params["timeout"] + plugin_bin = module.params["plugin_bin"] + plugin_dir = module.params["plugin_dir"] + version = module.params["version"] + force = module.params["force"] + allow_root = module.params["allow_root"] + + changed, cmd, out, err = False, '', '', '' + + kibana_version = get_kibana_version(module, plugin_bin, allow_root) + + present = is_plugin_present(parse_plugin_repo(name), plugin_dir) + + # skip if the state is correct + if (present and state == "present" and not force) or (state == "absent" and not present and not force): + module.exit_json(changed=False, name=name, state=state) + + if version: + name = name + '/' + version + + if state == "present": + if force: + remove_plugin(module, plugin_bin, name, allow_root, kibana_version) + changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version) + + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name, allow_root, kibana_version) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/launchd.py b/ansible_collections/community/general/plugins/modules/launchd.py new file mode 100644 index 000000000..13a8ce086 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/launchd.py @@ -0,0 +1,522 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Martin Migasiewicz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: launchd +author: + - Martin Migasiewicz (@martinm82) +short_description: Manage macOS services +version_added: 1.0.0 +description: + - Manage launchd services on target macOS hosts. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the service. + type: str + required: true + state: + description: + - C(started)/C(stopped) are idempotent actions that will not run + commands unless necessary. + - Launchd does not support C(restarted) nor C(reloaded) natively. + These will trigger a stop/start (restarted) or an unload/load + (reloaded). 
+ - C(restarted) unloads and loads the service before start to ensure + that the latest job definition (plist) is used. + - C(reloaded) unloads and loads the service to ensure that the latest + job definition (plist) is used. Whether a service is started or + stopped depends on the content of the definition file. + type: str + choices: [ reloaded, restarted, started, stopped, unloaded ] + enabled: + description: + - Whether the service should start on boot. + - B(At least one of state and enabled are required.) + type: bool + force_stop: + description: + - Whether the service should not be restarted automatically by launchd. + - Services might have the 'KeepAlive' attribute set to true in a launchd configuration. + In case this is set to true, stopping a service will cause that launchd starts the service again. + - Set this option to C(true) to let this module change the 'KeepAlive' attribute to false. + type: bool + default: false +notes: +- A user must privileged to manage services using this module. +requirements: +- A system managed by launchd +- The plistlib python library +''' + +EXAMPLES = r''' +- name: Make sure spotify webhelper is started + community.general.launchd: + name: com.spotify.webhelper + state: started + +- name: Deploy custom memcached job definition + template: + src: org.memcached.plist.j2 + dest: /Library/LaunchDaemons/org.memcached.plist + +- name: Run memcached + community.general.launchd: + name: org.memcached + state: started + +- name: Stop memcached + community.general.launchd: + name: org.memcached + state: stopped + +- name: Stop memcached + community.general.launchd: + name: org.memcached + state: stopped + force_stop: true + +- name: Restart memcached + community.general.launchd: + name: org.memcached + state: restarted + +- name: Unload memcached + community.general.launchd: + name: org.memcached + state: unloaded +''' + +RETURN = r''' +status: + description: Metadata about service status + returned: always + type: dict + sample: + { + "current_pid": "-", + "current_state": "stopped", + "previous_pid": "82636", + "previous_state": "running" + } +''' + +import os +import plistlib +from abc import ABCMeta, abstractmethod +from time import sleep + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class ServiceState: + UNKNOWN = 0 + LOADED = 1 + STOPPED = 2 + STARTED = 3 + UNLOADED = 4 + + @staticmethod + def to_string(state): + strings = { + ServiceState.UNKNOWN: 'unknown', + ServiceState.LOADED: 'loaded', + ServiceState.STOPPED: 'stopped', + ServiceState.STARTED: 'started', + ServiceState.UNLOADED: 'unloaded' + } + return strings[state] + + +class Plist: + def __init__(self, module, service): + self.__changed = False + self.__service = service + + state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run() + + # Check if readPlist is available or not + self.old_plistlib = hasattr(plistlib, 'readPlist') + + self.__file = self.__find_service_plist(self.__service) + if self.__file is None: + msg = 'Unable to infer the path of %s service plist file' % self.__service + if pid is None and state == ServiceState.UNLOADED: + msg += ' and it was not found among active services' + module.fail_json(msg=msg) + self.__update(module) + + @staticmethod + def __find_service_plist(service_name): + """Finds the plist file associated with a service""" + + launchd_paths = [ + os.path.join(os.getenv('HOME'), 'Library/LaunchAgents'), + '/Library/LaunchAgents', + '/Library/LaunchDaemons', + 
'/System/Library/LaunchAgents', + '/System/Library/LaunchDaemons' + ] + + for path in launchd_paths: + try: + files = os.listdir(path) + except OSError: + continue + + filename = '%s.plist' % service_name + if filename in files: + return os.path.join(path, filename) + return None + + def __update(self, module): + self.__handle_param_enabled(module) + self.__handle_param_force_stop(module) + + def __read_plist_file(self, module): + service_plist = {} + if self.old_plistlib: + return plistlib.readPlist(self.__file) + + # readPlist is deprecated in Python 3 and onwards + try: + with open(self.__file, 'rb') as plist_fp: + service_plist = plistlib.load(plist_fp) + except Exception as e: + module.fail_json(msg="Failed to read plist file " + "%s due to %s" % (self.__file, to_native(e))) + return service_plist + + def __write_plist_file(self, module, service_plist=None): + if not service_plist: + service_plist = {} + + if self.old_plistlib: + plistlib.writePlist(service_plist, self.__file) + return + # writePlist is deprecated in Python 3 and onwards + try: + with open(self.__file, 'wb') as plist_fp: + plistlib.dump(service_plist, plist_fp) + except Exception as e: + module.fail_json(msg="Failed to write to plist file " + " %s due to %s" % (self.__file, to_native(e))) + + def __handle_param_enabled(self, module): + if module.params['enabled'] is not None: + service_plist = self.__read_plist_file(module) + + # Enable/disable service startup at boot if requested + # Launchctl does not expose functionality to set the RunAtLoad + # attribute of a job definition. So we parse and modify the job + # definition plist file directly for this purpose. + if module.params['enabled'] is not None: + enabled = service_plist.get('RunAtLoad', False) + if module.params['enabled'] != enabled: + service_plist['RunAtLoad'] = module.params['enabled'] + + # Update the plist with one of the changes done. + if not module.check_mode: + self.__write_plist_file(module, service_plist) + self.__changed = True + + def __handle_param_force_stop(self, module): + if module.params['force_stop'] is not None: + service_plist = self.__read_plist_file(module) + + # Set KeepAlive to false in case force_stop is defined to avoid + # that the service gets restarted when stopping was requested. + if module.params['force_stop'] is not None: + keep_alive = service_plist.get('KeepAlive', False) + if module.params['force_stop'] and keep_alive: + service_plist['KeepAlive'] = not module.params['force_stop'] + + # Update the plist with one of the changes done. + if not module.check_mode: + self.__write_plist_file(module, service_plist) + self.__changed = True + + def is_changed(self): + return self.__changed + + def get_file(self): + return self.__file + + +class LaunchCtlTask(object): + __metaclass__ = ABCMeta + WAITING_TIME = 5 # seconds + + def __init__(self, module, service, plist): + self._module = module + self._service = service + self._plist = plist + self._launch = self._module.get_bin_path('launchctl', True) + + def run(self): + """Runs a launchd command like 'load', 'unload', 'start', 'stop', etc. + and returns the new state and pid. 
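+        Subclasses supply the actual command via runCommand(); the state is
+        then re-read with 'launchctl list' so callers always receive the
+        resulting (state, pid, status_code, err) tuple.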
+ """ + self.runCommand() + return self.get_state() + + @abstractmethod + def runCommand(self): + pass + + def get_state(self): + rc, out, err = self._launchctl("list") + if rc != 0: + self._module.fail_json( + msg='Failed to get status of %s' % (self._launch)) + + state = ServiceState.UNLOADED + service_pid = "-" + status_code = None + for line in out.splitlines(): + if line.strip(): + pid, last_exit_code, label = line.split('\t') + if label.strip() == self._service: + service_pid = pid + status_code = last_exit_code + + # From launchctl man page: + # If the number [...] is negative, it represents the + # negative of the signal which killed the job. Thus, + # "-15" would indicate that the job was terminated with + # SIGTERM. + if last_exit_code not in ['0', '-2', '-3', '-9', '-15']: + # Something strange happened and we have no clue in + # which state the service is now. Therefore we mark + # the service state as UNKNOWN. + state = ServiceState.UNKNOWN + elif pid != '-': + # PID seems to be an integer so we assume the service + # is started. + state = ServiceState.STARTED + else: + # Exit code is 0 and PID is not available so we assume + # the service is stopped. + state = ServiceState.STOPPED + break + return (state, service_pid, status_code, err) + + def start(self): + rc, out, err = self._launchctl("start") + # Unfortunately launchd does not wait until the process really started. + sleep(self.WAITING_TIME) + return (rc, out, err) + + def stop(self): + rc, out, err = self._launchctl("stop") + # Unfortunately launchd does not wait until the process really stopped. + sleep(self.WAITING_TIME) + return (rc, out, err) + + def restart(self): + # TODO: check for rc, out, err + self.stop() + return self.start() + + def reload(self): + # TODO: check for rc, out, err + self.unload() + return self.load() + + def load(self): + return self._launchctl("load") + + def unload(self): + return self._launchctl("unload") + + def _launchctl(self, command): + service_or_plist = self._plist.get_file() if command in [ + 'load', 'unload'] else self._service if command in ['start', 'stop'] else "" + + rc, out, err = self._module.run_command( + '%s %s %s' % (self._launch, command, service_or_plist)) + + if rc != 0: + msg = "Unable to %s '%s' (%s): '%s'" % ( + command, self._service, self._plist.get_file(), err) + self._module.fail_json(msg=msg) + + return (rc, out, err) + + +class LaunchCtlStart(LaunchCtlTask): + def __init__(self, module, service, plist): + super(LaunchCtlStart, self).__init__(module, service, plist) + + def runCommand(self): + state, dummy, dummy, dummy = self.get_state() + + if state in (ServiceState.STOPPED, ServiceState.LOADED): + self.reload() + self.start() + elif state == ServiceState.STARTED: + # In case the service is already in started state but the + # job definition was changed we need to unload/load the + # service and start the service again. + if self._plist.is_changed(): + self.reload() + self.start() + elif state == ServiceState.UNLOADED: + self.load() + self.start() + elif state == ServiceState.UNKNOWN: + # We are in an unknown state, let's try to reload the config + # and start the service again. 
+ self.reload() + self.start() + + +class LaunchCtlStop(LaunchCtlTask): + def __init__(self, module, service, plist): + super(LaunchCtlStop, self).__init__(module, service, plist) + + def runCommand(self): + state, dummy, dummy, dummy = self.get_state() + + if state == ServiceState.STOPPED: + # In case the service is stopped and we might later decide + # to start it, we need to reload the job definition by + # forcing an unload and load first. + # Afterwards we need to stop it as it might have been + # started again (KeepAlive or RunAtLoad). + if self._plist.is_changed(): + self.reload() + self.stop() + elif state in (ServiceState.STARTED, ServiceState.LOADED): + if self._plist.is_changed(): + self.reload() + self.stop() + elif state == ServiceState.UNKNOWN: + # We are in an unknown state, let's try to reload the config + # and stop the service gracefully. + self.reload() + self.stop() + + +class LaunchCtlReload(LaunchCtlTask): + def __init__(self, module, service, plist): + super(LaunchCtlReload, self).__init__(module, service, plist) + + def runCommand(self): + state, dummy, dummy, dummy = self.get_state() + + if state == ServiceState.UNLOADED: + # launchd throws an error if we do an unload on an already + # unloaded service. + self.load() + else: + self.reload() + + +class LaunchCtlUnload(LaunchCtlTask): + def __init__(self, module, service, plist): + super(LaunchCtlUnload, self).__init__(module, service, plist) + + def runCommand(self): + state, dummy, dummy, dummy = self.get_state() + self.unload() + + +class LaunchCtlRestart(LaunchCtlReload): + def __init__(self, module, service, plist): + super(LaunchCtlRestart, self).__init__(module, service, plist) + + def runCommand(self): + super(LaunchCtlRestart, self).runCommand() + self.start() + + +class LaunchCtlList(LaunchCtlTask): + def __init__(self, module, service): + super(LaunchCtlList, self).__init__(module, service, None) + + def runCommand(self): + # Do nothing, the list functionality is done by the + # base class run method. + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']), + enabled=dict(type='bool'), + force_stop=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_one_of=[ + ['state', 'enabled'], + ], + ) + + service = module.params['name'] + action = module.params['state'] + rc = 0 + out = err = '' + result = { + 'name': service, + 'changed': False, + 'status': {}, + } + + # We will tailor the plist file in case one of the options + # (enabled, force_stop) was specified. + plist = Plist(module, service) + result['changed'] = plist.is_changed() + + # Gather information about the service to be controlled. 
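+    # The state/pid recorded here is compared with the state after the action
+    # has run; any difference marks the task as changed.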
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run() + result['status']['previous_state'] = ServiceState.to_string(state) + result['status']['previous_pid'] = pid + + # Map the actions to specific tasks + tasks = { + 'started': LaunchCtlStart(module, service, plist), + 'stopped': LaunchCtlStop(module, service, plist), + 'restarted': LaunchCtlRestart(module, service, plist), + 'reloaded': LaunchCtlReload(module, service, plist), + 'unloaded': LaunchCtlUnload(module, service, plist) + } + + status_code = '0' + # Run the requested task + if not module.check_mode: + state, pid, status_code, err = tasks[action].run() + + result['status']['current_state'] = ServiceState.to_string(state) + result['status']['current_pid'] = pid + result['status']['status_code'] = status_code + result['status']['error'] = err + + if (result['status']['current_state'] != result['status']['previous_state'] or + result['status']['current_pid'] != result['status']['previous_pid']): + result['changed'] = True + if module.check_mode: + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/layman.py b/ansible_collections/community/general/plugins/modules/layman.py new file mode 100644 index 000000000..940ac30d1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/layman.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Jakub Jirutka +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: layman +author: "Jakub Jirutka (@jirutka)" +short_description: Manage Gentoo overlays +description: + - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. + Please note that Layman must be installed on a managed node prior using this module. +requirements: + - "python >= 2.6" + - layman python module +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The overlay id to install, synchronize, or uninstall. + Use 'ALL' to sync all of the installed overlays (can be used only when I(state=updated)). + required: true + type: str + list_url: + description: + - An URL of the alternative overlays list that defines the overlay to install. + This list will be fetched and saved under C(${overlay_defs})/${name}.xml), where + C(overlay_defs) is readed from the Layman's configuration. + aliases: [url] + type: str + state: + description: + - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay. + default: present + choices: [present, absent, updated] + type: str + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be + set to C(false) when no other option exists. Prior to 1.9.3 the code + defaulted to C(false). 
+ type: bool + default: true +''' + +EXAMPLES = ''' +- name: Install the overlay mozilla which is on the central overlays list + community.general.layman: + name: mozilla + +- name: Install the overlay cvut from the specified alternative list + community.general.layman: + name: cvut + list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' + +- name: Update (sync) the overlay cvut or install if not installed yet + community.general.layman: + name: cvut + list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' + state: updated + +- name: Update (sync) all of the installed overlays + community.general.layman: + name: ALL + state: updated + +- name: Uninstall the overlay cvut + community.general.layman: + name: cvut + state: absent +''' + +import shutil +import traceback + +from os import path + +LAYMAN_IMP_ERR = None +try: + from layman.api import LaymanAPI + from layman.config import BareConfig + HAS_LAYMAN_API = True +except ImportError: + LAYMAN_IMP_ERR = traceback.format_exc() + HAS_LAYMAN_API = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url + + +USERAGENT = 'ansible-httpget' + + +class ModuleError(Exception): + pass + + +def init_layman(config=None): + '''Returns the initialized ``LaymanAPI``. + + :param config: the layman's configuration to use (optional) + ''' + if config is None: + config = BareConfig(read_configfile=True, quietness=1) + return LaymanAPI(config) + + +def download_url(module, url, dest): + ''' + :param url: the URL to download + :param dest: the absolute path of where to save the downloaded content to; + it must be writable and not a directory + + :raises ModuleError + ''' + + # Hack to add params in the form that fetch_url expects + module.params['http_agent'] = USERAGENT + response, info = fetch_url(module, url) + if info['status'] != 200: + raise ModuleError("Failed to get %s: %s" % (url, info['msg'])) + + try: + with open(dest, 'w') as f: + shutil.copyfileobj(response, f) + except IOError as e: + raise ModuleError("Failed to write: %s" % str(e)) + + +def install_overlay(module, name, list_url=None): + '''Installs the overlay repository. If not on the central overlays list, + then :list_url of an alternative list must be provided. The list will be + fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the + ``overlay_defs`` is read from the Layman's configuration). + + :param name: the overlay id + :param list_url: the URL of the remote repositories list to look for the overlay + definition (optional, default: None) + + :returns: True if the overlay was installed, or False if already exists + (i.e. nothing has changed) + :raises ModuleError + ''' + # read Layman configuration + layman_conf = BareConfig(read_configfile=True) + layman = init_layman(layman_conf) + + if layman.is_installed(name): + return False + + if module.check_mode: + mymsg = 'Would add layman repo \'' + name + '\'' + module.exit_json(changed=True, msg=mymsg) + + if not layman.is_repo(name): + if not list_url: + raise ModuleError("Overlay '%s' is not on the list of known " + "overlays and URL of the remote list was not provided." 
% name) + + overlay_defs = layman_conf.get_option('overlay_defs') + dest = path.join(overlay_defs, name + '.xml') + + download_url(module, list_url, dest) + + # reload config + layman = init_layman() + + if not layman.add_repos(name): + raise ModuleError(layman.get_errors()) + + return True + + +def uninstall_overlay(module, name): + '''Uninstalls the given overlay repository from the system. + + :param name: the overlay id to uninstall + + :returns: True if the overlay was uninstalled, or False if doesn't exist + (i.e. nothing has changed) + :raises ModuleError + ''' + layman = init_layman() + + if not layman.is_installed(name): + return False + + if module.check_mode: + mymsg = 'Would remove layman repo \'' + name + '\'' + module.exit_json(changed=True, msg=mymsg) + + layman.delete_repos(name) + if layman.get_errors(): + raise ModuleError(layman.get_errors()) + + return True + + +def sync_overlay(name): + '''Synchronizes the specified overlay repository. + + :param name: the overlay repository id to sync + :raises ModuleError + ''' + layman = init_layman() + + if not layman.sync(name): + messages = [str(item[1]) for item in layman.sync_results[2]] + raise ModuleError(messages) + + +def sync_overlays(): + '''Synchronize all of the installed overlays. + + :raises ModuleError + ''' + layman = init_layman() + + for name in layman.get_installed(): + sync_overlay(name) + + +def main(): + # define module + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + list_url=dict(aliases=['url']), + state=dict(default="present", choices=['present', 'absent', 'updated']), + validate_certs=dict(required=False, default=True, type='bool'), + ), + supports_check_mode=True + ) + + if not HAS_LAYMAN_API: + module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR) + + state, name, url = (module.params[key] for key in ['state', 'name', 'list_url']) + + changed = False + try: + if state == 'present': + changed = install_overlay(module, name, url) + + elif state == 'updated': + if name == 'ALL': + sync_overlays() + elif install_overlay(module, name, url): + changed = True + else: + sync_overlay(name) + else: + changed = uninstall_overlay(module, name) + + except ModuleError as e: + module.fail_json(msg=e.message) + else: + module.exit_json(changed=changed, name=name) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/lbu.py b/ansible_collections/community/general/plugins/modules/lbu.py new file mode 100644 index 000000000..c961b6060 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/lbu.py @@ -0,0 +1,138 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Kaarle Ritvanen +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: lbu + +short_description: Local Backup Utility for Alpine Linux + +version_added: '0.2.0' + +description: + - Manage Local Backup Utility of Alpine Linux in run-from-RAM mode + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + commit: + description: + - Control whether to commit changed files. + type: bool + exclude: + description: + - List of paths to exclude. 
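+      - A leading slash is optional; the module normalizes paths before comparing them against C(lbu)'s current list.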
+ type: list + elements: str + include: + description: + - List of paths to include. + type: list + elements: str + +author: + - Kaarle Ritvanen (@kunkku) +''' + +EXAMPLES = ''' +# Commit changed files (if any) +- name: Commit + community.general.lbu: + commit: true + +# Exclude path and commit +- name: Exclude directory + community.general.lbu: + commit: true + exclude: + - /etc/opt + +# Include paths without committing +- name: Include file and directory + community.general.lbu: + include: + - /root/.ssh/authorized_keys + - /var/lib/misc +''' + +RETURN = ''' +msg: + description: Error message + type: str + returned: on failure +''' + +from ansible.module_utils.basic import AnsibleModule + +import os.path + + +def run_module(): + module = AnsibleModule( + argument_spec={ + 'commit': {'type': 'bool'}, + 'exclude': {'type': 'list', 'elements': 'str'}, + 'include': {'type': 'list', 'elements': 'str'} + }, + supports_check_mode=True + ) + + changed = False + + def run_lbu(*args): + code, stdout, stderr = module.run_command( + [module.get_bin_path('lbu', required=True)] + list(args) + ) + if code: + module.fail_json(changed=changed, msg=stderr) + return stdout + + update = False + commit = False + + for param in ('include', 'exclude'): + if module.params[param]: + paths = run_lbu(param, '-l').split('\n') + for path in module.params[param]: + if os.path.normpath('/' + path)[1:] not in paths: + update = True + + if module.params['commit']: + commit = update or run_lbu('status') > '' + + if module.check_mode: + module.exit_json(changed=update or commit) + + if update: + for param in ('include', 'exclude'): + if module.params[param]: + run_lbu(param, *module.params[param]) + changed = True + + if commit: + run_lbu('commit') + changed = True + + module.exit_json(changed=changed) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ldap_attrs.py b/ansible_collections/community/general/plugins/modules/ldap_attrs.py new file mode 100644 index 000000000..c2cac8644 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ldap_attrs.py @@ -0,0 +1,337 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Maciej Delmanowski +# Copyright (c) 2017, Alexander Korinek +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: ldap_attrs +short_description: Add or remove multiple LDAP attribute values +description: + - Add or remove multiple LDAP attribute values. +notes: + - This only deals with attributes on existing entries. To add or remove + whole entries, see M(community.general.ldap_entry). + - The default authentication settings will attempt to use a SASL EXTERNAL + bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a cn=peercred,cn=external,cn=auth ACL + rule allowing root to modify the server configuration. If you need to use + a simple bind to access your server, pass the credentials in I(bind_dn) + and I(bind_pw). + - For I(state=present) and I(state=absent), all value comparisons are + performed on the server for maximum accuracy. 
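+    This means server-side matching rules (for example case-insensitive
+    attributes) are honoured.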
For I(state=exact), values + have to be compared in Python, which obviously ignores LDAP matching + rules. This should work out in most cases, but it is theoretically + possible to see spurious changes when target and actual values are + semantically identical but lexically distinct. +version_added: '0.2.0' +author: + - Jiri Tyr (@jtyr) + - Alexander Korinek (@noles) + - Maciej Delmanowski (@drybjed) +requirements: + - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + required: false + type: str + choices: [present, absent, exact] + default: present + description: + - The state of the attribute values. If C(present), all given attribute + values will be added if they're missing. If C(absent), all given + attribute values will be removed if present. If C(exact), the set of + attribute values will be forced to exactly those provided and no others. + If I(state=exact) and the attribute I(value) is empty, all values for + this attribute will be removed. + attributes: + required: true + type: dict + description: + - The attribute(s) and value(s) to add or remove. + - Each attribute value can be a string for single-valued attributes or + a list of strings for multi-valued attributes. + - If you specify values for this option in YAML, please note that you can improve + readability for long string values by using YAML block modifiers as seen in the + examples for this module. + - Note that when using values that YAML/ansible-core interprets as other types, + like C(yes), C(no) (booleans), or C(2.10) (float), make sure to quote them if + these are meant to be strings. Otherwise the wrong values may be sent to LDAP. + ordered: + required: false + type: bool + default: false + description: + - If C(true), prepend list values with X-ORDERED index numbers in all + attributes specified in the current task. This is useful mostly with + I(olcAccess) attribute to easily manage LDAP Access Control Lists. +extends_documentation_fragment: + - community.general.ldap.documentation + - community.general.attributes + +''' + + +EXAMPLES = r''' +- name: Configure directory number 1 for example.com + community.general.ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcSuffix: dc=example,dc=com + state: exact + +# The complex argument format is required here to pass a list of ACL strings. 
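+# The {n} prefixes pin each rule's X-ORDERED position explicitly; the variant
+# below achieves the same numbering with 'ordered: true'.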
+- name: Set up the ACL + community.general.ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcAccess: + - >- + {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + {1}to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read + state: exact + +# An alternative approach with automatic X-ORDERED numbering +- name: Set up the ACL + community.general.ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcAccess: + - >- + to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read + ordered: true + state: exact + +- name: Declare some indexes + community.general.ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcDbIndex: + - objectClass eq + - uid eq + +- name: Set up a root user, which we can use later to bootstrap the directory + community.general.ldap_attrs: + dn: olcDatabase={1}hdb,cn=config + attributes: + olcRootDN: cn=root,dc=example,dc=com + olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" + state: exact + +- name: Remove an attribute with a specific value + community.general.ldap_attrs: + dn: uid=jdoe,ou=people,dc=example,dc=com + attributes: + description: "An example user account" + state: absent + server_uri: ldap://localhost/ + bind_dn: cn=admin,dc=example,dc=com + bind_pw: password + +- name: Remove specified attribute(s) from an entry + community.general.ldap_attrs: + dn: uid=jdoe,ou=people,dc=example,dc=com + attributes: + description: [] + state: exact + server_uri: ldap://localhost/ + bind_dn: cn=admin,dc=example,dc=com + bind_pw: password +''' + + +RETURN = r''' +modlist: + description: list of modified parameters + returned: success + type: list + sample: + - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]] +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native, to_bytes, to_text +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs + +import re + +LDAP_IMP_ERR = None +try: + import ldap + import ldap.filter + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +class LdapAttrs(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + # Shortcuts + self.attrs = self.module.params['attributes'] + self.state = self.module.params['state'] + self.ordered = self.module.params['ordered'] + + def _order_values(self, values): + """ Preprend X-ORDERED index numbers to attribute's values. """ + ordered_values = [] + + if isinstance(values, list): + for index, value in enumerate(values): + cleaned_value = re.sub(r'^\{\d+\}', '', value) + ordered_values.append('{' + str(index) + '}' + cleaned_value) + + return ordered_values + + def _normalize_values(self, values): + """ Normalize attribute's values. 
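+        Strings become single-element lists, every value is encoded to bytes,
+        and X-ORDERED prefixes are prepended first when 'ordered' is enabled.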
""" + norm_values = [] + + if isinstance(values, list): + if self.ordered: + norm_values = list(map(to_bytes, + self._order_values(list(map(str, + values))))) + else: + norm_values = list(map(to_bytes, values)) + else: + norm_values = [to_bytes(str(values))] + + return norm_values + + def add(self): + modlist = [] + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + for value in norm_values: + if self._is_value_absent(name, value): + modlist.append((ldap.MOD_ADD, name, value)) + + return modlist + + def delete(self): + modlist = [] + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + for value in norm_values: + if self._is_value_present(name, value): + modlist.append((ldap.MOD_DELETE, name, value)) + + return modlist + + def exact(self): + modlist = [] + for name, values in self.module.params['attributes'].items(): + norm_values = self._normalize_values(values) + try: + results = self.connection.search_s( + self.dn, ldap.SCOPE_BASE, attrlist=[name]) + except ldap.LDAPError as e: + self.fail("Cannot search for attribute %s" % name, e) + + current = results[0][1].get(name, []) + + if frozenset(norm_values) != frozenset(current): + if len(current) == 0: + modlist.append((ldap.MOD_ADD, name, norm_values)) + elif len(norm_values) == 0: + modlist.append((ldap.MOD_DELETE, name, None)) + else: + modlist.append((ldap.MOD_REPLACE, name, norm_values)) + + return modlist + + def _is_value_present(self, name, value): + """ True if the target attribute has the given value. """ + try: + escaped_value = ldap.filter.escape_filter_chars(to_text(value)) + filterstr = "(%s=%s)" % (name, escaped_value) + dns = self.connection.search_s(self.dn, ldap.SCOPE_BASE, filterstr) + is_present = len(dns) == 1 + except ldap.NO_SUCH_OBJECT: + is_present = False + + return is_present + + def _is_value_absent(self, name, value): + """ True if the target attribute doesn't have the given value. 
""" + return not self._is_value_present(name, value) + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + attributes=dict(type='dict', required=True), + ordered=dict(type='bool', default=False, required=False), + state=dict(type='str', default='present', choices=['absent', 'exact', 'present']), + ), + supports_check_mode=True, + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + # Instantiate the LdapAttr object + ldap = LdapAttrs(module) + + state = module.params['state'] + + # Perform action + if state == 'present': + modlist = ldap.add() + elif state == 'absent': + modlist = ldap.delete() + elif state == 'exact': + modlist = ldap.exact() + + changed = False + + if len(modlist) > 0: + changed = True + + if not module.check_mode: + try: + ldap.connection.modify_s(ldap.dn, modlist) + except Exception as e: + module.fail_json(msg="Attribute action failed.", details=to_native(e)) + + module.exit_json(changed=changed, modlist=modlist) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ldap_entry.py b/ansible_collections/community/general/plugins/modules/ldap_entry.py new file mode 100644 index 000000000..619bbf927 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ldap_entry.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ldap_entry +short_description: Add or remove LDAP entries +description: + - Add or remove LDAP entries. This module only asserts the existence or + non-existence of an LDAP entry, not its attributes. To assert the + attribute values of an entry, see M(community.general.ldap_attrs). +notes: + - The default authentication settings will attempt to use a SASL EXTERNAL + bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a cn=peercred,cn=external,cn=auth ACL + rule allowing root to modify the server configuration. If you need to use + a simple bind to access your server, pass the credentials in I(bind_dn) + and I(bind_pw). +author: + - Jiri Tyr (@jtyr) +requirements: + - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + attributes: + description: + - If I(state=present), attributes necessary to create an entry. Existing + entries are never modified. To assert specific attribute values on an + existing entry, use M(community.general.ldap_attrs) module instead. + - Each attribute value can be a string for single-valued attributes or + a list of strings for multi-valued attributes. + - If you specify values for this option in YAML, please note that you can improve + readability for long string values by using YAML block modifiers as seen in the + examples for this module. + - Note that when using values that YAML/ansible-core interprets as other types, + like C(yes), C(no) (booleans), or C(2.10) (float), make sure to quote them if + these are meant to be strings. Otherwise the wrong values may be sent to LDAP. + type: dict + default: {} + objectClass: + description: + - If I(state=present), value or list of values to use when creating + the entry. 
It can either be a string or an actual list of + strings. + type: list + elements: str + state: + description: + - The target state of the entry. + choices: [present, absent] + default: present + type: str + recursive: + description: + - If I(state=delete), a flag indicating whether a single entry or the + whole branch must be deleted. + type: bool + default: false + version_added: 4.6.0 +extends_documentation_fragment: + - community.general.ldap.documentation + - community.general.attributes + +''' + + +EXAMPLES = """ +- name: Make sure we have a parent entry for users + community.general.ldap_entry: + dn: ou=users,dc=example,dc=com + objectClass: organizationalUnit + +- name: Make sure we have an admin user + community.general.ldap_entry: + dn: cn=admin,dc=example,dc=com + objectClass: + - simpleSecurityObject + - organizationalRole + attributes: + description: An LDAP administrator + userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" + +- name: Set possible values for attributes elements + community.general.ldap_entry: + dn: cn=admin,dc=example,dc=com + objectClass: + - simpleSecurityObject + - organizationalRole + attributes: + description: An LDAP Administrator + roleOccupant: + - cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com + - cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com + olcAccess: + - >- + {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + {1}to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read + +- name: Get rid of an old entry + community.general.ldap_entry: + dn: ou=stuff,dc=example,dc=com + state: absent + server_uri: ldap://localhost/ + bind_dn: cn=admin,dc=example,dc=com + bind_pw: password + +# +# The same as in the previous example but with the authentication details +# stored in the ldap_auth variable: +# +# ldap_auth: +# server_uri: ldap://localhost/ +# bind_dn: cn=admin,dc=example,dc=com +# bind_pw: password +# +# In the example below, 'args' is a task keyword, passed at the same level as the module +- name: Get rid of an old entry + community.general.ldap_entry: + dn: ou=stuff,dc=example,dc=com + state: absent + args: "{{ ldap_auth }}" +""" + + +RETURN = """ +# Default return values +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs + +LDAP_IMP_ERR = None +try: + import ldap.modlist + import ldap.controls + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +class LdapEntry(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + # Shortcuts + self.state = self.module.params['state'] + self.recursive = self.module.params['recursive'] + + # Add the objectClass into the list of attributes + self.module.params['attributes']['objectClass'] = ( + self.module.params['objectClass']) + + # Load attributes + if self.state == 'present': + self.attrs = self._load_attrs() + + def _load_attrs(self): + """ Turn attribute's value to array. 
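+        Scalar values are wrapped in single-element lists and all values are
+        encoded to bytes, as expected by python-ldap's addModlist().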
""" + attrs = {} + + for name, value in self.module.params['attributes'].items(): + if isinstance(value, list): + attrs[name] = list(map(to_bytes, value)) + else: + attrs[name] = [to_bytes(value)] + + return attrs + + def add(self): + """ If self.dn does not exist, returns a callable that will add it. """ + def _add(): + self.connection.add_s(self.dn, modlist) + + if not self._is_entry_present(): + modlist = ldap.modlist.addModlist(self.attrs) + action = _add + else: + action = None + + return action + + def delete(self): + """ If self.dn exists, returns a callable that will delete either + the item itself if the recursive option is not set or the whole branch + if it is. """ + def _delete(): + self.connection.delete_s(self.dn) + + def _delete_recursive(): + """ Attempt recurive deletion using the subtree-delete control. + If that fails, do it manually. """ + try: + subtree_delete = ldap.controls.ValueLessRequestControl('1.2.840.113556.1.4.805') + self.connection.delete_ext_s(self.dn, serverctrls=[subtree_delete]) + except ldap.NOT_ALLOWED_ON_NONLEAF: + search = self.connection.search_s(self.dn, ldap.SCOPE_SUBTREE, attrlist=('dn',)) + search.reverse() + for entry in search: + self.connection.delete_s(entry[0]) + + if self._is_entry_present(): + if self.recursive: + action = _delete_recursive + else: + action = _delete + else: + action = None + + return action + + def _is_entry_present(self): + try: + self.connection.search_s(self.dn, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + is_present = False + else: + is_present = True + + return is_present + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + attributes=dict(default={}, type='dict'), + objectClass=dict(type='list', elements='str'), + state=dict(default='present', choices=['present', 'absent']), + recursive=dict(default=False, type='bool'), + ), + required_if=[('state', 'present', ['objectClass'])], + supports_check_mode=True, + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + state = module.params['state'] + + # Instantiate the LdapEntry object + ldap = LdapEntry(module) + + # Get the action function + if state == 'present': + action = ldap.add() + elif state == 'absent': + action = ldap.delete() + + # Perform the action + if action is not None and not module.check_mode: + try: + action() + except Exception as e: + module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=(action is not None)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ldap_passwd.py b/ansible_collections/community/general/plugins/modules/ldap_passwd.py new file mode 100644 index 000000000..f47fa330e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ldap_passwd.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-2018, Keller Fuchs +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ldap_passwd +short_description: Set passwords in LDAP +description: + - Set a password for an LDAP entry. This module only asserts that + a given password is valid for a given entry. To assert the + existence of an entry, see M(community.general.ldap_entry). 
+notes: + - The default authentication settings will attempt to use a SASL EXTERNAL + bind over a UNIX domain socket. This works well with the default Ubuntu + install for example, which includes a cn=peercred,cn=external,cn=auth ACL + rule allowing root to modify the server configuration. If you need to use + a simple bind to access your server, pass the credentials in I(bind_dn) + and I(bind_pw). +author: + - Keller Fuchs (@KellerFuchs) +requirements: + - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + passwd: + description: + - The (plaintext) password to be set for I(dn). + type: str +extends_documentation_fragment: + - community.general.ldap.documentation + - community.general.attributes + +''' + +EXAMPLES = """ +- name: Set a password for the admin user + community.general.ldap_passwd: + dn: cn=admin,dc=example,dc=com + passwd: "{{ vault_secret }}" + +- name: Setting passwords in bulk + community.general.ldap_passwd: + dn: "{{ item.key }}" + passwd: "{{ item.value }}" + with_dict: + alice: alice123123 + bob: "|30b!" + admin: "{{ vault_secret }}" +""" + +RETURN = """ +modlist: + description: list of modified parameters + returned: success + type: list + sample: + - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]] +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs + +LDAP_IMP_ERR = None +try: + import ldap + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +class LdapPasswd(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + # Shortcuts + self.passwd = self.module.params['passwd'] + + def passwd_check(self): + try: + tmp_con = ldap.initialize(self.server_uri) + except ldap.LDAPError as e: + self.fail("Cannot initialize LDAP connection", e) + + if self.start_tls: + try: + tmp_con.start_tls_s() + except ldap.LDAPError as e: + self.fail("Cannot start TLS.", e) + + try: + tmp_con.simple_bind_s(self.dn, self.passwd) + except ldap.INVALID_CREDENTIALS: + return True + except ldap.LDAPError as e: + self.fail("Cannot bind to the server.", e) + else: + return False + finally: + tmp_con.unbind() + + def passwd_set(self): + # Exit early if the password is already valid + if not self.passwd_check(): + return False + + # Change the password (or throw an exception) + try: + self.connection.passwd_s(self.dn, None, self.passwd) + except ldap.LDAPError as e: + self.fail("Unable to set password", e) + + # Password successfully changed + return True + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs(passwd=dict(no_log=True)), + supports_check_mode=True, + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + ldap = LdapPasswd(module) + + if module.check_mode: + module.exit_json(changed=ldap.passwd_check()) + + module.exit_json(changed=ldap.passwd_set()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ldap_search.py b/ansible_collections/community/general/plugins/modules/ldap_search.py new file mode 100644 index 000000000..ad79a2d73 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ldap_search.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2020, Sebastian Pfahl +# GNU General Public License 
v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ldap_search
+version_added: '0.2.0'
+short_description: Search for entries in an LDAP server
+description:
+  - Return the results of an LDAP search.
+notes:
+  - The default authentication settings will attempt to use a SASL EXTERNAL
+    bind over a UNIX domain socket. This works well with the default Ubuntu
+    install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
+    rule allowing root to modify the server configuration. If you need to use
+    a simple bind to access your server, pass the credentials in I(bind_dn)
+    and I(bind_pw).
+author:
+  - Sebastian Pfahl (@eryx12o45)
+requirements:
+  - python-ldap
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  dn:
+    required: true
+    type: str
+    description:
+      - The LDAP DN to search in.
+  scope:
+    choices: [base, onelevel, subordinate, children]
+    default: base
+    type: str
+    description:
+      - The LDAP scope to use.
+  filter:
+    default: '(objectClass=*)'
+    type: str
+    description:
+      - Used for filtering the LDAP search result.
+  attrs:
+    type: list
+    elements: str
+    description:
+      - A list of attributes for limiting the result. Use an
+        actual list or a comma-separated string.
+  schema:
+    default: false
+    type: bool
+    description:
+      - Set to C(true) to return the full attribute schema of entries, not
+        their attribute values. Overrides I(attrs) when provided.
+extends_documentation_fragment:
+  - community.general.ldap.documentation
+  - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Return all entries within the 'groups' organizational unit.
+ community.general.ldap_search: + dn: "ou=groups,dc=example,dc=com" + register: ldap_groups + +- name: Return GIDs for all groups + community.general.ldap_search: + dn: "ou=groups,dc=example,dc=com" + scope: "onelevel" + attrs: + - "gidNumber" + register: ldap_group_gids +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs + +LDAP_IMP_ERR = None +try: + import ldap + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + dn=dict(type='str', required=True), + scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']), + filter=dict(type='str', default='(objectClass=*)'), + attrs=dict(type='list', elements='str'), + schema=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + try: + LdapSearch(module).main() + except Exception as exception: + module.fail_json(msg="Attribute action failed.", details=to_native(exception)) + + module.exit_json(changed=False) + + +def _extract_entry(dn, attrs): + extracted = {'dn': dn} + for attr, val in list(attrs.items()): + if len(val) == 1: + extracted[attr] = val[0] + else: + extracted[attr] = val + return extracted + + +class LdapSearch(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + self.filterstr = self.module.params['filter'] + self.attrlist = [] + self._load_scope() + self._load_attrs() + self._load_schema() + + def _load_schema(self): + self.schema = self.module.boolean(self.module.params['schema']) + if self.schema: + self.attrsonly = 1 + else: + self.attrsonly = 0 + + def _load_scope(self): + spec = dict( + base=ldap.SCOPE_BASE, + onelevel=ldap.SCOPE_ONELEVEL, + subordinate=ldap.SCOPE_SUBORDINATE, + children=ldap.SCOPE_SUBTREE, + ) + self.scope = spec[self.module.params['scope']] + + def _load_attrs(self): + self.attrlist = self.module.params['attrs'] or None + + def main(self): + results = self.perform_search() + self.module.exit_json(changed=False, results=results) + + def perform_search(self): + try: + results = self.connection.search_s( + self.dn, + self.scope, + filterstr=self.filterstr, + attrlist=self.attrlist, + attrsonly=self.attrsonly + ) + ldap_entries = [] + for result in results: + if isinstance(result[1], dict): + if self.schema: + ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys()))) + else: + ldap_entries.append(_extract_entry(result[0], result[1])) + return ldap_entries + except ldap.NO_SUCH_OBJECT: + self.module.fail_json(msg="Base not found: {0}".format(self.dn)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/librato_annotation.py b/ansible_collections/community/general/plugins/modules/librato_annotation.py new file mode 100644 index 000000000..ebfb75154 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/librato_annotation.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) Seth Edwards, 2014 +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: librato_annotation +short_description: Create an annotation in librato +description: + - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically +author: "Seth Edwards (@Sedward)" +requirements: [] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + user: + type: str + description: + - Librato account username + required: true + api_key: + type: str + description: + - Librato account api key + required: true + name: + type: str + description: + - The annotation stream name + - If the annotation stream does not exist, it will be created automatically + required: false + title: + type: str + description: + - The title of an annotation is a string and may contain spaces + - The title should be a short, high-level summary of the annotation e.g. v45 Deployment + required: true + source: + type: str + description: + - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population + required: false + description: + type: str + description: + - The description contains extra metadata about a particular annotation + - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! + required: false + start_time: + type: int + description: + - The unix timestamp indicating the time at which the event referenced by this annotation started + required: false + end_time: + type: int + description: + - The unix timestamp indicating the time at which the event referenced by this annotation ended + - For events that have a duration, this is a useful way to annotate the duration of the event + required: false + links: + type: list + elements: dict + description: + - See examples +''' + +EXAMPLES = ''' +- name: Create a simple annotation event with a source + community.general.librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXX + title: App Config Change + source: foo.bar + description: This is a detailed description of the config change + +- name: Create an annotation that includes a link + community.general.librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: code.deploy + title: app code deploy + description: this is a detailed description of a deployment + links: + - rel: example + href: http://www.example.com/deploy + +- name: Create an annotation with a start_time and end_time + community.general.librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: maintenance + title: Maintenance window + description: This is a detailed description of maintenance + start_time: 1395940006 + end_time: 1395954406 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def post_annotation(module): + user = module.params['user'] + api_key = module.params['api_key'] + name = module.params['name'] + title = module.params['title'] + + url = 'https://metrics-api.librato.com/v1/annotations/%s' % name + params = {} + params['title'] = title + + if module.params['source'] is not None: + params['source'] = module.params['source'] + if module.params['description'] is not None: + params['description'] = module.params['description'] + if module.params['start_time'] is not None: + 
params['start_time'] = module.params['start_time']
+    if module.params['end_time'] is not None:
+        params['end_time'] = module.params['end_time']
+    if module.params['links'] is not None:
+        params['links'] = module.params['links']
+
+    json_body = module.jsonify(params)
+
+    headers = {}
+    headers['Content-Type'] = 'application/json'
+
+    # Hack: pass the credentials the way fetch_url expects them
+    module.params['url_username'] = user
+    module.params['url_password'] = api_key
+    response, info = fetch_url(module, url, data=json_body, headers=headers)
+    response_code = str(info['status'])
+    response_body = info['body']
+    if info['status'] != 201:
+        if info['status'] >= 400:
+            module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
+        else:
+            module.fail_json(msg="Request Failed. Response code: " + response_code)
+    response = response.read()
+    module.exit_json(changed=True, annotation=response)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            user=dict(required=True),
+            api_key=dict(required=True, no_log=True),
+            name=dict(required=False),
+            title=dict(required=True),
+            source=dict(required=False),
+            description=dict(required=False),
+            start_time=dict(required=False, default=None, type='int'),
+            end_time=dict(required=False, default=None, type='int'),
+            links=dict(type='list', elements='dict')
+        )
+    )
+
+    post_annotation(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/linode.py b/ansible_collections/community/general/plugins/modules/linode.py
new file mode 100644
index 000000000..404e7a393
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/linode.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode
+short_description: Manage instances on the Linode Public Cloud
+description:
+  - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Indicate desired state of the resource.
+    choices: [ absent, active, deleted, present, restarted, started, stopped ]
+    default: present
+    type: str
+  api_key:
+    description:
+      - Linode API key.
+      - C(LINODE_API_KEY) env variable can be used instead.
+    type: str
+    required: true
+  name:
+    description:
+      - Name to give the instance (alphanumeric, dashes, underscore).
+      - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
+    required: true
+    type: str
+  displaygroup:
+    description:
+      - Add the instance to a Display Group in Linode Manager.
+    type: str
+    default: ''
+  linode_id:
+    description:
+      - Unique ID of a Linode server. This value is read-only in the sense that
+        if you specify it on creation of a Linode it will not be used. The
+        Linode API generates these IDs and we can use those generated values
+        here to reference a Linode more specifically. This is useful for idempotence.
+    aliases: [ lid ]
+    type: int
+  additional_disks:
+    description:
+      - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
+ - Dictionary takes Size, Label, Type. Size is in MB. + type: list + elements: dict + alert_bwin_enabled: + description: + - Set status of bandwidth in alerts. + type: bool + alert_bwin_threshold: + description: + - Set threshold in MB of bandwidth in alerts. + type: int + alert_bwout_enabled: + description: + - Set status of bandwidth out alerts. + type: bool + alert_bwout_threshold: + description: + - Set threshold in MB of bandwidth out alerts. + type: int + alert_bwquota_enabled: + description: + - Set status of bandwidth quota alerts as percentage of network transfer quota. + type: bool + alert_bwquota_threshold: + description: + - Set threshold in MB of bandwidth quota alerts. + type: int + alert_cpu_enabled: + description: + - Set status of receiving CPU usage alerts. + type: bool + alert_cpu_threshold: + description: + - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total. + type: int + alert_diskio_enabled: + description: + - Set status of receiving disk IO alerts. + type: bool + alert_diskio_threshold: + description: + - Set threshold for average IO ops/sec over 2 hour period. + type: int + backupweeklyday: + description: + - Day of the week to take backups. + type: int + backupwindow: + description: + - The time window in which backups will be taken. + type: int + plan: + description: + - plan to use for the instance (Linode plan) + type: int + payment_term: + description: + - payment term to use for the instance (payment term in months) + default: 1 + choices: [ 1, 12, 24 ] + type: int + password: + description: + - root password to apply to a new server (auto generated if missing) + type: str + private_ip: + description: + - Add private IPv4 address when Linode is created. + - Default is C(false). + type: bool + ssh_pub_key: + description: + - SSH public key applied to root user + type: str + swap: + description: + - swap size in MB + default: 512 + type: int + distribution: + description: + - distribution to use for the instance (Linode Distribution) + type: int + datacenter: + description: + - datacenter to create an instance in (Linode Datacenter) + type: int + kernel_id: + description: + - kernel to use for the instance (Linode Kernel) + type: int + wait: + description: + - wait for the instance to be in state C(running) before returning + type: bool + default: true + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 + type: int + watchdog: + description: + - Set status of Lassie watchdog. + type: bool + default: true +requirements: + - python >= 2.6 + - linode-python +author: +- Vincent Viallet (@zbal) +notes: + - Please note, linode-python does not have python 3 support. + - This module uses the now deprecated v3 of the Linode API. + - Please review U(https://www.linode.com/api/linode) for determining the required parameters. 
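As the notes above say, this module drives the retired Linode v3 API through the linode-python bindings, which per those notes do not support Python 3. For orientation, a minimal sketch of the credential handshake the module performs before doing any work; the key is a placeholder, and the calls mirror those used in main() and linodeServers() below:

from linode import api as linode_api

# Placeholder key; the module reads it from the api_key option or the
# LINODE_API_KEY environment variable.
api = linode_api.Api('longStringFromLinodeApi')
api.test_echo()  # cheap call used only to validate the credentials
# v3 responses are plain dicts with upper-case keys such as 'LINODEID'
servers = api.linode_list()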
+''' + +EXAMPLES = ''' + +- name: Create a new Linode + community.general.linode: + name: linode-test1 + plan: 1 + datacenter: 7 + distribution: 129 + state: present + register: linode_creation + +- name: Create a server with a private IP Address + community.general.linode: + module: linode + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + private_ip: true + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present + delegate_to: localhost + register: linode_creation + +- name: Fully configure new server + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 4 + datacenter: 2 + distribution: 99 + kernel_id: 138 + password: 'superSecureRootPassword' + private_ip: true + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present + alert_bwquota_enabled: true + alert_bwquota_threshold: 80 + alert_bwin_enabled: true + alert_bwin_threshold: 10 + alert_cpu_enabled: true + alert_cpu_threshold: 210 + alert_bwout_enabled: true + alert_bwout_threshold: 10 + alert_diskio_enabled: true + alert_diskio_threshold: 10000 + backupweeklyday: 1 + backupwindow: 2 + displaygroup: 'test' + additional_disks: + - {Label: 'disk1', Size: 2500, Type: 'raw'} + - {Label: 'newdisk', Size: 2000} + watchdog: true + delegate_to: localhost + register: linode_creation + +- name: Ensure a running server (create if missing) + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present + delegate_to: localhost + register: linode_creation + +- name: Delete a server + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: absent + delegate_to: localhost + +- name: Stop a server + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: stopped + delegate_to: localhost + +- name: Reboot a server + community.general.linode: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: restarted + delegate_to: localhost +''' + +import time +import traceback + +LINODE_IMP_ERR = None +try: + from linode import api as linode_api + HAS_LINODE = True +except ImportError: + LINODE_IMP_ERR = traceback.format_exc() + HAS_LINODE = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback + + +def randompass(): + ''' + Generate a long random password that comply to Linode requirements + ''' + # Linode API currently requires the following: + # It must contain at least two of these four character classes: + # lower case letters - upper case letters - numbers - punctuation + # we play it safe :) + import random + import string + # as of python 2.4, this reseeds the PRNG from urandom + random.seed() + lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6)) + upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) + number = ''.join(random.choice(string.digits) for x in range(6)) + punct = ''.join(random.choice(string.punctuation) for x in range(6)) + p = lower + upper + number + punct + return ''.join(random.sample(p, len(p))) + + +def getInstanceDetails(api, 
server):
+    '''
+    Return the details of an instance, populating IPs, etc.
+    '''
+    instance = {'id': server['LINODEID'],
+                'name': server['LABEL'],
+                'public': [],
+                'private': []}
+
+    # Populate with ips
+    for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
+        if ip['ISPUBLIC'] and 'ipv4' not in instance:
+            instance['ipv4'] = ip['IPADDRESS']
+            instance['fqdn'] = ip['RDNS_NAME']
+        if ip['ISPUBLIC']:
+            instance['public'].append({'ipv4': ip['IPADDRESS'],
+                                       'fqdn': ip['RDNS_NAME'],
+                                       'ip_id': ip['IPADDRESSID']})
+        else:
+            instance['private'].append({'ipv4': ip['IPADDRESS'],
+                                        'fqdn': ip['RDNS_NAME'],
+                                        'ip_id': ip['IPADDRESSID']})
+    return instance
+
+
+def linodeServers(module, api, state, name,
+                  displaygroup, plan, additional_disks, distribution,
+                  datacenter, kernel_id, linode_id, payment_term, password,
+                  private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
+    instances = []
+    changed = False
+    new_server = False
+    servers = []
+    disks = []
+    configs = []
+    jobs = []
+
+    # See if we can match an existing server's details with the provided linode_id
+    if linode_id:
+        # For the moment we only consider linode_id as the criterion for matching.
+        # Later we can use more (size, name, etc.) and update existing servers.
+        servers = api.linode_list(LinodeId=linode_id)
+        # Attempt to fetch details about disks and configs only if servers are
+        # found with linode_id
+        if servers:
+            disks = api.linode_disk_list(LinodeId=linode_id)
+            configs = api.linode_config_list(LinodeId=linode_id)
+
+    # Act on the state
+    if state in ('active', 'present', 'started'):
+        # TODO: validate all the plan / distribution / datacenter are valid
+
+        # Multi step process/validation:
+        #  - need linode_id (entity)
+        #  - need disk_id for linode_id - create disk from distrib
+        #  - need config_id for linode_id - create config (need kernel)
+
+        # Each create step triggers a job that needs to be waited for.
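+        # The creation logic below is written to fall through: the server
+        # entity, its disks and its boot config are each created only when
+        # missing, so an interrupted run can be resumed idempotently. Job IDs
+        # returned by the API are collected in `jobs`; completion is observed
+        # by polling the server STATUS in the wait loop further down.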
+ if not servers: + for arg in (name, plan, distribution, datacenter): + if not arg: + module.fail_json(msg='%s is required for %s state' % (arg, state)) + # Create linode entity + new_server = True + + # Get size of all individually listed disks to subtract from Distribution disk + used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks) + + try: + res = api.linode_create(DatacenterID=datacenter, PlanID=plan, + PaymentTerm=payment_term) + linode_id = res['LinodeID'] + # Update linode Label to match name + api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name)) + # Update Linode with Ansible configuration options + api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs) + # Save server + servers = api.linode_list(LinodeId=linode_id) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + + # Add private IP to Linode + if private_ip: + try: + res = api.linode_ip_addprivate(LinodeID=linode_id) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + + if not disks: + for arg in (name, linode_id, distribution): + if not arg: + module.fail_json(msg='%s is required for %s state' % (arg, state)) + # Create disks (1 from distrib, 1 for SWAP) + new_server = True + try: + if not password: + # Password is required on creation, if not provided generate one + password = randompass() + if not swap: + swap = 512 + # Create data disk + size = servers[0]['TOTALHD'] - used_disk_space - swap + + if ssh_pub_key: + res = api.linode_disk_createfromdistribution( + LinodeId=linode_id, DistributionID=distribution, + rootPass=password, rootSSHKey=ssh_pub_key, + Label='%s data disk (lid: %s)' % (name, linode_id), + Size=size) + else: + res = api.linode_disk_createfromdistribution( + LinodeId=linode_id, DistributionID=distribution, + rootPass=password, + Label='%s data disk (lid: %s)' % (name, linode_id), + Size=size) + jobs.append(res['JobID']) + # Create SWAP disk + res = api.linode_disk_create(LinodeId=linode_id, Type='swap', + Label='%s swap disk (lid: %s)' % (name, linode_id), + Size=swap) + # Create individually listed disks at specified size + if additional_disks: + for disk in additional_disks: + # If a disk Type is not passed in, default to ext4 + if disk.get('Type') is None: + disk['Type'] = 'ext4' + res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type']) + + jobs.append(res['JobID']) + except Exception as e: + # TODO: destroy linode ? 
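+                # Nothing is rolled back on failure: the Linode entity created
+                # above, and any disks the API already accepted, are left in
+                # place. That partial state is what the TODO above refers to.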
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + + if not configs: + for arg in (name, linode_id, distribution): + if not arg: + module.fail_json(msg='%s is required for %s state' % (arg, state)) + + # Check architecture + for distrib in api.avail_distributions(): + if distrib['DISTRIBUTIONID'] != distribution: + continue + arch = '32' + if distrib['IS64BIT']: + arch = '64' + break + + # Get latest kernel matching arch if kernel_id is not specified + if not kernel_id: + for kernel in api.avail_kernels(): + if not kernel['LABEL'].startswith('Latest %s' % arch): + continue + kernel_id = kernel['KERNELID'] + break + + # Get disk list + disks_id = [] + for disk in api.linode_disk_list(LinodeId=linode_id): + if disk['TYPE'] == 'ext3': + disks_id.insert(0, str(disk['DISKID'])) + continue + disks_id.append(str(disk['DISKID'])) + # Trick to get the 9 items in the list + while len(disks_id) < 9: + disks_id.append('') + disks_list = ','.join(disks_id) + + # Create config + new_server = True + try: + api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, + Disklist=disks_list, Label='%s config' % name) + configs = api.linode_config_list(LinodeId=linode_id) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + + # Start / Ensure servers are running + for server in servers: + # Refresh server state + server = api.linode_list(LinodeId=server['LINODEID'])[0] + # Ensure existing servers are up and running, boot if necessary + if server['STATUS'] != 1: + res = api.linode_boot(LinodeId=linode_id) + jobs.append(res['JobID']) + changed = True + + # wait here until the instances are up + wait_timeout = time.time() + wait_timeout + while wait and wait_timeout > time.time(): + # refresh the server details + server = api.linode_list(LinodeId=server['LINODEID'])[0] + # status: + # -2: Boot failed + # 1: Running + if server['STATUS'] in (-2, 1): + break + time.sleep(5) + if wait and wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID'])) + # Get a fresh copy of the server details + server = api.linode_list(LinodeId=server['LINODEID'])[0] + if server['STATUS'] == -2: + module.fail_json(msg='%s (lid: %s) failed to boot' % + (server['LABEL'], server['LINODEID'])) + # From now on we know the task is a success + # Build instance report + instance = getInstanceDetails(api, server) + # depending on wait flag select the status + if wait: + instance['status'] = 'Running' + else: + instance['status'] = 'Starting' + + # Return the root password if this is a new box and no SSH key + # has been provided + if new_server and not ssh_pub_key: + instance['password'] = password + instances.append(instance) + + elif state in ('stopped',): + if not servers: + module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) + + for server in servers: + instance = getInstanceDetails(api, server) + if server['STATUS'] != 2: + try: + res = api.linode_shutdown(LinodeId=linode_id) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + instance['status'] = 'Stopping' + changed = True + else: + instance['status'] = 'Stopped' + instances.append(instance) + + elif state in ('restarted',): + if not servers: + module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) + + for server in servers: + instance = getInstanceDetails(api, server) + try: + res = 
api.linode_reboot(LinodeId=server['LINODEID']) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + instance['status'] = 'Restarting' + changed = True + instances.append(instance) + + elif state in ('absent', 'deleted'): + for server in servers: + instance = getInstanceDetails(api, server) + try: + api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + instance['status'] = 'Deleting' + changed = True + instances.append(instance) + + # Ease parsing if only 1 instance + if len(instances) == 1: + module.exit_json(changed=changed, instance=instances[0]) + + module.exit_json(changed=changed, instances=instances) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', + choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']), + api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])), + name=dict(type='str', required=True), + alert_bwin_enabled=dict(type='bool'), + alert_bwin_threshold=dict(type='int'), + alert_bwout_enabled=dict(type='bool'), + alert_bwout_threshold=dict(type='int'), + alert_bwquota_enabled=dict(type='bool'), + alert_bwquota_threshold=dict(type='int'), + alert_cpu_enabled=dict(type='bool'), + alert_cpu_threshold=dict(type='int'), + alert_diskio_enabled=dict(type='bool'), + alert_diskio_threshold=dict(type='int'), + backupweeklyday=dict(type='int'), + backupwindow=dict(type='int'), + displaygroup=dict(type='str', default=''), + plan=dict(type='int'), + additional_disks=dict(type='list', elements='dict'), + distribution=dict(type='int'), + datacenter=dict(type='int'), + kernel_id=dict(type='int'), + linode_id=dict(type='int', aliases=['lid']), + payment_term=dict(type='int', default=1, choices=[1, 12, 24]), + password=dict(type='str', no_log=True), + private_ip=dict(type='bool'), + ssh_pub_key=dict(type='str'), + swap=dict(type='int', default=512), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=300), + watchdog=dict(type='bool', default=True), + ), + required_if=[ + ('state', 'restarted', ['linode_id']), + ('state', 'stopped', ['linode_id']), + ] + ) + + if not HAS_LINODE: + module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR) + + state = module.params.get('state') + api_key = module.params.get('api_key') + name = module.params.get('name') + alert_bwin_enabled = module.params.get('alert_bwin_enabled') + alert_bwin_threshold = module.params.get('alert_bwin_threshold') + alert_bwout_enabled = module.params.get('alert_bwout_enabled') + alert_bwout_threshold = module.params.get('alert_bwout_threshold') + alert_bwquota_enabled = module.params.get('alert_bwquota_enabled') + alert_bwquota_threshold = module.params.get('alert_bwquota_threshold') + alert_cpu_enabled = module.params.get('alert_cpu_enabled') + alert_cpu_threshold = module.params.get('alert_cpu_threshold') + alert_diskio_enabled = module.params.get('alert_diskio_enabled') + alert_diskio_threshold = module.params.get('alert_diskio_threshold') + backupweeklyday = module.params.get('backupweeklyday') + backupwindow = module.params.get('backupwindow') + displaygroup = module.params.get('displaygroup') + plan = module.params.get('plan') + additional_disks = module.params.get('additional_disks') + distribution = module.params.get('distribution') + 
datacenter = module.params.get('datacenter') + kernel_id = module.params.get('kernel_id') + linode_id = module.params.get('linode_id') + payment_term = module.params.get('payment_term') + password = module.params.get('password') + private_ip = module.params.get('private_ip') + ssh_pub_key = module.params.get('ssh_pub_key') + swap = module.params.get('swap') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + watchdog = int(module.params.get('watchdog')) + + check_items = dict( + alert_bwin_enabled=alert_bwin_enabled, + alert_bwin_threshold=alert_bwin_threshold, + alert_bwout_enabled=alert_bwout_enabled, + alert_bwout_threshold=alert_bwout_threshold, + alert_bwquota_enabled=alert_bwquota_enabled, + alert_bwquota_threshold=alert_bwquota_threshold, + alert_cpu_enabled=alert_cpu_enabled, + alert_cpu_threshold=alert_cpu_threshold, + alert_diskio_enabled=alert_diskio_enabled, + alert_diskio_threshold=alert_diskio_threshold, + backupweeklyday=backupweeklyday, + backupwindow=backupwindow, + ) + + kwargs = dict((k, v) for k, v in check_items.items() if v is not None) + + # setup the auth + try: + api = linode_api.Api(api_key) + api.test_echo() + except Exception as e: + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) + + linodeServers(module, api, state, name, + displaygroup, plan, + additional_disks, distribution, datacenter, kernel_id, linode_id, + payment_term, password, private_ip, ssh_pub_key, swap, wait, + wait_timeout, watchdog, **kwargs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/linode_v4.py b/ansible_collections/community/general/plugins/modules/linode_v4.py new file mode 100644 index 000000000..f213af125 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/linode_v4.py @@ -0,0 +1,319 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: linode_v4 +short_description: Manage instances on the Linode cloud +description: Manage instances on the Linode cloud. +requirements: + - python >= 2.7 + - linode_api4 >= 2.0.0 +author: + - Luke Murphy (@decentral1se) +notes: + - No Linode resizing is currently implemented. This module will, in time, + replace the current Linode module which uses deprecated API bindings on the + Linode side. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + region: + description: + - The region of the instance. This is a required parameter only when + creating Linode instances. See + U(https://www.linode.com/docs/api/regions/). + type: str + image: + description: + - The image of the instance. This is a required parameter only when + creating Linode instances. See + U(https://www.linode.com/docs/api/images/). + type: str + type: + description: + - The type of the instance. This is a required parameter only when + creating Linode instances. See + U(https://www.linode.com/docs/api/linode-types/). + type: str + label: + description: + - The instance label. This label is used as the main determiner for + idempotence for the module and is therefore mandatory. 
+ type: str + required: true + group: + description: + - The group that the instance should be marked under. Please note, that + group labelling is deprecated but still supported. The encouraged + method for marking instances is to use tags. + type: str + private_ip: + description: + - If C(true), the created Linode will have private networking enabled and + assigned a private IPv4 address. + type: bool + default: false + version_added: 3.0.0 + tags: + description: + - The tags that the instance should be marked under. See + U(https://www.linode.com/docs/api/tags/). + type: list + elements: str + root_pass: + description: + - The password for the root user. If not specified, one will be + generated. This generated password will be available in the task + success JSON. + type: str + authorized_keys: + description: + - A list of SSH public key parts to deploy for the root user. + type: list + elements: str + state: + description: + - The desired instance state. + type: str + choices: + - present + - absent + required: true + access_token: + description: + - The Linode API v4 access token. It may also be specified by exposing + the C(LINODE_ACCESS_TOKEN) environment variable. See + U(https://www.linode.com/docs/api#access-and-authentication). + required: true + type: str + stackscript_id: + description: + - The numeric ID of the StackScript to use when creating the instance. + See U(https://www.linode.com/docs/api/stackscripts/). + type: int + version_added: 1.3.0 + stackscript_data: + description: + - An object containing arguments to any User Defined Fields present in + the StackScript used when creating the instance. + Only valid when a stackscript_id is provided. + See U(https://www.linode.com/docs/api/stackscripts/). + type: dict + version_added: 1.3.0 +''' + +EXAMPLES = """ +- name: Create a new Linode. + community.general.linode_v4: + label: new-linode + type: g6-nanode-1 + region: eu-west + image: linode/debian9 + root_pass: passw0rd + authorized_keys: + - "ssh-rsa ..." + stackscript_id: 1337 + stackscript_data: + variable: value + state: present + +- name: Delete that new Linode. + community.general.linode_v4: + label: new-linode + state: absent +""" + +RETURN = """ +instance: + description: The instance description in JSON serialized form. + returned: Always. 
+ type: dict + sample: { + "root_pass": "foobar", # if auto-generated + "alerts": { + "cpu": 90, + "io": 10000, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + } + }, + "created": "2018-09-26T08:12:33", + "group": "Foobar Group", + "hypervisor": "kvm", + "id": 10480444, + "image": "linode/centos7", + "ipv4": [ + "130.132.285.233" + ], + "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64", + "label": "lin-foo", + "region": "eu-west", + "specs": { + "disk": 25600, + "memory": 1024, + "transfer": 1000, + "vcpus": 1 + }, + "status": "running", + "tags": [], + "type": "g6-nanode-1", + "updated": "2018-09-26T10:10:14", + "watchdog_enabled": true + } +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent + +LINODE_IMP_ERR = None +try: + from linode_api4 import Instance, LinodeClient + HAS_LINODE_DEPENDENCY = True +except ImportError: + LINODE_IMP_ERR = traceback.format_exc() + HAS_LINODE_DEPENDENCY = False + + +def create_linode(module, client, **kwargs): + """Creates a Linode instance and handles return format.""" + if kwargs['root_pass'] is None: + kwargs.pop('root_pass') + + try: + response = client.linode.instance_create(**kwargs) + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) + + try: + if isinstance(response, tuple): + instance, root_pass = response + instance_json = instance._raw_json + instance_json.update({'root_pass': root_pass}) + return instance_json + else: + return response._raw_json + except TypeError: + module.fail_json(msg='Unable to parse Linode instance creation response. Please raise a bug against this' + ' module on https://github.com/ansible-collections/community.general/issues' + ) + + +def maybe_instance_from_label(module, client): + """Try to retrieve an instance based on a label.""" + try: + label = module.params['label'] + result = client.linode.instances(Instance.label == label) + return result[0] + except IndexError: + return None + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. 
Saw: %s' % exception) + + +def initialise_module(): + """Initialise the module parameter specification.""" + return AnsibleModule( + argument_spec=dict( + label=dict(type='str', required=True), + state=dict( + type='str', + required=True, + choices=['present', 'absent'] + ), + access_token=dict( + type='str', + required=True, + no_log=True, + fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), + ), + authorized_keys=dict(type='list', elements='str', no_log=False), + group=dict(type='str'), + image=dict(type='str'), + private_ip=dict(type='bool', default=False), + region=dict(type='str'), + root_pass=dict(type='str', no_log=True), + tags=dict(type='list', elements='str'), + type=dict(type='str'), + stackscript_id=dict(type='int'), + stackscript_data=dict(type='dict'), + ), + supports_check_mode=False, + required_one_of=( + ['state', 'label'], + ), + required_together=( + ['region', 'image', 'type'], + ) + ) + + +def build_client(module): + """Build a LinodeClient.""" + return LinodeClient( + module.params['access_token'], + user_agent=get_user_agent('linode_v4_module') + ) + + +def main(): + """Module entrypoint.""" + module = initialise_module() + + if not HAS_LINODE_DEPENDENCY: + module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR) + + client = build_client(module) + instance = maybe_instance_from_label(module, client) + + if module.params['state'] == 'present' and instance is not None: + module.exit_json(changed=False, instance=instance._raw_json) + + elif module.params['state'] == 'present' and instance is None: + instance_json = create_linode( + module, client, + authorized_keys=module.params['authorized_keys'], + group=module.params['group'], + image=module.params['image'], + label=module.params['label'], + private_ip=module.params['private_ip'], + region=module.params['region'], + root_pass=module.params['root_pass'], + tags=module.params['tags'], + ltype=module.params['type'], + stackscript=module.params['stackscript_id'], + stackscript_data=module.params['stackscript_data'], + ) + module.exit_json(changed=True, instance=instance_json) + + elif module.params['state'] == 'absent' and instance is not None: + instance.delete() + module.exit_json(changed=True, instance=instance._raw_json) + + elif module.params['state'] == 'absent' and instance is None: + module.exit_json(changed=False, instance={}) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/listen_ports_facts.py b/ansible_collections/community/general/plugins/modules/listen_ports_facts.py new file mode 100644 index 000000000..bc630e1d2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/listen_ports_facts.py @@ -0,0 +1,428 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2017, Nathan Davison +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: listen_ports_facts +author: + - Nathan Davison (@ndavison) +description: + - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands. + - This module currently supports Linux only. +requirements: + - netstat or ss +short_description: Gather facts on processes listening on TCP and UDP ports +notes: + - | + C(ss) returns all processes for each listen address and port. 
+ This plugin will return each of them, so multiple entries for the same listen address and port are likely in results. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + command: + description: + - Override which command to use for fetching listen ports. + - 'By default module will use first found supported command on the system (in alphanumerical order).' + type: str + choices: + - netstat + - ss + version_added: 4.1.0 + include_non_listening: + description: + - Show both listening and non-listening sockets (for TCP this means established connections). + - Adds the return values C(state) and C(foreign_address) to the returned facts. + type: bool + default: false + version_added: 5.4.0 +''' + +EXAMPLES = r''' +- name: Gather facts on listening ports + community.general.listen_ports_facts: + +- name: TCP whitelist violation + ansible.builtin.debug: + msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist + vars: + tcp_listen_violations: "{{ ansible_facts.tcp_listen | selectattr('port', 'in', tcp_whitelist) | list }}" + tcp_whitelist: + - 22 + - 25 + loop: "{{ tcp_listen_violations }}" + +- name: List TCP ports + ansible.builtin.debug: + msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}" + +- name: List UDP ports + ansible.builtin.debug: + msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}" + +- name: List all ports + ansible.builtin.debug: + msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}" + +- name: Gather facts on all ports and override which command to use + community.general.listen_ports_facts: + command: 'netstat' + include_non_listening: true +''' + +RETURN = r''' +ansible_facts: + description: Dictionary containing details of TCP and UDP ports with listening servers + returned: always + type: complex + contains: + tcp_listen: + description: A list of processes that are listening on a TCP port. + returned: if TCP servers were found + type: list + contains: + address: + description: The address the server is listening on. + returned: always + type: str + sample: "0.0.0.0" + foreign_address: + description: The address of the remote end of the socket. + returned: if I(include_non_listening=true) + type: str + sample: "10.80.0.1" + version_added: 5.4.0 + state: + description: The state of the socket. + returned: if I(include_non_listening=true) + type: str + sample: "ESTABLISHED" + version_added: 5.4.0 + name: + description: The name of the listening process. + returned: if user permissions allow + type: str + sample: "mysqld" + pid: + description: The pid of the listening process. + returned: always + type: int + sample: 1223 + port: + description: The port the server is listening on. + returned: always + type: int + sample: 3306 + protocol: + description: The network protocol of the server. + returned: always + type: str + sample: "tcp" + stime: + description: The start time of the listening process. + returned: always + type: str + sample: "Thu Feb 2 13:29:45 2017" + user: + description: The user who is running the listening process. + returned: always + type: str + sample: "mysql" + udp_listen: + description: A list of processes that are listening on a UDP port. + returned: if UDP servers were found + type: list + contains: + address: + description: The address the server is listening on. 
+ returned: always + type: str + sample: "0.0.0.0" + foreign_address: + description: The address of the remote end of the socket. + returned: if I(include_non_listening=true) + type: str + sample: "10.80.0.1" + version_added: 5.4.0 + state: + description: The state of the socket. UDP is a connectionless protocol. Shows UCONN or ESTAB. + returned: if I(include_non_listening=true) + type: str + sample: "UCONN" + version_added: 5.4.0 + name: + description: The name of the listening process. + returned: if user permissions allow + type: str + sample: "rsyslogd" + pid: + description: The pid of the listening process. + returned: always + type: int + sample: 609 + port: + description: The port the server is listening on. + returned: always + type: int + sample: 514 + protocol: + description: The network protocol of the server. + returned: always + type: str + sample: "udp" + stime: + description: The start time of the listening process. + returned: always + type: str + sample: "Thu Feb 2 13:29:45 2017" + user: + description: The user who is running the listening process. + returned: always + type: str + sample: "root" +''' + +import re +import platform +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.basic import AnsibleModule + + +def split_pid_name(pid_name): + """ + Split the entry PID/Program name into the PID (int) and the name (str) + :param pid_name: PID/Program String seperated with a dash. E.g 51/sshd: returns pid = 51 and name = sshd + :return: PID (int) and the program name (str) + """ + try: + pid, name = pid_name.split("/", 1) + except ValueError: + # likely unprivileged user, so add empty name & pid + return 0, "" + else: + name = name.rstrip(":") + return int(pid), name + + +def netStatParse(raw): + """ + The netstat result can be either split in 6,7 or 8 elements depending on the values of state, process and name. + For UDP the state is always empty. For UDP and TCP the process can be empty. + So these cases have to be checked. + :param raw: Netstat raw output String. First line explains the format, each following line contains a connection. + :return: List of dicts, each dict contains protocol, state, local address, foreign address, port, name, pid for one + connection. + """ + results = list() + for line in raw.splitlines(): + if line.startswith(("tcp", "udp")): + # set variables to default state, in case they are not specified + state = "" + pid_and_name = "" + process = "" + formatted_line = line.split() + protocol, recv_q, send_q, address, foreign_address, rest = \ + formatted_line[0], formatted_line[1], formatted_line[2], formatted_line[3], formatted_line[4], formatted_line[5:] + address, port = address.rsplit(":", 1) + + if protocol.startswith("tcp"): + # nestat distinguishes between tcp6 and tcp + protocol = "tcp" + if len(rest) == 3: + state, pid_and_name, process = rest + if len(rest) == 2: + state, pid_and_name = rest + + if protocol.startswith("udp"): + # safety measure, similar to tcp6 + protocol = "udp" + if len(rest) == 2: + pid_and_name, process = rest + if len(rest) == 1: + pid_and_name = rest[0] + + pid, name = split_pid_name(pid_name=pid_and_name) + result = { + 'protocol': protocol, + 'state': state, + 'address': address, + 'foreign_address': foreign_address, + 'port': int(port), + 'name': name, + 'pid': int(pid), + } + if result not in results: + results.append(result) + return results + + +def ss_parse(raw): + """ + The ss_parse result can be either split in 6 or 7 elements depending on the process column, + e.g. 
due to unprivileged user. + :param raw: ss raw output String. First line explains the format, each following line contains a connection. + :return: List of dicts, each dict contains protocol, state, local address, foreign address, port, name, pid for one + connection. + """ + results = list() + regex_conns = re.compile(pattern=r'\[?(.+?)\]?:([0-9]+)$') + regex_pid = re.compile(pattern=r'"(.*?)",pid=(\d+)') + + lines = raw.splitlines() + + if len(lines) == 0 or not lines[0].startswith('Netid '): + # unexpected stdout from ss + raise EnvironmentError('Unknown stdout format of `ss`: {0}'.format(raw)) + + # skip headers (-H arg is not present on e.g. Ubuntu 16) + lines = lines[1:] + + for line in lines: + cells = line.split(None, 6) + try: + if len(cells) == 6: + # no process column, e.g. due to unprivileged user + process = str() + protocol, state, recv_q, send_q, local_addr_port, peer_addr_port = cells + else: + protocol, state, recv_q, send_q, local_addr_port, peer_addr_port, process = cells + except ValueError: + # unexpected stdout from ss + raise EnvironmentError( + 'Expected `ss` table layout "Netid, State, Recv-Q, Send-Q, Local Address:Port, Peer Address:Port" and \ + optionally "Process", but got something else: {0}'.format(line) + ) + + conns = regex_conns.search(local_addr_port) + pids = regex_pid.findall(process) + if conns is None and pids is None: + continue + + if pids is None: + # likely unprivileged user, so add empty name & pid + # as we do in netstat logic to be consistent with output + pids = [(str(), 0)] + + address = conns.group(1) + port = conns.group(2) + for name, pid in pids: + result = { + 'protocol': protocol, + 'state': state, + 'address': address, + 'foreign_address': peer_addr_port, + 'port': int(port), + 'name': name, + 'pid': int(pid), + } + results.append(result) + return results + + +def main(): + command_args = ['-p', '-l', '-u', '-n', '-t'] + commands_map = { + 'netstat': { + 'args': [], + 'parse_func': netStatParse + }, + 'ss': { + 'args': [], + 'parse_func': ss_parse + }, + } + module = AnsibleModule( + argument_spec=dict( + command=dict(type='str', choices=list(sorted(commands_map))), + include_non_listening=dict(default=False, type='bool'), + ), + supports_check_mode=True, + ) + + if module.params['include_non_listening']: + command_args = ['-p', '-u', '-n', '-t', '-a'] + + commands_map['netstat']['args'] = command_args + commands_map['ss']['args'] = command_args + + if platform.system() != 'Linux': + module.fail_json(msg='This module requires Linux.') + + def getPidSTime(pid): + ps_cmd = module.get_bin_path('ps', True) + rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)]) + stime = '' + if rc == 0: + for line in ps_output.splitlines(): + if 'started' not in line: + stime = line + return stime + + def getPidUser(pid): + ps_cmd = module.get_bin_path('ps', True) + rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)]) + user = '' + if rc == 0: + for line in ps_output.splitlines(): + if line != 'USER': + user = line + return user + + result = { + 'changed': False, + 'ansible_facts': { + 'tcp_listen': [], + 'udp_listen': [], + }, + } + + try: + command = None + bin_path = None + if module.params['command'] is not None: + command = module.params['command'] + bin_path = module.get_bin_path(command, required=True) + else: + for c in sorted(commands_map): + bin_path = module.get_bin_path(c, required=False) + if bin_path is not None: + command = c + break + + if bin_path is None: + raise 
+
+        # which ports are listening for connections?
+        args = commands_map[command]['args']
+        rc, stdout, stderr = module.run_command([bin_path] + args)
+        if rc == 0:
+            parse_func = commands_map[command]['parse_func']
+            results = parse_func(stdout)
+
+            for connection in results:
+                # only display state and foreign_address for include_non_listening.
+                if not module.params['include_non_listening']:
+                    connection.pop('state', None)
+                    connection.pop('foreign_address', None)
+                connection['stime'] = getPidSTime(connection['pid'])
+                connection['user'] = getPidUser(connection['pid'])
+                if connection['protocol'].startswith('tcp'):
+                    result['ansible_facts']['tcp_listen'].append(connection)
+                elif connection['protocol'].startswith('udp'):
+                    result['ansible_facts']['udp_listen'].append(connection)
+    except (KeyError, EnvironmentError) as e:
+        module.fail_json(msg=to_native(e))
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/lldp.py b/ansible_collections/community/general/plugins/modules/lldp.py
new file mode 100644
index 000000000..fb608ff13
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lldp.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lldp
+requirements: [ lldpctl ]
+short_description: Get details reported by lldp
+description:
+  - Reads data out of lldpctl.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options: {}
+author: "Andy Hill (@andyhky)"
+notes:
+  - Requires lldpd running and lldp enabled on switches.
+'''
+
+EXAMPLES = '''
+# Retrieve switch/port information
+- name: Gather information from lldp
+  community.general.lldp:
+
+- name: Print each switch/port
+  ansible.builtin.debug:
+    msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
+  with_items: "{{ lldp.keys() }}"
+
+# TASK: [Print each switch/port] ***********************************************************
+# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
+# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
+# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def gather_lldp(module):
+    cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue']
+    rc, output, err = module.run_command(cmd)
+    if output:
+        output_dict = {}
+        current_dict = {}
+        lldp_entries = output.split("\n")
+
+        for entry in lldp_entries:
+            if entry.startswith('lldp'):
+                path, value = entry.strip().split("=", 1)
+                path = path.split(".")
+                path_components, final = path[:-1], path[-1]
+            else:
+                value = current_dict[final] + '\n' + entry
+
+            current_dict = output_dict
+            for path_component in path_components:
+                current_dict[path_component] = current_dict.get(path_component, {})
+                current_dict = current_dict[path_component]
+            current_dict[final] = value
+        return output_dict
+
+
+def main():
+    module = AnsibleModule({})
+
+    lldp_output = gather_lldp(module)
+    try:
+        data = {'lldp': lldp_output['lldp']}
+        module.exit_json(ansible_facts=data)
+    except TypeError:
+        module.fail_json(msg="lldpctl command failed. Is lldpd running?")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/locale_gen.py b/ansible_collections/community/general/plugins/modules/locale_gen.py
new file mode 100644
index 000000000..fccdf977a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/locale_gen.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-

+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales
+description:
+  - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+author:
+  - Augustus Kling (@AugustusKling)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    type: str
+    description:
+      - Name and encoding of the locale, such as "en_GB.UTF-8".
+    required: true
+  state:
+    type: str
+    description:
+      - Whether the locale shall be present.
+    choices: [ absent, present ]
+    default: present
+'''
+
+EXAMPLES = '''
+- name: Ensure a locale exists
+  community.general.locale_gen:
+    name: de_CH.UTF-8
+    state: present
+'''
+
+import os
+import re
+from subprocess import Popen, PIPE, call
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+LOCALE_NORMALIZATION = {
+    ".utf8": ".UTF-8",
+    ".eucjp": ".EUC-JP",
+    ".iso885915": ".ISO-8859-15",
+    ".cp1251": ".CP1251",
+    ".koi8r": ".KOI8-R",
+    ".armscii8": ".ARMSCII-8",
+    ".euckr": ".EUC-KR",
+    ".gbk": ".GBK",
+    ".gb18030": ".GB18030",
+    ".euctw": ".EUC-TW",
+}
+
+
+# ===========================================
+# locale module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+    """Check if the given locale is available on the system. This is done by
+    checking either:
+    * if the locale is present in /etc/locale.gen
+    * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+    if ubuntuMode:
+        __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+        __locales_available = '/usr/share/i18n/SUPPORTED'
+    else:
+        __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+        __locales_available = '/etc/locale.gen'
+
+    re_compiled = re.compile(__regexp)
+    fd = open(__locales_available, 'r')
+    for line in fd:
+        result = re_compiled.match(line)
+        if result and result.group('locale') == name:
+            fd.close()
+            return True
+    fd.close()
+    return False
+
+
+def is_present(name):
+    """Checks if the given locale is currently installed."""
+    output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+    output = to_native(output)
+    return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+
+def fix_case(name):
+    """locale -a might return the encoding in either lower or upper case.
+    Passing through this function makes them uniform for comparisons."""
+    for s, r in LOCALE_NORMALIZATION.items():
+        name = name.replace(s, r)
+    return name
+
+
+def replace_line(existing_line, new_line):
+    """Replaces lines in /etc/locale.gen"""
+    try:
+        f = open("/etc/locale.gen", "r")
+        lines = [line.replace(existing_line, new_line) for line in f]
+    finally:
+        f.close()
+    try:
+        f = open("/etc/locale.gen", "w")
+        f.write("".join(lines))
+    finally:
+        f.close()
+
+
+def set_locale(name, enabled=True):
+    """ Sets the state of the locale. Defaults to enabled. """
+    search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+    if enabled:
+        new_string = r'%s \g<charset>' % (name)
+    else:
+        new_string = r'# %s \g<charset>' % (name)
+    try:
+        f = open("/etc/locale.gen", "r")
+        lines = [re.sub(search_string, new_string, line) for line in f]
+    finally:
+        f.close()
+    try:
+        f = open("/etc/locale.gen", "w")
+        f.write("".join(lines))
+    finally:
+        f.close()
+
+
+def apply_change(targetState, name):
+    """Create or remove locale.
+
+    Keyword arguments:
+    targetState -- Desired state, either present or absent.
+    name -- Name including encoding such as de_CH.UTF-8.
+    """
+    if targetState == "present":
+        # Create locale.
+        set_locale(name, enabled=True)
+    else:
+        # Delete locale.
+        set_locale(name, enabled=False)
+
+    localeGenExitValue = call("locale-gen")
+    if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def apply_change_ubuntu(targetState, name):
+    """Create or remove locale.
+
+    Keyword arguments:
+    targetState -- Desired state, either present or absent.
+    name -- Name including encoding such as de_CH.UTF-8.
+    """
+    if targetState == "present":
+        # Create locale.
+        # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+        localeGenExitValue = call(["locale-gen", name])
+    else:
+        # Deleting the locale involves discarding it from /var/lib/locales/supported.d/local and regenerating all locales.
+        try:
+            f = open("/var/lib/locales/supported.d/local", "r")
+            content = f.readlines()
+        finally:
+            f.close()
+        try:
+            f = open("/var/lib/locales/supported.d/local", "w")
+            for line in content:
+                locale, charset = line.split(' ')
+                if locale != name:
+                    f.write(line)
+        finally:
+            f.close()
+        # Purge locales and regenerate.
+        # Please provide a patch if you know how to avoid regenerating the locales to keep!
+        localeGenExitValue = call(["locale-gen", "--purge"])
+
+    if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+        ),
+        supports_check_mode=True,
+    )
+
+    name = module.params['name']
+    state = module.params['state']
+
+    if not os.path.exists("/var/lib/locales/supported.d/"):
+        if os.path.exists("/etc/locale.gen"):
+            # We found the common way to manage locales.
+            ubuntuMode = False
+        else:
+            module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+    else:
+        # Ubuntu created its own system to manage locales.
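+        # In this mode locale-gen maintains /var/lib/locales/supported.d/local itself,
+        # so we call locale-gen directly instead of editing /etc/locale.gen (see apply_change_ubuntu above).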
+        ubuntuMode = True
+
+    if not is_available(name, ubuntuMode):
+        module.fail_json(msg="The locale you've entered is not available "
+                             "on your system.")
+
+    if is_present(name):
+        prev_state = "present"
+    else:
+        prev_state = "absent"
+    changed = (prev_state != state)
+
+    if module.check_mode:
+        module.exit_json(changed=changed)
+    else:
+        if changed:
+            try:
+                if ubuntuMode is False:
+                    apply_change(state, name)
+                else:
+                    apply_change_ubuntu(state, name)
+            except EnvironmentError as e:
+                module.fail_json(msg=to_native(e), exitValue=e.errno)
+
+        module.exit_json(name=name, changed=changed, msg="OK")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/logentries.py b/ansible_collections/community/general/plugins/modules/logentries.py
new file mode 100644
index 000000000..f177cf454
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/logentries.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-

+# Copyright (c) 2013, Ivan Vanderbyl
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: "Ivan Vanderbyl (@ivanvanderbyl)"
+short_description: Module for tracking logs via logentries.com
+description:
+  - Sends logs to LogEntries in realtime.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  path:
+    type: str
+    description:
+      - Path to a log file.
+    required: true
+  state:
+    type: str
+    description:
+      - Whether the log should be followed or unfollowed.
+    choices: [ 'present', 'absent', 'followed', 'unfollowed' ]
+    required: false
+    default: present
+  name:
+    type: str
+    description:
+      - Name of the log.
+    required: false
+  logtype:
+    type: str
+    description:
+      - Type of the log.
+    required: false
+    aliases: [type]
+
+notes:
+  - Requires the LogEntries agent which can be installed following the instructions at logentries.com.
+'''
+EXAMPLES = '''
+- name: Track nginx logs
+  community.general.logentries:
+    path: /var/log/nginx/access.log
+    state: present
+    name: nginx-access-log
+
+- name: Stop tracking nginx logs
+  community.general.logentries:
+    path: /var/log/nginx/error.log
+    state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_log_status(module, le_path, path, state="present"):
+    """ Returns whether a log is followed or not. """
+
+    if state == "present":
+        rc, out, err = module.run_command([le_path, "followed", path])
+        if rc == 0:
+            return True
+
+    return False
+
+
+def follow_log(module, le_path, logs, name=None, logtype=None):
+    """ Follows one or more logs if not already followed.
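+    Always exits the module via exit_json/fail_json instead of returning to the caller.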
""" + + followed_count = 0 + + for log in logs: + if query_log_status(module, le_path, log): + continue + + if module.check_mode: + module.exit_json(changed=True) + + cmd = [le_path, 'follow', log] + if name: + cmd.extend(['--name', name]) + if logtype: + cmd.extend(['--type', logtype]) + rc, out, err = module.run_command(cmd) + + if not query_log_status(module, le_path, log): + module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) + + followed_count += 1 + + if followed_count > 0: + module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,)) + + module.exit_json(changed=False, msg="logs(s) already followed") + + +def unfollow_log(module, le_path, logs): + """ Unfollows one or more logs if followed. """ + + removed_count = 0 + + # Using a for loop in case of error, we can report the package that failed + for log in logs: + # Query the log first, to see if we even need to remove. + if not query_log_status(module, le_path, log): + continue + + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = module.run_command([le_path, 'rm', log]) + + if query_log_status(module, le_path, log): + module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip())) + + removed_count += 1 + + if removed_count > 0: + module.exit_json(changed=True, msg="removed %d package(s)" % removed_count) + + module.exit_json(changed=False, msg="logs(s) already unfollowed") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(required=True), + state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), + name=dict(required=False, default=None, type='str'), + logtype=dict(required=False, default=None, type='str', aliases=['type']) + ), + supports_check_mode=True + ) + + le_path = module.get_bin_path('le', True, ['/usr/local/bin']) + + p = module.params + + # Handle multiple log files + logs = p["path"].split(",") + logs = filter(None, logs) + + if p["state"] in ["present", "followed"]: + follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) + + elif p["state"] in ["absent", "unfollowed"]: + unfollow_log(module, le_path, logs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/logentries_msg.py b/ansible_collections/community/general/plugins/modules/logentries_msg.py new file mode 100644 index 000000000..03851ad1f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/logentries_msg.py @@ -0,0 +1,105 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: logentries_msg +short_description: Send a message to logentries +description: + - Send a message to logentries +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - Log token. + required: true + msg: + type: str + description: + - The message body. 
+    required: true
+  api:
+    type: str
+    description:
+      - API endpoint.
+    default: data.logentries.com
+  port:
+    type: int
+    description:
+      - API endpoint port.
+    default: 80
+author: "Jimmy Tang (@jcftang)"
+'''
+
+RETURN = '''# '''
+
+EXAMPLES = '''
+- name: Send a message to logentries
+  community.general.logentries_msg:
+    token: 00000000-0000-0000-0000-000000000000
+    msg: "{{ ansible_hostname }}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes
+
+
+def send_msg(module, token, msg, api, port):
+
+    message = "{0} {1}\n".format(token, msg)
+
+    api_ip = socket.gethostbyname(api)
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect((api_ip, port))
+    try:
+        if not module.check_mode:
+            # socket.send() expects bytes, so encode the message first
+            s.send(to_bytes(message))
+    except Exception as e:
+        module.fail_json(msg="failed to send message, msg=%s" % e)
+    s.close()
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(type='str', required=True, no_log=True),
+            msg=dict(type='str', required=True),
+            api=dict(type='str', default="data.logentries.com"),
+            port=dict(type='int', default=80)),
+        supports_check_mode=True
+    )
+
+    token = module.params["token"]
+    msg = module.params["msg"]
+    api = module.params["api"]
+    port = module.params["port"]
+
+    changed = False
+    try:
+        send_msg(module, token, msg, api, port)
+        changed = True
+    except Exception as e:
+        module.fail_json(msg="unable to send msg: %s" % e)
+
+    module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/logstash_plugin.py b/ansible_collections/community/general/plugins/modules/logstash_plugin.py
new file mode 100644
index 000000000..7ee118ff2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/logstash_plugin.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Loic Blot
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logstash_plugin
+short_description: Manage Logstash plugins
+description:
+  - Manages Logstash plugins.
+author: Loic Blot (@nerzhul)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    type: str
+    description:
+      - Install plugin with that name.
+    required: true
+  state:
+    type: str
+    description:
+      - Apply plugin state.
+    choices: ["present", "absent"]
+    default: present
+  plugin_bin:
+    type: path
+    description:
+      - Specify logstash-plugin to use for plugin management.
+    default: /usr/share/logstash/bin/logstash-plugin
+  proxy_host:
+    type: str
+    description:
+      - Proxy host to use during plugin installation.
+  proxy_port:
+    type: str
+    description:
+      - Proxy port to use during plugin installation.
+  version:
+    type: str
+    description:
+      - Specify version of the plugin to install.
+        If the plugin exists at a previous version, it will NOT be updated.
+''' + +EXAMPLES = ''' +- name: Install Logstash beats input plugin + community.general.logstash_plugin: + state: present + name: logstash-input-beats + +- name: Install specific version of a plugin + community.general.logstash_plugin: + state: present + name: logstash-input-syslog + version: '3.2.0' + +- name: Uninstall Logstash plugin + community.general.logstash_plugin: + state: absent + name: logstash-filter-multiline + +- name: Install Logstash plugin with alternate heap size + community.general.logstash_plugin: + state: present + name: logstash-input-beats + environment: + LS_JAVA_OPTS: "-Xms256m -Xmx256m" +''' + +from ansible.module_utils.basic import AnsibleModule + + +PACKAGE_STATE_MAP = dict( + present="install", + absent="remove" +) + + +def is_plugin_present(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, "list", plugin_name] + rc, out, err = module.run_command(cmd_args) + return rc == 0 + + +def parse_error(string): + reason = "reason: " + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string + + +def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name] + + if version: + cmd_args.append("--version %s" % version) + + if proxy_host and proxy_port: + cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + + +def remove_plugin(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name] + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), + plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"), + proxy_host=dict(), + proxy_port=dict(), + version=dict() + ), + supports_check_mode=True + ) + + name = module.params["name"] + state = module.params["state"] + plugin_bin = module.params["plugin_bin"] + proxy_host = module.params["proxy_host"] + proxy_port = module.params["proxy_port"] + version = module.params["version"] + + present = is_plugin_present(module, plugin_bin, name) + + # skip if the state is correct + if (present and state == "present") or (state == "absent" and not present): + module.exit_json(changed=False, name=name, state=state) + + if state == "present": + changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port) + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/lvg.py b/ansible_collections/community/general/plugins/modules/lvg.py new file mode 100644 index 000000000..60eaaa42b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/lvg.py @@ -0,0 +1,338 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, 
Alexander Bulimov +# Based on lvol module by Jeroen Hoekx +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +author: +- Alexander Bulimov (@abulimov) +module: lvg +short_description: Configure LVM volume groups +description: + - This module creates, removes or resizes volume groups. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + vg: + description: + - The name of the volume group. + type: str + required: true + pvs: + description: + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or resizing volume group. + - The module will take care of running pvcreate if needed. + type: list + elements: str + pesize: + description: + - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector + (where the sector size is the largest sector size of the PVs currently used in the VG), + or at least 128KiB." + - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. + type: str + default: "4" + pv_options: + description: + - Additional options to pass to C(pvcreate) when creating the volume group. + type: str + default: '' + pvresize: + description: + - If C(true), resize the physical volume to the maximum available size. + type: bool + default: false + version_added: '0.2.0' + vg_options: + description: + - Additional options to pass to C(vgcreate) when creating the volume group. + type: str + default: '' + state: + description: + - Control if the volume group exists. + type: str + choices: [ absent, present ] + default: present + force: + description: + - If C(true), allows to remove volume group with logical volumes. + type: bool + default: false +seealso: +- module: community.general.filesystem +- module: community.general.lvol +- module: community.general.parted +notes: + - This module does not modify PE size for already present volume group. +''' + +EXAMPLES = r''' +- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB + community.general.lvg: + vg: vg.services + pvs: /dev/sda1 + pesize: 32 + +- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB + community.general.lvg: + vg: vg.services + pvs: /dev/sdb + pesize: 128K + +# If, for example, we already have VG vg.services on top of /dev/sdb1, +# this VG will be extended by /dev/sdc5. Or if vg.services was created on +# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, +# and then reduce by /dev/sda5. +- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. 
+ community.general.lvg: + vg: vg.services + pvs: /dev/sdb1,/dev/sdc5 + +- name: Remove a volume group with name vg.services + community.general.lvg: + vg: vg.services + state: absent + +- name: Create a volume group on top of /dev/sda3 and resize the volume group /dev/sda3 to the maximum possible + community.general.lvg: + vg: resizableVG + pvs: /dev/sda3 + pvresize: true +''' + +import itertools +import os + +from ansible.module_utils.basic import AnsibleModule + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'pv_count': int(parts[1]), + 'lv_count': int(parts[2]), + }) + return vgs + + +def find_mapper_device_name(module, dm_device): + dmsetup_cmd = module.get_bin_path('dmsetup', True) + mapper_prefix = '/dev/mapper/' + rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) + if rc != 0: + module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) + mapper_device = mapper_prefix + dm_name.rstrip() + return mapper_device + + +def parse_pvs(module, data): + pvs = [] + dm_prefix = '/dev/dm-' + for line in data.splitlines(): + parts = line.strip().split(';') + if parts[0].startswith(dm_prefix): + parts[0] = find_mapper_device_name(module, parts[0]) + pvs.append({ + 'name': parts[0], + 'vg_name': parts[1], + }) + return pvs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + pvs=dict(type='list', elements='str'), + pesize=dict(type='str', default='4'), + pv_options=dict(type='str', default=''), + pvresize=dict(type='bool', default=False), + vg_options=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'present']), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + state = module.params['state'] + force = module.boolean(module.params['force']) + pvresize = module.boolean(module.params['pvresize']) + pesize = module.params['pesize'] + pvoptions = module.params['pv_options'].split() + vgoptions = module.params['vg_options'].split() + + dev_list = [] + if module.params['pvs']: + dev_list = list(module.params['pvs']) + elif state == 'present': + module.fail_json(msg="No physical volumes given.") + + # LVM always uses real paths not symlinks so replace symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) + + if state == 'present': + # check given devices + for test_dev in dev_list: + if not os.path.exists(test_dev): + module.fail_json(msg="Device %s not found." % test_dev) + + # get pv list + pvs_cmd = module.get_bin_path('pvs', True) + if dev_list: + pvs_filter_pv_name = ' || '.join( + 'pv_name = {0}'.format(x) + for x in itertools.chain(dev_list, module.params['pvs']) + ) + pvs_filter_vg_name = 'vg_name = {0}'.format(vg) + pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name) + else: + pvs_filter = '' + rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter)) + if rc != 0: + module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) + + # check pv for devices + pvs = parse_pvs(module, current_pvs) + used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] + if used_pvs: + module.fail_json(msg="Device %s is already in %s volume group." 
% (used_pvs[0]['name'], used_pvs[0]['vg_name'])) + + vgs_cmd = module.get_bin_path('vgs', True) + rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) + + if rc != 0: + module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err) + + changed = False + + vgs = parse_vgs(current_vgs) + + for test_vg in vgs: + if test_vg['name'] == vg: + this_vg = test_vg + break + else: + this_vg = None + + if this_vg is None: + if state == 'present': + # create VG + if module.check_mode: + changed = True + else: + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in dev_list: + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + vgcreate_cmd = module.get_bin_path('vgcreate') + rc, dummy, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err) + else: + if state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + else: + if this_vg['lv_count'] == 0 or force: + # remove VG + vgremove_cmd = module.get_bin_path('vgremove', True) + rc, dummy, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) + else: + module.fail_json(msg="Refuse to remove non-empty volume group %s without force=true" % (vg)) + + # resize VG + current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] + devs_to_remove = list(set(current_devs) - set(dev_list)) + devs_to_add = list(set(dev_list) - set(current_devs)) + + if current_devs: + if state == 'present' and pvresize: + for device in current_devs: + pvresize_cmd = module.get_bin_path('pvresize', True) + pvdisplay_cmd = module.get_bin_path('pvdisplay', True) + pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix"] + pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops + rc, dev_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "dev_size"]) + dev_size = int(dev_size.replace(" ", "")) + rc, pv_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pv_size"]) + pv_size = int(pv_size.replace(" ", "")) + rc, pe_start, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pe_start"]) + pe_start = int(pe_start.replace(" ", "")) + rc, vg_extent_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "vg_extent_size"]) + vg_extent_size = int(vg_extent_size.replace(" ", "")) + if (dev_size - (pe_start + pv_size)) > vg_extent_size: + if module.check_mode: + changed = True + else: + rc, dummy, err = module.run_command([pvresize_cmd, device]) + if rc != 0: + module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err) + else: + changed = True + + if devs_to_add or devs_to_remove: + if module.check_mode: + changed = True + else: + if devs_to_add: + devs_to_add_string = ' '.join(devs_to_add) + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in devs_to_add: + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % 
current_dev, rc=rc, err=err)
+                    # add PV to our VG
+                    vgextend_cmd = module.get_bin_path('vgextend', True)
+                    rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+                    if rc == 0:
+                        changed = True
+                    else:
+                        module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+                # remove some PV from our VG
+                if devs_to_remove:
+                    devs_to_remove_string = ' '.join(devs_to_remove)
+                    vgreduce_cmd = module.get_bin_path('vgreduce', True)
+                    rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+                    if rc == 0:
+                        changed = True
+                    else:
+                        module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+    module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/lvol.py b/ansible_collections/community/general/plugins/modules/lvol.py
new file mode 100644
index 000000000..d193a4e83
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lvol.py
@@ -0,0 +1,615 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-

+# Copyright (c) 2013, Jeroen Hoekx, Alexander Bulimov
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+  - Jeroen Hoekx (@jhoekx)
+  - Alexander Bulimov (@abulimov)
+  - Raoul Baudach (@unkaputtbar112)
+  - Ziga Kern (@zigaSRC)
+module: lvol
+short_description: Configure LVM logical volumes
+description:
+  - This module creates, removes or resizes logical volumes.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  vg:
+    type: str
+    required: true
+    description:
+      - The volume group this logical volume is part of.
+  lv:
+    type: str
+    description:
+      - The name of the logical volume.
+  size:
+    type: str
+    description:
+      - The size of the logical volume, according to lvcreate(8) --size, by
+        default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
+        according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
+        float values must begin with a digit.
+      - When resizing, apart from specifying an absolute size you may, according to
+        lvextend(8)|lvreduce(8) C(--size), specify the amount to extend the logical volume with
+        the prefix C(+) or the amount to reduce the logical volume by with prefix C(-).
+      - Resizing using C(+) or C(-) was not supported prior to community.general 3.0.0.
+      - Please note that when using C(+) or C(-), the module is B(not idempotent).
+  state:
+    type: str
+    description:
+      - Control if the logical volume exists. If C(present) and the
+        volume does not already exist then the C(size) option is required.
+    choices: [ absent, present ]
+    default: present
+  active:
+    description:
+      - Whether the volume is active and visible to the host.
+    type: bool
+    default: true
+  force:
+    description:
+      - Shrink or remove operations of volumes require this switch. Ensures that
+        filesystems never get corrupted or destroyed by mistake.
+    type: bool
+    default: false
+  opts:
+    type: str
+    description:
+      - Free-form options to be passed to the lvcreate command.
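+      - For example C(--type cache-pool) or C(-r 16), as shown in the EXAMPLES section below.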
+ snapshot: + type: str + description: + - The name of the snapshot volume + pvs: + type: str + description: + - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb). + thinpool: + type: str + description: + - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name. + shrink: + description: + - Shrink if current size is higher than size requested. + type: bool + default: true + resizefs: + description: + - Resize the underlying filesystem together with the logical volume. + - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. + Attempts to resize other filesystem types will fail. + type: bool + default: false +notes: + - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume). +''' + +EXAMPLES = ''' +- name: Create a logical volume of 512m + community.general.lvol: + vg: firefly + lv: test + size: 512 + +- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb + community.general.lvol: + vg: firefly + lv: test + size: 512 + pvs: /dev/sda,/dev/sdb + +- name: Create cache pool logical volume + community.general.lvol: + vg: firefly + lv: lvcache + size: 512m + opts: --type cache-pool + +- name: Create a logical volume of 512g. + community.general.lvol: + vg: firefly + lv: test + size: 512g + +- name: Create a logical volume the size of all remaining space in the volume group + community.general.lvol: + vg: firefly + lv: test + size: 100%FREE + +- name: Create a logical volume with special options + community.general.lvol: + vg: firefly + lv: test + size: 512g + opts: -r 16 + +- name: Extend the logical volume to 1024m. + community.general.lvol: + vg: firefly + lv: test + size: 1024 + +- name: Extend the logical volume to consume all remaining space in the volume group + community.general.lvol: + vg: firefly + lv: test + size: +100%FREE + +- name: Extend the logical volume by given space + community.general.lvol: + vg: firefly + lv: test + size: +512M + +- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem + community.general.lvol: + vg: firefly + lv: test + size: 100%PVS + resizefs: true + +- name: Resize the logical volume to % of VG + community.general.lvol: + vg: firefly + lv: test + size: 80%VG + force: true + +- name: Reduce the logical volume to 512m + community.general.lvol: + vg: firefly + lv: test + size: 512 + force: true + +- name: Reduce the logical volume by given space + community.general.lvol: + vg: firefly + lv: test + size: -512M + force: true + +- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one + community.general.lvol: + vg: firefly + lv: test + size: 512 + shrink: false + +- name: Remove the logical volume. + community.general.lvol: + vg: firefly + lv: test + state: absent + force: true + +- name: Create a snapshot volume of the test logical volume. 
+ community.general.lvol: + vg: firefly + lv: test + snapshot: snap1 + size: 100m + +- name: Deactivate a logical volume + community.general.lvol: + vg: firefly + lv: test + active: false + +- name: Create a deactivated logical volume + community.general.lvol: + vg: firefly + lv: test + size: 512g + active: false + +- name: Create a thin pool of 512g + community.general.lvol: + vg: firefly + thinpool: testpool + size: 512g + +- name: Create a thin volume of 128g + community.general.lvol: + vg: firefly + lv: test + thinpool: testpool + size: 128g +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + +LVOL_ENV_VARS = dict( + # make sure we use the C locale when running lvol-related commands + LANG='C', + LC_ALL='C', + LC_MESSAGES='C', + LC_CTYPE='C', +) + + +def mkversion(major, minor, patch): + return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) + + +def parse_lvs(data): + lvs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + lvs.append({ + 'name': parts[0].replace('[', '').replace(']', ''), + 'size': float(parts[1]), + 'active': (parts[2][4] == 'a'), + 'thinpool': (parts[2][0] == 't'), + 'thinvol': (parts[2][0] == 'V'), + }) + return lvs + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'size': float(parts[1]), + 'free': float(parts[2]), + 'ext_size': float(parts[3]) + }) + return vgs + + +def get_lvm_version(module): + ver_cmd = module.get_bin_path("lvm", required=True) + rc, out, err = module.run_command("%s version" % (ver_cmd)) + if rc != 0: + return None + m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) + if not m: + return None + return mkversion(m.group(1), m.group(2), m.group(3)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + lv=dict(type='str'), + size=dict(type='str'), + opts=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + force=dict(type='bool', default=False), + shrink=dict(type='bool', default=True), + active=dict(type='bool', default=True), + snapshot=dict(type='str'), + pvs=dict(type='str'), + resizefs=dict(type='bool', default=False), + thinpool=dict(type='str'), + ), + supports_check_mode=True, + required_one_of=( + ['lv', 'thinpool'], + ), + ) + + module.run_command_environ_update = LVOL_ENV_VARS + + # Determine if the "--yes" option should be used + version_found = get_lvm_version(module) + if version_found is None: + module.fail_json(msg="Failed to get LVM version number") + version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option + if version_found >= version_yesopt: + yesopt = "--yes" + else: + yesopt = "" + + vg = module.params['vg'] + lv = module.params['lv'] + size = module.params['size'] + opts = module.params['opts'] + state = module.params['state'] + force = module.boolean(module.params['force']) + shrink = module.boolean(module.params['shrink']) + active = module.boolean(module.params['active']) + resizefs = module.boolean(module.params['resizefs']) + thinpool = module.params['thinpool'] + size_opt = 'L' + size_unit = 'm' + size_operator = None + snapshot = module.params['snapshot'] + pvs = module.params['pvs'] + + if pvs is None: + pvs = "" + else: + pvs = pvs.replace(",", " ") + + if opts is None: + opts = "" + + # Add --test option when running in check-mode + if module.check_mode: + test_opt = ' --test' + else: + test_opt = '' + + if size: + # 
LVEXTEND(8)/LVREDUCE(8) -l, -L options: Check for relative value for resizing + if size.startswith('+'): + size_operator = '+' + size = size[1:] + elif size.startswith('-'): + size_operator = '-' + size = size[1:] + # LVCREATE(8) does not support [+-] + + # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -l --extents option with percentage + if '%' in size: + size_parts = size.split('%', 1) + size_percent = int(size_parts[0]) + if size_percent > 100: + module.fail_json(msg="Size percentage cannot be larger than 100%") + size_whole = size_parts[1] + if size_whole == 'ORIGIN': + module.fail_json(msg="Snapshot Volumes are not supported") + elif size_whole not in ['VG', 'PVS', 'FREE']: + module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") + size_opt = 'l' + size_unit = '' + + # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -L --size option unit + if '%' not in size: + if size[-1].lower() in 'bskmgtpe': + size_unit = size[-1] + size = size[0:-1] + + try: + float(size) + if not size[0].isdigit(): + raise ValueError() + except ValueError: + module.fail_json(msg="Bad size specification of '%s'" % size) + + # when no unit, megabytes by default + if size_opt == 'l': + unit = 'm' + else: + unit = size_unit + + # Get information on volume group requested + vgs_cmd = module.get_bin_path("vgs", required=True) + rc, current_vgs, err = module.run_command( + "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit.lower(), vg)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) + else: + module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) + + vgs = parse_vgs(current_vgs) + this_vg = vgs[0] + + # Get information on logical volume requested + lvs_cmd = module.get_bin_path("lvs", required=True) + rc, current_lvs, err = module.run_command( + "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit.lower(), vg)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) + else: + module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) + + changed = False + + lvs = parse_lvs(current_lvs) + + if snapshot: + # Check snapshot pre-conditions + for test_lv in lvs: + if test_lv['name'] == lv or test_lv['name'] == thinpool: + if not test_lv['thinpool'] and not thinpool: + break + else: + module.fail_json(msg="Snapshots of thin pool LVs are not supported.") + else: + module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg)) + check_lv = snapshot + elif thinpool: + if lv: + # Check thin volume pre-conditions + for test_lv in lvs: + if test_lv['name'] == thinpool: + break + else: + module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." 
% (thinpool, vg)) + check_lv = lv + else: + check_lv = thinpool + else: + check_lv = lv + + for test_lv in lvs: + if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]): + this_lv = test_lv + break + else: + this_lv = None + + msg = '' + if this_lv is None: + if state == 'present': + if size_operator is not None: + if size_operator == "-" or (size_whole not in ["VG", "PVS", "FREE", "ORIGIN", None]): + module.fail_json(msg="Bad size specification of '%s%s' for creating LV" % (size_operator, size)) + # Require size argument except for snapshot of thin volumes + if (lv or thinpool) and not size: + for test_lv in lvs: + if test_lv['name'] == lv and test_lv['thinvol'] and snapshot: + break + else: + module.fail_json(msg="No size given.") + + # create LV + lvcreate_cmd = module.get_bin_path("lvcreate", required=True) + if snapshot is not None: + if size: + cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv) + else: + cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv) + elif thinpool and lv: + if size_opt == 'l': + module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") + size_opt = 'V' + cmd = "%s %s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool) + elif thinpool and not lv: + cmd = "%s %s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, opts, vg, thinpool) + else: + cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) + rc, dummy, err = module.run_command(cmd) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) + else: + if state == 'absent': + # remove LV + if not force: + module.fail_json(msg="Sorry, no removal of logical volume %s without force=true." % (this_lv['name'])) + lvremove_cmd = module.get_bin_path("lvremove", required=True) + rc, dummy, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) + + elif not size: + pass + + elif size_opt == 'l': + # Resize LV based on % value + tool = None + size_free = this_vg['free'] + if size_whole == 'VG' or size_whole == 'PVS': + size_requested = size_percent * this_vg['size'] / 100 + else: # size_whole == 'FREE': + size_requested = size_percent * this_vg['free'] / 100 + + if size_operator == '+': + size_requested += this_lv['size'] + elif size_operator == '-': + size_requested = this_lv['size'] - size_requested + + # According to latest documentation (LVM2-2.03.11) all tools round down + size_requested -= (size_requested % this_vg['ext_size']) + + if this_lv['size'] < size_requested: + if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])): + tool = module.get_bin_path("lvextend", required=True) + else: + module.fail_json( + msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % + (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit) + ) + elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large + if size_requested < 1: + module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." 
% (this_lv['name'])) + elif not force: + module.fail_json(msg="Sorry, no shrinking of %s without force=true" % (this_lv['name'])) + else: + tool = module.get_bin_path("lvreduce", required=True) + tool = '%s %s' % (tool, '--force') + + if tool: + if resizefs: + tool = '%s %s' % (tool, '--resizefs') + if size_operator: + cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs) + else: + cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + rc, out, err = module.run_command(cmd) + if "Reached maximum COW size" in out: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) + elif rc == 0: + changed = True + msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit) + elif "matches existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + elif "not larger than existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + else: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + + else: + # resize LV based on absolute values + tool = None + if float(size) > this_lv['size'] or size_operator == '+': + tool = module.get_bin_path("lvextend", required=True) + elif shrink and float(size) < this_lv['size'] or size_operator == '-': + if float(size) == 0: + module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) + if not force: + module.fail_json(msg="Sorry, no shrinking of %s without force=true." % (this_lv['name'])) + else: + tool = module.get_bin_path("lvreduce", required=True) + tool = '%s %s' % (tool, '--force') + + if tool: + if resizefs: + tool = '%s %s' % (tool, '--resizefs') + if size_operator: + cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs) + else: + cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + rc, out, err = module.run_command(cmd) + if "Reached maximum COW size" in out: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) + elif rc == 0: + changed = True + elif "matches existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + elif "not larger than existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + else: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + + if this_lv is not None: + if active: + lvchange_cmd = module.get_bin_path("lvchange", required=True) + rc, dummy, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err) + else: + lvchange_cmd = module.get_bin_path("lvchange", required=True) + rc, dummy, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + module.fail_json(msg="Failed to deactivate 
logical volume %s" % (lv), rc=rc, err=err) + + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/lxc_container.py b/ansible_collections/community/general/plugins/modules/lxc_container.py new file mode 100644 index 000000000..aec8f12dc --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/lxc_container.py @@ -0,0 +1,1742 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Kevin Carter +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: lxc_container +short_description: Manage LXC Containers +description: + - Management of LXC containers. +author: "Kevin Carter (@cloudnull)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of a container. + type: str + required: true + backing_store: + choices: + - dir + - lvm + - loop + - btrfs + - overlayfs + - zfs + description: + - Backend storage type for the container. + type: str + default: dir + template: + description: + - Name of the template to use within an LXC create. + type: str + default: ubuntu + template_options: + description: + - Template options when building the container. + type: str + config: + description: + - Path to the LXC configuration file. + type: path + lv_name: + description: + - Name of the logical volume, defaults to the container name. + - If not specified, it defaults to C($CONTAINER_NAME). + type: str + vg_name: + description: + - If backend store is lvm, specify the name of the volume group. + type: str + default: lxc + thinpool: + description: + - Use LVM thin pool called TP. + type: str + fs_type: + description: + - Create fstype TYPE. + type: str + default: ext4 + fs_size: + description: + - File system Size. + type: str + default: 5G + directory: + description: + - Place rootfs directory under DIR. + type: path + zfs_root: + description: + - Create zfs under given zfsroot. + type: str + container_command: + description: + - Run a command within a container. + type: str + lxc_path: + description: + - Place container under C(PATH). + type: path + container_log: + description: + - Enable a container log for host actions to the container. + type: bool + default: false + container_log_level: + choices: + - Info + - info + - INFO + - Error + - error + - ERROR + - Debug + - debug + - DEBUG + description: + - Set the log level for a container where I(container_log) was set. + type: str + required: false + default: INFO + clone_name: + description: + - Name of the new cloned server. + - This is only used when state is clone. + type: str + clone_snapshot: + description: + - Create a snapshot a container when cloning. + - This is not supported by all container storage backends. + - Enabling this may fail if the backing store does not support snapshots. + type: bool + default: false + archive: + description: + - Create an archive of a container. + - This will create a tarball of the running container. + type: bool + default: false + archive_path: + description: + - Path the save the archived container. + - If the path does not exist the archive method will attempt to create it. 
+    type: path
+  archive_compression:
+    choices:
+      - gzip
+      - bzip2
+      - none
+    description:
+      - Type of compression to use when creating an archive of a running
+        container.
+    type: str
+    default: gzip
+  state:
+    choices:
+      - started
+      - stopped
+      - restarted
+      - absent
+      - frozen
+      - clone
+    description:
+      - Define the state of a container.
+      - If you clone a container using I(clone_name) the newly cloned
+        container is created in a stopped state.
+      - The running container will be stopped while the clone operation is
+        happening and upon completion of the clone the original container
+        state will be restored.
+    type: str
+    default: started
+  container_config:
+    description:
+      - A list of C(key=value) options to use when configuring a container.
+    type: list
+    elements: str
+requirements:
+  - 'lxc >= 2.0 # OS package'
+  - 'python3 >= 3.5 # OS Package'
+  - 'python3-lxc # OS Package'
+notes:
+  - Containers must have a unique name. If you attempt to create a container
+    with a name that already exists in the user's namespace the module will
+    simply return as "unchanged".
+  - The I(container_command) can be used with any state except C(absent). If
+    used with state C(stopped) the container will be C(started), the command
+    executed, and then the container C(stopped) again. Likewise if I(state=stopped)
+    and the container does not exist it will be first created,
+    C(started), the command executed, and then C(stopped). If you use a "|"
+    in the variable you can use common script formatting within the variable
+    itself. The I(container_command) option will always execute as BASH.
+    When using I(container_command), a log file is created in the C(/tmp/) directory
+    which contains both C(stdout) and C(stderr) of any command executed.
+  - If I(archive=true) the system will attempt to create a compressed
+    tarball of the running container. The I(archive) option supports LVM backed
+    containers and will create a snapshot of the running container when
+    creating the archive.
+  - If your distro does not have a package for C(python3-lxc), which is a
+    requirement for this module, it can be installed from source at
+    U(https://github.com/lxc/python3-lxc) or installed via pip using the
+    package name C(lxc).
+'''
+
+EXAMPLES = r"""
+- name: Create a started container
+  community.general.lxc_container:
+    name: test-container-started
+    container_log: true
+    template: ubuntu
+    state: started
+    template_options: --release trusty
+
+- name: Create a stopped container
+  community.general.lxc_container:
+    name: test-container-stopped
+    container_log: true
+    template: ubuntu
+    state: stopped
+    template_options: --release trusty
+
+- name: Create a frozen container
+  community.general.lxc_container:
+    name: test-container-frozen
+    container_log: true
+    template: ubuntu
+    state: frozen
+    template_options: --release trusty
+    container_command: |
+      echo 'hello world.' | tee /opt/started-frozen
+
+# Create a filesystem container, configure it, archive it, and start it.
+- name: Create filesystem container
+  community.general.lxc_container:
+    name: test-container-config
+    backing_store: dir
+    container_log: true
+    template: ubuntu
+    state: started
+    archive: true
+    archive_compression: none
+    container_config:
+      - "lxc.aa_profile=unconfined"
+      - "lxc.cgroup.devices.allow=a *:* rmw"
+    template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state.
The container archive will be compressed using bzip2
+- name: Create a frozen lvm container
+  community.general.lxc_container:
+    name: test-container-lvm
+    container_log: true
+    template: ubuntu
+    state: frozen
+    backing_store: lvm
+    template_options: --release trusty
+    container_command: |
+      apt-get update
+      apt-get install -y vim lxc-dev
+      echo 'hello world.' | tee /opt/started
+      if [[ -f "/opt/started" ]]; then
+          echo 'hello world.' | tee /opt/found-started
+      fi
+    container_config:
+      - "lxc.aa_profile=unconfined"
+      - "lxc.cgroup.devices.allow=a *:* rmw"
+    archive: true
+    archive_compression: bzip2
+  register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+  ansible.builtin.debug:
+    var: lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+  community.general.lxc_container:
+    name: test-container-started
+    state: stopped
+    container_command: |
+      echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+  community.general.lxc_container:
+    name: test-container-stopped
+    state: frozen
+    container_command: |
+      echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+  community.general.lxc_container:
+    name: test-container-stopped
+    state: started
+
+- name: Run a command in a container and then restart it
+  community.general.lxc_container:
+    name: test-container-started
+    state: restarted
+    container_command: |
+      echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container
+  community.general.lxc_container:
+    name: test-container-started
+    container_command: |
+      apt-get update
+      apt-get install -y curl wget vim apache2
+      echo 'hello world.' | tee /opt/started
+      if [[ -f "/opt/started" ]]; then
+          echo 'hello world.' | tee /opt/found-started
+      fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
+- name: Archive container
+  community.general.lxc_container:
+    name: test-container-started
+    state: absent
+    archive: true
+    archive_path: /opt/archives
+
+# Create a container using overlayfs, create an archive of it, create a
+# snapshot clone of the container and finally leave the container
+# in a frozen state. The container archive will be compressed using gzip.
+- name: Create an overlayfs container archive and clone it
+  community.general.lxc_container:
+    name: test-container-overlayfs
+    container_log: true
+    template: ubuntu
+    state: started
+    backing_store: overlayfs
+    template_options: --release trusty
+    clone_snapshot: true
+    clone_name: test-container-overlayfs-clone-snapshot
+    archive: true
+    archive_compression: gzip
+  register: clone_container_info
+
+- name: Debug info on container "test-container-overlayfs"
+  ansible.builtin.debug:
+    var: clone_container_info
+
+- name: Clone a container using snapshot
+  community.general.lxc_container:
+    name: test-container-overlayfs-clone-snapshot
+    backing_store: overlayfs
+    clone_name: test-container-overlayfs-clone-snapshot2
+    clone_snapshot: true
+
+- name: Create a new container and clone it
+  community.general.lxc_container:
+    name: test-container-new-archive
+    backing_store: dir
+    clone_name: test-container-new-archive-clone
+
+- name: Archive and clone a container then destroy it
+  community.general.lxc_container:
+    name: test-container-new-archive
+    state: absent
+    clone_name: test-container-new-archive-destroyed-clone
+    archive: true
+    archive_compression: gzip
+
+- name: Start a cloned container.
+ community.general.lxc_container: + name: test-container-new-archive-destroyed-clone + state: started + +- name: Destroy a container + community.general.lxc_container: + name: '{{ item }}' + state: absent + with_items: + - test-container-stopped + - test-container-started + - test-container-frozen + - test-container-lvm + - test-container-config + - test-container-overlayfs + - test-container-overlayfs-clone + - test-container-overlayfs-clone-snapshot + - test-container-overlayfs-clone-snapshot2 + - test-container-new-archive + - test-container-new-archive-clone + - test-container-new-archive-destroyed-clone +""" + +RETURN = r""" +lxc_container: + description: container information + returned: success + type: complex + contains: + name: + description: name of the lxc container + returned: success + type: str + sample: test_host + init_pid: + description: pid of the lxc init process + returned: success + type: int + sample: 19786 + interfaces: + description: list of the container's network interfaces + returned: success + type: list + sample: [ "eth0", "lo" ] + ips: + description: list of ips + returned: success + type: list + sample: [ "10.0.3.3" ] + state: + description: resulting state of the container + returned: success + type: str + sample: "running" + archive: + description: resulting state of the container + returned: success, when archive is true + type: str + sample: "/tmp/test-container-config.tar" + clone: + description: if the container was cloned + returned: success, when clone_name is specified + type: bool + sample: true +""" + +import os +import os.path +import re +import shutil +import subprocess +import tempfile +import time +import shlex + +try: + import lxc +except ImportError: + HAS_LXC = False +else: + HAS_LXC = True + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE +from ansible.module_utils.common.text.converters import to_text, to_bytes + + +# LXC_COMPRESSION_MAP is a map of available compression types when creating +# an archive of a container. +LXC_COMPRESSION_MAP = { + 'gzip': { + 'extension': 'tar.tgz', + 'argument': '-czf' + }, + 'bzip2': { + 'extension': 'tar.bz2', + 'argument': '-cjf' + }, + 'none': { + 'extension': 'tar', + 'argument': '-cf' + } +} + + +# LXC_COMMAND_MAP is a map of variables that are available to a method based +# on the state the container is in. +LXC_COMMAND_MAP = { + 'create': { + 'variables': { + 'config': '--config', + 'template': '--template', + 'backing_store': '--bdev', + 'lxc_path': '--lxcpath', + 'lv_name': '--lvname', + 'vg_name': '--vgname', + 'thinpool': '--thinpool', + 'fs_type': '--fstype', + 'fs_size': '--fssize', + 'directory': '--dir', + 'zfs_root': '--zfsroot' + } + }, + 'clone': { + 'variables-lxc-copy': { + 'backing_store': '--backingstorage', + 'lxc_path': '--lxcpath', + 'fs_size': '--fssize', + 'name': '--name', + 'clone_name': '--newname' + }, + # lxc-clone is deprecated in favor of lxc-copy + 'variables-lxc-clone': { + 'backing_store': '--backingstore', + 'lxc_path': '--lxcpath', + 'fs_size': '--fssize', + 'name': '--orig', + 'clone_name': '--new' + } + } +} + + +# LXC_BACKING_STORE is a map of available storage backends and options that +# are incompatible with the given storage backend. 
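+# For example (an illustrative doctest only; the values simply mirror the
+# map below, and any option listed for the chosen backend is dropped from
+# the generated lxc-* command line by _get_vars()):
+#
+#     >>> sorted(LXC_BACKING_STORE['dir'])
+#     ['fs_size', 'fs_type', 'lv_name', 'thinpool', 'vg_name']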
+LXC_BACKING_STORE = { + 'dir': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' + ], + 'lvm': [ + 'zfs_root' + ], + 'btrfs': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size' + ], + 'loop': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + ], + 'overlayfs': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root' + ], + 'zfs': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' + ] +} + + +# LXC_LOGGING_LEVELS is a map of available log levels +LXC_LOGGING_LEVELS = { + 'INFO': ['info', 'INFO', 'Info'], + 'ERROR': ['error', 'ERROR', 'Error'], + 'DEBUG': ['debug', 'DEBUG', 'Debug'] +} + + +# LXC_ANSIBLE_STATES is a map of states that contain values of methods used +# when a particular state is evoked. +LXC_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen', + 'clone': '_clone' +} + + +# This is used to attach to a running container and execute commands from +# within the container on the host. This will provide local access to a +# container without using SSH. The template will attempt to work within the +# home directory of the user that was attached to the container and source +# that users environment variables by default. +ATTACH_TEMPLATE = """#!/usr/bin/env bash +pushd "$(getent passwd $(whoami)|cut -f6 -d':')" + if [[ -f ".bashrc" ]];then + source .bashrc + unset HOSTNAME + fi +popd + +# User defined command +%(container_command)s +""" + + +def create_script(command): + """Write out a script onto a target. + + This method should be backward compatible with Python when executing + from within the container. + + :param command: command to run, this can be a script and can use spacing + with newlines as separation. + :type command: ``str`` + """ + + (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script') + f = os.fdopen(fd, 'wb') + try: + f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict')) + f.flush() + finally: + f.close() + + # Ensure the script is executable. + os.chmod(script_file, int('0700', 8)) + + # Output log file. + stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab') + + # Error log file. + stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab') + + # Execute the script command. + try: + subprocess.Popen( + [script_file], + stdout=stdout_file, + stderr=stderr_file + ).communicate() + finally: + # Close the log files. + stderr_file.close() + stdout_file.close() + + # Remove the script file upon completion of execution. + os.remove(script_file) + + +class LxcContainerManagement(object): + def __init__(self, module): + """Management of LXC containers via Ansible. + + :param module: Processed Ansible Module. + :type module: ``object`` + """ + self.module = module + self.state = self.module.params['state'] + self.state_change = False + self.lxc_vg = None + self.lxc_path = self.module.params['lxc_path'] + self.container_name = self.module.params['name'] + self.container = self.get_container_bind() + self.archive_info = None + self.clone_info = None + + def get_container_bind(self): + return lxc.Container(name=self.container_name) + + @staticmethod + def _roundup(num): + """Return a rounded floating point number. + + :param num: Number to round up. + :type: ``float`` + :returns: Rounded up number. 
+        :rtype: ``int``
+        """
+        num, part = str(num).split('.')
+        num = int(num)
+        if int(part) != 0:
+            num += 1
+        return num
+
+    @staticmethod
+    def _container_exists(container_name, lxc_path=None):
+        """Check if a container exists.
+
+        :param container_name: Name of the container.
+        :type container_name: ``str``
+        :returns: True or False if the container is found.
+        :rtype: ``bool``
+        """
+        return any(c == container_name for c in lxc.list_containers(config_path=lxc_path))
+
+    @staticmethod
+    def _add_variables(variables_dict, build_command):
+        """Return a command list with all found options.
+
+        :param variables_dict: Pre-parsed optional variables used from a
+                               seed command.
+        :type variables_dict: ``dict``
+        :param build_command: Command to run.
+        :type build_command: ``list``
+        :returns: list of command options.
+        :rtype: ``list``
+        """
+
+        for key, value in variables_dict.items():
+            build_command.append(str(key))
+            build_command.append(str(value))
+        return build_command
+
+    def _get_vars(self, variables):
+        """Return a dict of all variables as found within the module.
+
+        :param variables: Hash of all variables to find.
+        :type variables: ``dict``
+        """
+
+        # Remove incompatible storage backend options.
+        variables = variables.copy()
+        for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+            variables.pop(v, None)
+
+        false_values = BOOLEANS_FALSE.union([None, ''])
+        result = dict(
+            (v, self.module.params[k])
+            for k, v in variables.items()
+            if self.module.params[k] not in false_values
+        )
+        return result
+
+    def _config(self):
+        """Configure an LXC container.
+
+        Write new configuration values to the lxc config file. This will
+        stop the container if it's running, write the new options, and then
+        restart the container upon completion.
+        """
+
+        _container_config = self.module.params['container_config']
+        if not _container_config:
+            return False
+
+        container_config_file = self.container.config_file_name
+        with open(container_config_file, 'rb') as f:
+            container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
+
+        parsed_options = [i.split('=', 1) for i in _container_config]
+        config_change = False
+        for key, value in parsed_options:
+            key = key.strip()
+            value = value.strip()
+            new_entry = '%s = %s\n' % (key, value)
+            keyre = re.compile(r'%s(\s+)?=' % key)
+            for option_line in container_config:
+                # Look for key in config
+                if keyre.match(option_line):
+                    dummy, _value = option_line.split('=', 1)
+                    config_value = ' '.join(_value.split())
+                    line_index = container_config.index(option_line)
+                    # If the sanitized values don't match replace them
+                    if value != config_value:
+                        line_index += 1
+                        if new_entry not in container_config:
+                            config_change = True
+                            container_config.insert(line_index, new_entry)
+                    # Break the flow as values are written or not at this point
+                    break
+            else:
+                config_change = True
+                container_config.append(new_entry)
+
+        # If the config changed restart the container.
+        if config_change:
+            container_state = self._get_state()
+            if container_state != 'stopped':
+                self.container.stop()
+
+            with open(container_config_file, 'wb') as f:
+                f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
+
+            self.state_change = True
+            if container_state == 'running':
+                self._container_startup()
+            elif container_state == 'frozen':
+                self._container_startup()
+                self.container.freeze()
+
+    def _container_create_clone(self):
+        """Clone a new LXC container from an existing container.
+ + This method will clone an existing container to a new container using + the `clone_name` variable as the new container name. The method will + create a container if the container `name` does not exist. + + Note that cloning a container will ensure that the original container + is "stopped" before the clone can be done. Because this operation can + require a state change the method will return the original container + to its prior state upon completion of the clone. + + Once the clone is complete the new container will be left in a stopped + state. + """ + + # Ensure that the state of the original container is stopped + container_state = self._get_state() + if container_state != 'stopped': + self.state_change = True + self.container.stop() + + # lxc-clone is deprecated in favor of lxc-copy + clone_vars = 'variables-lxc-copy' + clone_cmd = self.module.get_bin_path('lxc-copy') + if not clone_cmd: + clone_vars = 'variables-lxc-clone' + clone_cmd = self.module.get_bin_path('lxc-clone', True) + + build_command = [ + clone_cmd, + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['clone'][clone_vars] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params['clone_snapshot']: + build_command.append('--snapshot') + # Check for backing_store == overlayfs if so force the use of snapshot + # If overlay fs is used and snapshot is unset the clone command will + # fail with an unsupported type. + elif self.module.params['backing_store'] == 'overlayfs': + build_command.append('--snapshot') + + rc, return_data, err = self.module.run_command(build_command) + if rc != 0: + message = "Failed executing %s." % os.path.basename(clone_cmd) + self.failure( + err=err, rc=rc, msg=message, command=' '.join( + build_command + ) + ) + else: + self.state_change = True + # Restore the original state of the origin container if it was + # not in a stopped state. + if container_state == 'running': + self.container.start() + elif container_state == 'frozen': + self.container.start() + self.container.freeze() + + return True + + def _create(self): + """Create a new LXC container. + + This method will build and execute a shell command to build the + container. It would have been nice to simply use the lxc python library + however at the time this was written the python library, in both py2 + and py3 didn't support some of the more advanced container create + processes. These missing processes mainly revolve around backing + LXC containers with block devices. + """ + + build_command = [ + self.module.get_bin_path('lxc-create', True), + '--name', self.container_name, + '--quiet' + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['create']['variables'] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params['container_log']: + # Set the logging path to the /var/log/lxc if uid is root. else + # set it to the home folder of the user executing. 
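+            # For example, a root-owned run with a hypothetical container
+            # named "web01" would append:
+            #   --logfile /var/log/lxc/lxc-web01.log --logpriority INFO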
+            try:
+                if os.getuid() != 0:
+                    log_path = os.getenv('HOME')
+                else:
+                    if not os.path.isdir('/var/log/lxc/'):
+                        os.makedirs('/var/log/lxc/')
+                    log_path = '/var/log/lxc/'
+            except OSError:
+                log_path = os.getenv('HOME')
+
+            build_command.extend([
+                '--logfile',
+                os.path.join(
+                    log_path, 'lxc-%s.log' % self.container_name
+                ),
+                '--logpriority',
+                self.module.params.get(
+                    'container_log_level'
+                ).upper()
+            ])
+
+        # Add the template commands to the end of the command if there are any
+        template_options = self.module.params['template_options']
+        if template_options:
+            build_command.append('--')
+            build_command += shlex.split(template_options)
+
+        rc, return_data, err = self.module.run_command(build_command)
+        if rc != 0:
+            message = "Failed executing lxc-create."
+            self.failure(
+                err=err, rc=rc, msg=message, command=' '.join(build_command)
+            )
+        else:
+            self.state_change = True
+
+    def _container_data(self):
+        """Returns a dict of container information.
+
+        :returns: container data
+        :rtype: ``dict``
+        """
+
+        return {
+            'interfaces': self.container.get_interfaces(),
+            'ips': self.container.get_ips(),
+            'state': self._get_state(),
+            'init_pid': int(self.container.init_pid),
+            'name': self.container_name,
+        }
+
+    def _unfreeze(self):
+        """Unfreeze a container.
+
+        :returns: True or False based on if the container was unfrozen.
+        :rtype: ``bool``
+        """
+
+        unfreeze = self.container.unfreeze()
+        if unfreeze:
+            self.state_change = True
+        return unfreeze
+
+    def _get_state(self):
+        """Return the state of a container.
+
+        If the container is not found the state returned is "absent"
+
+        :returns: state of a container as a lower case string.
+        :rtype: ``str``
+        """
+
+        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+            return str(self.container.state).lower()
+        return str('absent')
+
+    def _execute_command(self):
+        """Execute a shell command."""
+
+        container_command = self.module.params['container_command']
+        if container_command:
+            container_state = self._get_state()
+            if container_state == 'frozen':
+                self._unfreeze()
+            elif container_state == 'stopped':
+                self._container_startup()
+
+            self.container.attach_wait(create_script, container_command)
+            self.state_change = True
+
+    def _container_startup(self, timeout=60):
+        """Ensure a container is started.
+
+        :param timeout: Time before the start operation is abandoned.
+        :type timeout: ``int``
+        """
+
+        self.container = self.get_container_bind()
+        for dummy in range(timeout):
+            if self._get_state() == 'running':
+                return True
+
+            self.container.start()
+            self.state_change = True
+            # post startup sleep for 1 second.
+            time.sleep(1)
+        self.failure(
+            lxc_container=self._container_data(),
+            error='Failed to start container [ %s ]' % self.container_name,
+            rc=1,
+            msg='The container [ %s ] failed to start. Check that lxc is'
+                ' available and that the container is in a functional'
+                ' state.' % self.container_name
+        )
+
+    def _check_archive(self):
+        """Create a compressed archive of a container.
+
+        The result is stored in self.archive_info.
+        """
+
+        if self.module.params['archive']:
+            self.archive_info = {
+                'archive': self._container_create_tar()
+            }
+
+    def _check_clone(self):
+        """Clone a container when clone_name is set.
+
+        The result is stored in self.clone_info.
+        """
+
+        clone_name = self.module.params['clone_name']
+        if clone_name:
+            if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+                self.clone_info = {
+                    'cloned': self._container_create_clone()
+                }
+            else:
+                self.clone_info = {
+                    'cloned': False
+                }
+
+    def _destroyed(self, timeout=60):
+        """Ensure a container is destroyed.
+
+        :param timeout: Time before the destroy operation is abandoned.
+        :type timeout: ``int``
+        """
+
+        for dummy in range(timeout):
+            if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+                break
+
+            # Check if the container needs to have an archive created.
+            self._check_archive()
+
+            # Check if the container is to be cloned
+            self._check_clone()
+
+            if self._get_state() != 'stopped':
+                self.state_change = True
+                self.container.stop()
+
+            if self.container.destroy():
+                self.state_change = True
+
+            # post destroy attempt sleep for 1 second.
+            time.sleep(1)
+        else:
+            self.failure(
+                lxc_container=self._container_data(),
+                error='Failed to destroy container'
+                      ' [ %s ]' % self.container_name,
+                rc=1,
+                msg='The container [ %s ] failed to be destroyed. Check'
+                    ' that lxc is available and that the container is in a'
+                    ' functional state.' % self.container_name
+            )
+
+    def _frozen(self, count=0):
+        """Ensure a container is frozen.
+
+        If the container does not exist the container will be created.
+
+        :param count: number of times this command has been called by itself.
+        :type count: ``int``
+        """
+
+        self.check_count(count=count, method='frozen')
+        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+            self._execute_command()
+
+            # Perform any configuration updates
+            self._config()
+
+            container_state = self._get_state()
+            if container_state == 'frozen':
+                pass
+            elif container_state == 'running':
+                self.container.freeze()
+                self.state_change = True
+            else:
+                self._container_startup()
+                self.container.freeze()
+                self.state_change = True
+
+            # Check if the container needs to have an archive created.
+            self._check_archive()
+
+            # Check if the container is to be cloned
+            self._check_clone()
+        else:
+            self._create()
+            count += 1
+            self._frozen(count)
+
+    def _restarted(self, count=0):
+        """Ensure a container is restarted.
+
+        If the container does not exist the container will be created.
+
+        :param count: number of times this command has been called by itself.
+        :type count: ``int``
+        """
+
+        self.check_count(count=count, method='restart')
+        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+            self._execute_command()
+
+            # Perform any configuration updates
+            self._config()
+
+            if self._get_state() != 'stopped':
+                self.container.stop()
+                self.state_change = True
+
+            # Run container startup
+            self._container_startup()
+
+            # Check if the container needs to have an archive created.
+            self._check_archive()
+
+            # Check if the container is to be cloned
+            self._check_clone()
+        else:
+            self._create()
+            count += 1
+            self._restarted(count)
+
+    def _stopped(self, count=0):
+        """Ensure a container is stopped.
+
+        If the container does not exist the container will be created.
+
+        :param count: number of times this command has been called by itself.
+ :type count: ``int`` + """ + + self.check_count(count=count, method='stop') + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + self._execute_command() + + # Perform any configuration updates + self._config() + + if self._get_state() != 'stopped': + self.container.stop() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + else: + self._create() + count += 1 + self._stopped(count) + + def _started(self, count=0): + """Ensure a container is started. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='start') + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): + container_state = self._get_state() + if container_state == 'running': + pass + elif container_state == 'frozen': + self._unfreeze() + elif not self._container_startup(): + self.failure( + lxc_container=self._container_data(), + error='Failed to start container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to start. Check to lxc is' + ' available and that the container is in a functional' + ' state.' % self.container_name + ) + + # Return data + self._execute_command() + + # Perform any configuration updates + self._config() + + # Check if the container needs to have an archive created. + self._check_archive() + + # Check if the container is to be cloned + self._check_clone() + else: + self._create() + count += 1 + self._started(count) + + def _get_lxc_vg(self): + """Return the name of the Volume Group used in LXC.""" + + build_command = [ + self.module.get_bin_path('lxc-config', True), + "lxc.bdev.lvm.vg" + ] + rc, vg, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to read LVM VG from LXC config', + command=' '.join(build_command) + ) + else: + return str(vg.strip()) + + def _lvm_lv_list(self): + """Return a list of all lv in a current vg.""" + + vg = self._get_lxc_vg() + build_command = [ + self.module.get_bin_path('lvs', True) + ] + rc, stdout, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to get list of LVs', + command=' '.join(build_command) + ) + + all_lvms = [i.split() for i in stdout.splitlines()][1:] + return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] + + def _get_vg_free_pe(self, vg_name): + """Return the available size of a given VG. + + :param vg_name: Name of volume. + :type vg_name: ``str`` + :returns: size and measurement of an LV + :type: ``tuple`` + """ + + build_command = [ + 'vgdisplay', + vg_name, + '--units', + 'g' + ] + rc, stdout, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to read vg %s' % vg_name, + command=' '.join(build_command) + ) + + vg_info = [i.strip() for i in stdout.splitlines()][1:] + free_pe = [i for i in vg_info if i.startswith('Free')] + _free_pe = free_pe[0].split() + return float(_free_pe[-2]), _free_pe[-1] + + def _get_lv_size(self, lv_name): + """Return the available size of a given LV. + + :param lv_name: Name of volume. 
+ :type lv_name: ``str`` + :returns: size and measurement of an LV + :type: ``tuple`` + """ + + vg = self._get_lxc_vg() + lv = os.path.join(vg, lv_name) + build_command = [ + 'lvdisplay', + lv, + '--units', + 'g' + ] + rc, stdout, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to read lv %s' % lv, + command=' '.join(build_command) + ) + + lv_info = [i.strip() for i in stdout.splitlines()][1:] + _free_pe = [i for i in lv_info if i.startswith('LV Size')] + free_pe = _free_pe[0].split() + return self._roundup(float(free_pe[-2])), free_pe[-1] + + def _lvm_snapshot_create(self, source_lv, snapshot_name, + snapshot_size_gb=5): + """Create an LVM snapshot. + + :param source_lv: Name of lv to snapshot + :type source_lv: ``str`` + :param snapshot_name: Name of lv snapshot + :type snapshot_name: ``str`` + :param snapshot_size_gb: Size of snapshot to create + :type snapshot_size_gb: ``int`` + """ + + vg = self._get_lxc_vg() + free_space, messurement = self._get_vg_free_pe(vg_name=vg) + + if free_space < float(snapshot_size_gb): + message = ( + 'Snapshot size [ %s ] is > greater than [ %s ] on volume group' + ' [ %s ]' % (snapshot_size_gb, free_space, vg) + ) + self.failure( + error='Not enough space to create snapshot', + rc=2, + msg=message + ) + + # Create LVM Snapshot + build_command = [ + self.module.get_bin_path('lvcreate', True), + "-n", + snapshot_name, + "-s", + os.path.join(vg, source_lv), + "-L%sg" % snapshot_size_gb + ] + rc, stdout, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to Create LVM snapshot %s/%s --> %s' + % (vg, source_lv, snapshot_name) + ) + + def _lvm_lv_mount(self, lv_name, mount_point): + """mount an lv. + + :param lv_name: name of the logical volume to mount + :type lv_name: ``str`` + :param mount_point: path on the file system that is mounted. + :type mount_point: ``str`` + """ + + vg = self._get_lxc_vg() + + build_command = [ + self.module.get_bin_path('mount', True), + "/dev/%s/%s" % (vg, lv_name), + mount_point, + ] + rc, stdout, err = self.module.run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to mountlvm lv %s/%s to %s' + % (vg, lv_name, mount_point) + ) + + def _create_tar(self, source_dir): + """Create an archive of a given ``source_dir`` to ``output_path``. + + :param source_dir: Path to the directory to be archived. + :type source_dir: ``str`` + """ + + old_umask = os.umask(int('0077', 8)) + + archive_path = self.module.params['archive_path'] + if not os.path.isdir(archive_path): + os.makedirs(archive_path) + + archive_compression = self.module.params['archive_compression'] + compression_type = LXC_COMPRESSION_MAP[archive_compression] + + # remove trailing / if present. + archive_name = '%s.%s' % ( + os.path.join( + archive_path, + self.container_name + ), + compression_type['extension'] + ) + + build_command = [ + self.module.get_bin_path('tar', True), + '--directory=%s' % os.path.realpath(source_dir), + compression_type['argument'], + archive_name, + '.' + ] + + rc, stdout, err = self.module.run_command( + build_command + ) + + os.umask(old_umask) + + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to create tar archive', + command=' '.join(build_command) + ) + + return archive_name + + def _lvm_lv_remove(self, lv_name): + """Remove an LV. 
+
+        :param lv_name: The name of the logical volume
+        :type lv_name: ``str``
+        """
+
+        vg = self._get_lxc_vg()
+        build_command = [
+            self.module.get_bin_path('lvremove', True),
+            "-f",
+            "%s/%s" % (vg, lv_name),
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+                command=' '.join(build_command)
+            )
+
+    def _rsync_data(self, container_path, temp_dir):
+        """Sync the container directory to the temp directory.
+
+        :param container_path: path to the container root filesystem
+        :type container_path: ``str``
+        :param temp_dir: path to the temporary local working directory
+        :type temp_dir: ``str``
+        """
+        # This loop is created to support overlayfs archives. This should
+        # squash all of the layers into a single archive.
+        fs_paths = container_path.split(':')
+        if 'overlayfs' in fs_paths:
+            fs_paths.pop(fs_paths.index('overlayfs'))
+
+        for fs_path in fs_paths:
+            # Set the path to the container data
+            fs_path = os.path.dirname(fs_path)
+
+            # Run the sync command
+            build_command = [
+                self.module.get_bin_path('rsync', True),
+                '-aHAX',
+                fs_path,
+                temp_dir,
+            ]
+            rc, stdout, err = self.module.run_command(
+                build_command,
+            )
+            if rc != 0:
+                self.failure(
+                    err=err,
+                    rc=rc,
+                    msg='failed to perform archive',
+                    command=' '.join(build_command)
+                )
+
+    def _unmount(self, mount_point):
+        """Unmount a file system.
+
+        :param mount_point: path on the file system that is mounted.
+        :type mount_point: ``str``
+        """
+
+        build_command = [
+            self.module.get_bin_path('umount', True),
+            mount_point,
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to unmount [ %s ]' % mount_point,
+                command=' '.join(build_command)
+            )
+
+    def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+        """Mount an overlayfs.
+
+        :param lowerdir: name/path of the lower directory
+        :type lowerdir: ``str``
+        :param upperdir: name/path of the upper directory
+        :type upperdir: ``str``
+        :param mount_point: path on the file system that is mounted.
+        :type mount_point: ``str``
+        """
+
+        build_command = [
+            self.module.get_bin_path('mount', True),
+            '-t', 'overlayfs',
+            '-o', 'lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+            'overlayfs',
+            mount_point,
+        ]
+        rc, stdout, err = self.module.run_command(build_command)
+        if rc != 0:
+            self.failure(
+                err=err,
+                rc=rc,
+                msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+                    % (lowerdir, upperdir, mount_point, build_command)
+            )
+
+    def _container_create_tar(self):
+        """Create a tar archive from an LXC container.
+ + The process is as follows: + * Stop or Freeze the container + * Create temporary dir + * Copy container and config to temporary directory + * If LVM backed: + * Create LVM snapshot of LV backing the container + * Mount the snapshot to tmpdir/rootfs + * Restore the state of the container + * Create tar of tmpdir + * Clean up + """ + + # Create a temp dir + temp_dir = tempfile.mkdtemp() + + # Set the name of the working dir, temp + container_name + work_dir = os.path.join(temp_dir, self.container_name) + + # LXC container rootfs + lxc_rootfs = self.container.get_config_item('lxc.rootfs') + + # Test if the containers rootfs is a block device + block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev')) + + # Test if the container is using overlayfs + overlayfs_backed = lxc_rootfs.startswith('overlayfs') + + mount_point = os.path.join(work_dir, 'rootfs') + + # Set the snapshot name if needed + snapshot_name = '%s_lxc_snapshot' % self.container_name + + container_state = self._get_state() + try: + # Ensure the original container is stopped or frozen + if container_state not in ['stopped', 'frozen']: + if container_state == 'running': + self.container.freeze() + else: + self.container.stop() + + # Sync the container data from the container_path to work_dir + self._rsync_data(lxc_rootfs, temp_dir) + + if block_backed: + if snapshot_name not in self._lvm_lv_list(): + if not os.path.exists(mount_point): + os.makedirs(mount_point) + + # Take snapshot + size, measurement = self._get_lv_size( + lv_name=self.container_name + ) + self._lvm_snapshot_create( + source_lv=self.container_name, + snapshot_name=snapshot_name, + snapshot_size_gb=size + ) + + # Mount snapshot + self._lvm_lv_mount( + lv_name=snapshot_name, + mount_point=mount_point + ) + else: + self.failure( + err='snapshot [ %s ] already exists' % snapshot_name, + rc=1, + msg='The snapshot [ %s ] already exists. Please clean' + ' up old snapshot of containers before continuing.' + % snapshot_name + ) + elif overlayfs_backed: + lowerdir, upperdir = lxc_rootfs.split(':')[1:] + self._overlayfs_mount( + lowerdir=lowerdir, + upperdir=upperdir, + mount_point=mount_point + ) + + # Set the state as changed and set a new fact + self.state_change = True + return self._create_tar(source_dir=work_dir) + finally: + if block_backed or overlayfs_backed: + # unmount snapshot + self._unmount(mount_point) + + if block_backed: + # Remove snapshot + self._lvm_lv_remove(snapshot_name) + + # Restore original state of container + if container_state == 'running': + if self._get_state() == 'frozen': + self.container.unfreeze() + else: + self.container.start() + + # Remove tmpdir + shutil.rmtree(temp_dir) + + def check_count(self, count, method): + if count > 1: + self.failure( + error='Failed to %s container' % method, + rc=1, + msg='The container [ %s ] failed to %s. Check to lxc is' + ' available and that the container is in a functional' + ' state.' % (self.container_name, method) + ) + + def failure(self, **kwargs): + """Return a Failure when running an Ansible command. + + :param error: ``str`` Error that occurred. + :param rc: ``int`` Return code while executing an Ansible command. + :param msg: ``str`` Message to report. 
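+        :param kwargs: ``dict`` All keyword arguments are passed straight
+                       through to fail_json.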
+ """ + + self.module.fail_json(**kwargs) + + def run(self): + """Run the main method.""" + + action = getattr(self, LXC_ANSIBLE_STATES[self.state]) + action() + + outcome = self._container_data() + if self.archive_info: + outcome.update(self.archive_info) + + if self.clone_info: + outcome.update(self.clone_info) + + self.module.exit_json( + changed=self.state_change, + lxc_container=outcome + ) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + template=dict( + type='str', + default='ubuntu' + ), + backing_store=dict( + type='str', + choices=list(LXC_BACKING_STORE.keys()), + default='dir' + ), + template_options=dict( + type='str' + ), + config=dict( + type='path', + ), + vg_name=dict( + type='str', + default='lxc' + ), + thinpool=dict( + type='str' + ), + fs_type=dict( + type='str', + default='ext4' + ), + fs_size=dict( + type='str', + default='5G' + ), + directory=dict( + type='path' + ), + zfs_root=dict( + type='str' + ), + lv_name=dict( + type='str' + ), + lxc_path=dict( + type='path' + ), + state=dict( + choices=list(LXC_ANSIBLE_STATES.keys()), + default='started' + ), + container_command=dict( + type='str' + ), + container_config=dict( + type='list', + elements='str' + ), + container_log=dict( + type='bool', + default=False + ), + container_log_level=dict( + choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], + default='INFO' + ), + clone_name=dict( + type='str', + ), + clone_snapshot=dict( + type='bool', + default='false' + ), + archive=dict( + type='bool', + default=False + ), + archive_path=dict( + type='path', + ), + archive_compression=dict( + choices=list(LXC_COMPRESSION_MAP.keys()), + default='gzip' + ) + ), + supports_check_mode=False, + required_if=([ + ('archive', True, ['archive_path']) + ]), + ) + + if not HAS_LXC: + module.fail_json( + msg='The `lxc` module is not importable. Check the requirements.' + ) + + if not module.params['lv_name']: + module.params['lv_name'] = module.params['name'] + + lxc_manage = LxcContainerManagement(module=module) + lxc_manage.run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/lxca_cmms.py b/ansible_collections/community/general/plugins/modules/lxca_cmms.py new file mode 100644 index 000000000..1f811a7ef --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/lxca_cmms.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +author: + - Naval Patel (@navalkp) + - Prashant Bhosale (@prabhosa) +module: lxca_cmms +short_description: Custom module for lxca cmms inventory utility +description: + - This module returns/displays a inventory details of cmms + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + uuid: + description: + uuid of device, this is string with length greater than 16. + type: str + + command_options: + description: + options to filter nodes information + default: cmms + choices: + - cmms + - cmms_by_uuid + - cmms_by_chassis_uuid + type: str + + chassis: + description: + uuid of chassis, this is string with length greater than 16. 
+ type: str + +extends_documentation_fragment: + - community.general.lxca_common + - community.general.attributes + +''' + +EXAMPLES = ''' +# get all cmms info +- name: Get nodes data from LXCA + community.general.lxca_cmms: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + +# get specific cmms info by uuid +- name: Get nodes data from LXCA + community.general.lxca_cmms: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + uuid: "3C737AA5E31640CE949B10C129A8B01F" + command_options: cmms_by_uuid + +# get specific cmms info by chassis uuid +- name: Get nodes data from LXCA + community.general.lxca_cmms: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + chassis: "3C737AA5E31640CE949B10C129A8B01F" + command_options: cmms_by_chassis_uuid + +''' + +RETURN = r''' +result: + description: cmms detail from lxca + returned: success + type: dict + sample: + cmmList: + - machineType: '' + model: '' + type: 'CMM' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + - machineType: '' + model: '' + type: 'CMM' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + # Multiple cmms details +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object +try: + from pylxca import cmms +except ImportError: + pass + + +UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.' +CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.' +SUCCESS_MSG = "Success %s result" + + +def _cmms(module, lxca_con): + return cmms(lxca_con) + + +def _cmms_by_uuid(module, lxca_con): + if not module.params['uuid']: + module.fail_json(msg=UUID_REQUIRED) + return cmms(lxca_con, module.params['uuid']) + + +def _cmms_by_chassis_uuid(module, lxca_con): + if not module.params['chassis']: + module.fail_json(msg=CHASSIS_UUID_REQUIRED) + return cmms(lxca_con, chassis=module.params['chassis']) + + +def setup_module_object(): + """ + this function merge argument spec and create ansible module object + :return: + """ + args_spec = dict(LXCA_COMMON_ARGS) + args_spec.update(INPUT_ARG_SPEC) + module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) + + return module + + +FUNC_DICT = { + 'cmms': _cmms, + 'cmms_by_uuid': _cmms_by_uuid, + 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid, +} + + +INPUT_ARG_SPEC = dict( + command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', + 'cmms_by_chassis_uuid']), + uuid=dict(default=None), + chassis=dict(default=None) +) + + +def execute_module(module): + """ + This function invoke commands + :param module: Ansible module object + """ + try: + with connection_object(module) as lxca_con: + result = FUNC_DICT[module.params['command_options']](module, lxca_con) + module.exit_json(changed=False, + msg=SUCCESS_MSG % module.params['command_options'], + result=result) + except Exception as exception: + error_msg = '; '.join((e) for e in exception.args) + module.fail_json(msg=error_msg, exception=traceback.format_exc()) + + +def main(): + module = setup_module_object() + has_pylxca(module) + execute_module(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/lxca_nodes.py b/ansible_collections/community/general/plugins/modules/lxca_nodes.py new file mode 100644 index 000000000..3b37322ed 
--- /dev/null +++ b/ansible_collections/community/general/plugins/modules/lxca_nodes.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +author: + - Naval Patel (@navalkp) + - Prashant Bhosale (@prabhosa) +module: lxca_nodes +short_description: Custom module for lxca nodes inventory utility +description: + - This module returns/displays a inventory details of nodes + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + uuid: + description: + uuid of device, this is string with length greater than 16. + type: str + + command_options: + description: + options to filter nodes information + default: nodes + choices: + - nodes + - nodes_by_uuid + - nodes_by_chassis_uuid + - nodes_status_managed + - nodes_status_unmanaged + type: str + + chassis: + description: + uuid of chassis, this is string with length greater than 16. + type: str + +extends_documentation_fragment: + - community.general.lxca_common + - community.general.attributes + +''' + +EXAMPLES = ''' +# get all nodes info +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes + +# get specific nodes info by uuid +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + uuid: "3C737AA5E31640CE949B10C129A8B01F" + command_options: nodes_by_uuid + +# get specific nodes info by chassis uuid +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + chassis: "3C737AA5E31640CE949B10C129A8B01F" + command_options: nodes_by_chassis_uuid + +# get managed nodes +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes_status_managed + +# get unmanaged nodes +- name: Get nodes data from LXCA + community.general.lxca_nodes: + login_user: USERID + login_password: Password + auth_url: "https://10.243.15.168" + command_options: nodes_status_unmanaged + +''' + +RETURN = r''' +result: + description: nodes detail from lxca + returned: always + type: dict + sample: + nodeList: + - machineType: '6241' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + - machineType: '8871' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + # bunch of properties + # Multiple nodes details +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object +try: + from pylxca import nodes +except ImportError: + pass + + +UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.' +CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.' 
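+# command_options values are dispatched through FUNC_DICT (defined below).
+# A minimal sketch of the flow for one option, assuming an established
+# pylxca connection object lxca_con:
+#
+#     handler = FUNC_DICT['nodes_by_uuid']   # -> _nodes_by_uuid
+#     result = handler(module, lxca_con)     # validates uuid, calls nodes()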
+SUCCESS_MSG = "Success %s result" + + +def _nodes(module, lxca_con): + return nodes(lxca_con) + + +def _nodes_by_uuid(module, lxca_con): + if not module.params['uuid']: + module.fail_json(msg=UUID_REQUIRED) + return nodes(lxca_con, module.params['uuid']) + + +def _nodes_by_chassis_uuid(module, lxca_con): + if not module.params['chassis']: + module.fail_json(msg=CHASSIS_UUID_REQUIRED) + return nodes(lxca_con, chassis=module.params['chassis']) + + +def _nodes_status_managed(module, lxca_con): + return nodes(lxca_con, status='managed') + + +def _nodes_status_unmanaged(module, lxca_con): + return nodes(lxca_con, status='unmanaged') + + +def setup_module_object(): + """ + this function merge argument spec and create ansible module object + :return: + """ + args_spec = dict(LXCA_COMMON_ARGS) + args_spec.update(INPUT_ARG_SPEC) + module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False) + + return module + + +FUNC_DICT = { + 'nodes': _nodes, + 'nodes_by_uuid': _nodes_by_uuid, + 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid, + 'nodes_status_managed': _nodes_status_managed, + 'nodes_status_unmanaged': _nodes_status_unmanaged, +} + + +INPUT_ARG_SPEC = dict( + command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid', + 'nodes_by_chassis_uuid', + 'nodes_status_managed', + 'nodes_status_unmanaged']), + uuid=dict(default=None), chassis=dict(default=None) +) + + +def execute_module(module): + """ + This function invoke commands + :param module: Ansible module object + """ + try: + with connection_object(module) as lxca_con: + result = FUNC_DICT[module.params['command_options']](module, lxca_con) + module.exit_json(changed=False, + msg=SUCCESS_MSG % module.params['command_options'], + result=result) + except Exception as exception: + error_msg = '; '.join(exception.args) + module.fail_json(msg=error_msg, exception=traceback.format_exc()) + + +def main(): + module = setup_module_object() + has_pylxca(module) + execute_module(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/lxd_container.py b/ansible_collections/community/general/plugins/modules/lxd_container.py new file mode 100644 index 000000000..f10fc4872 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/lxd_container.py @@ -0,0 +1,862 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Hiroaki Nakamura +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: lxd_container +short_description: Manage LXD instances +description: + - Management of LXD containers and virtual machines. +author: "Hiroaki Nakamura (@hnakamur)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 6.4.0 + diff_mode: + support: full + version_added: 6.4.0 +options: + name: + description: + - Name of an instance. + type: str + required: true + project: + description: + - 'Project of an instance. + See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).' + required: false + type: str + version_added: 4.8.0 + architecture: + description: + - 'The architecture for the instance (for example C(x86_64) or C(i686)). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' 
+ type: str + required: false + config: + description: + - 'The config for the instance (for example C({"limits.cpu": "2"})). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' + - If the instance already exists and its "config" values in metadata + obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines) + are different, this module tries to apply the configurations. + - The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true). + type: dict + required: false + ignore_volatile_options: + description: + - If set to C(true), options starting with C(volatile.) are ignored. As a result, + they are reapplied for each execution. + - This default behavior can be changed by setting this option to C(false). + - The default value changed from C(true) to C(false) in community.general 6.0.0. + type: bool + required: false + default: false + version_added: 3.7.0 + profiles: + description: + - Profile to be used by the instance. + type: list + elements: str + devices: + description: + - 'The devices for the instance + (for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' + type: dict + required: false + ephemeral: + description: + - Whether or not the instance is ephemeral (for example C(true) or C(false)). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1). + required: false + type: bool + source: + description: + - 'The source for the instance + (e.g. { "type": "image", + "mode": "pull", + "server": "https://images.linuxcontainers.org", + "protocol": "lxd", + "alias": "ubuntu/xenial/amd64" }).' + - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.' + - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).' + required: false + type: dict + state: + choices: + - started + - stopped + - restarted + - absent + - frozen + description: + - Define the state of an instance. + required: false + default: started + type: str + target: + description: + - For cluster deployments. Will attempt to create an instance on a target node. + If the instance exists elsewhere in a cluster, then it will not be replaced or moved. + The name should respond to same name of the node you see in C(lxc cluster list). + type: str + required: false + version_added: 1.0.0 + timeout: + description: + - A timeout for changing the state of the instance. + - This is also used as a timeout for waiting until IPv4 addresses + are set to the all network interfaces in the instance after + starting or restarting. + required: false + default: 30 + type: int + type: + description: + - Instance type can be either C(virtual-machine) or C(container). + required: false + default: container + choices: + - container + - virtual-machine + type: str + version_added: 4.1.0 + wait_for_ipv4_addresses: + description: + - If this is true, the C(lxd_container) waits until IPv4 addresses + are set to the all network interfaces in the instance after + starting or restarting. + required: false + default: false + type: bool + wait_for_container: + description: + - If set to C(true), the tasks will wait till the task reports a + success status when performing container operations. 
+ default: false + type: bool + version_added: 4.4.0 + force_stop: + description: + - If this is true, the C(lxd_container) forces to stop the instance + when it stops or restarts the instance. + required: false + default: false + type: bool + url: + description: + - The unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The unix domain socket path when LXD is installed by snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C(${HOME}/.config/lxc/client.key). + required: false + aliases: [ key_file ] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt). + required: false + aliases: [ cert_file ] + type: path + trust_password: + description: + - The client trusted password. + - 'You need to set this password on the LXD server before + running this module using the following command: + C(lxc config set core.trust_password ). + See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If trust_password is set, this module send a request for + authentication before sending any requests. + required: false + type: str +notes: + - Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance + with a name that already existed in the users namespace the module will + simply return as "unchanged". + - There are two ways to run commands inside a container or virtual machine, using the command + module or using the ansible lxd connection plugin bundled in Ansible >= + 2.1, the later requires python to be installed in the instance which can + be done with the command module. + - You can copy a file from the host to the instance + with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the C(community.general.lxd) connection plugin. + See the example below. + - You can copy a file in the created instance to the localhost + with C(command=lxc file pull instance_name/dir/filename filename). + See the first example below. +''' + +EXAMPLES = ''' +# An example for creating a Ubuntu container and install python +- hosts: localhost + connection: local + tasks: + - name: Create a started container + community.general.lxd_container: + name: mycontainer + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + server: https://images.linuxcontainers.org + protocol: lxd # if you get a 404, try setting protocol: simplestreams + alias: ubuntu/xenial/amd64 + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + + - name: Check python is installed in container + delegate_to: mycontainer + ansible.builtin.raw: dpkg -s python + register: python_install_check + failed_when: python_install_check.rc not in [0, 1] + changed_when: false + + - name: Install python in container + delegate_to: mycontainer + ansible.builtin.raw: apt-get install -y python + when: python_install_check.rc == 1 + +# An example for creating an Ubuntu 14.04 container using an image fingerprint. +# This requires changing 'server' and 'protocol' key values, replacing the +# 'alias' key with with 'fingerprint' and supplying an appropriate value that +# matches the container image you wish to use. 
+- hosts: localhost + connection: local + tasks: + - name: Create a started container + community.general.lxd_container: + name: mycontainer + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + # Provides current (and older) Ubuntu images with listed fingerprints + server: https://cloud-images.ubuntu.com/releases + # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list') + protocol: simplestreams + # This provides an Ubuntu 14.04 LTS amd64 image from 20150814. + fingerprint: e9a8bdfab6dc + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + +# An example for creating a container in a project other than the default one +- hosts: localhost + connection: local + tasks: + - name: Create a started container in project mytestproject + community.general.lxd_container: + name: mycontainer + project: mytestproject + ignore_volatile_options: true + state: started + source: + protocol: simplestreams + type: image + mode: pull + server: https://images.linuxcontainers.org + alias: ubuntu/20.04/cloud + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + +# An example for deleting a container +- hosts: localhost + connection: local + tasks: + - name: Delete a container + community.general.lxd_container: + name: mycontainer + state: absent + type: container + +# An example for restarting a container +- hosts: localhost + connection: local + tasks: + - name: Restart a container + community.general.lxd_container: + name: mycontainer + state: restarted + type: container + +# An example for restarting a container using https to connect to the LXD server +- hosts: localhost + connection: local + tasks: + - name: Restart a container + community.general.lxd_container: + url: https://127.0.0.1:8443 + # These client_cert and client_key values are equal to the default values. + #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + trust_password: mypassword + name: mycontainer + state: restarted + +# Note your container must be in the inventory for the below example. +# +# [containers] +# mycontainer ansible_connection=lxd +# +- hosts: + - mycontainer + tasks: + - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts" + ansible.builtin.fetch: + src: /etc/hosts + dest: /tmp/mycontainer-hosts + flat: true + +# An example for LXD cluster deployments. This example will create two new containers on specific +# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster +# members that the LXD cluster recognizes, not Ansible inventory names, see: 'lxc cluster list'. +# LXD API calls can be made to any LXD member; in this example, we send API requests to +# 'node01.example.com', which matches the Ansible inventory name.
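+# Note: 'target' only affects where a new instance is created; an instance that already exists
+# elsewhere in the cluster is not replaced or moved.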
+- hosts: node01.example.com + tasks: + - name: Create LXD container + community.general.lxd_container: + name: new-container-1 + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + alias: ubuntu/xenial/amd64 + target: node01 + + - name: Create container on another node + community.general.lxd_container: + name: new-container-2 + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + alias: ubuntu/xenial/amd64 + target: node02 + +# An example for creating a virtual machine +- hosts: localhost + connection: local + tasks: + - name: Create a virtual machine + community.general.lxd_container: + name: new-vm-1 + type: virtual-machine + state: started + ignore_volatile_options: true + wait_for_ipv4_addresses: true + profiles: ["default"] + source: + protocol: simplestreams + type: image + mode: pull + server: https://images.linuxcontainers.org + alias: debian/11 + timeout: 600 +''' + +RETURN = ''' +addresses: + description: Mapping from the network device name to a list of IPv4 addresses in the instance. + returned: when state is started or restarted + type: dict + sample: {"eth0": ["10.155.92.191"]} +old_state: + description: The old state of the instance. + returned: when state is started or restarted + type: str + sample: "stopped" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the instance. + returned: success + type: list + sample: ["create", "start"] +''' +import copy +import datetime +import os +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +from ansible.module_utils.six.moves.urllib.parse import urlencode + +# LXD_ANSIBLE_STATES is a map of states to the methods used +# when a particular state is invoked. +LXD_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen', +} + +# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible +# lxc_container module state parameter value. +ANSIBLE_LXD_STATES = { + 'Running': 'started', + 'Stopped': 'stopped', + 'Frozen': 'frozen', +} + +# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint +ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' + +# CONFIG_PARAMS is a list of config attribute names. +CONFIG_PARAMS = [ + 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source' +] + +# CONFIG_CREATION_PARAMS is a list of attribute names that are only applied +# on instance creation. +CONFIG_CREATION_PARAMS = ['source'] + + +class LXDContainerManagement(object): + def __init__(self, module): + """Management of LXC containers via Ansible. + + :param module: Processed Ansible Module.
+ :type module: ``object`` + """ + self.module = module + self.name = self.module.params['name'] + self.project = self.module.params['project'] + self._build_config() + + self.state = self.module.params['state'] + + self.timeout = self.module.params['timeout'] + self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses'] + self.force_stop = self.module.params['force_stop'] + self.addresses = None + self.target = self.module.params['target'] + self.wait_for_container = self.module.params['wait_for_container'] + + self.type = self.module.params['type'] + + # LXD Rest API provides additional endpoints for creating containers and virtual-machines. + self.api_endpoint = None + if self.type == 'container': + self.api_endpoint = '/1.0/containers' + elif self.type == 'virtual-machine': + self.api_endpoint = '/1.0/virtual-machines' + + self.key_file = self.module.params.get('client_key') + if self.key_file is None: + self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME']) + self.cert_file = self.module.params.get('client_cert') + if self.cert_file is None: + self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME']) + self.debug = self.module._verbosity >= 4 + + try: + if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params['url'] + elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): + self.url = self.module.params['snap_url'] + else: + self.url = self.module.params['url'] + except Exception as e: + self.module.fail_json(msg=e.msg) + + try: + self.client = LXDClient( + self.url, key_file=self.key_file, cert_file=self.cert_file, + debug=self.debug + ) + except LXDClientException as e: + self.module.fail_json(msg=e.msg) + self.trust_password = self.module.params.get('trust_password', None) + self.actions = [] + self.diff = {'before': {}, 'after': {}} + self.old_instance_json = {} + self.old_sections = {} + + def _build_config(self): + self.config = {} + for attr in CONFIG_PARAMS: + param_val = self.module.params.get(attr, None) + if param_val is not None: + self.config[attr] = param_val + + def _get_instance_json(self): + url = '{0}/{1}'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + return self.client.do('GET', url, ok_error_codes=[404]) + + def _get_instance_state_json(self): + url = '{0}/{1}/state'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + return self.client.do('GET', url, ok_error_codes=[404]) + + @staticmethod + def _instance_json_to_module_state(resp_json): + if resp_json['type'] == 'error': + return 'absent' + return ANSIBLE_LXD_STATES[resp_json['metadata']['status']] + + def _change_state(self, action, force_stop=False): + url = '{0}/{1}/state'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + body_json = {'action': action, 'timeout': self.timeout} + if force_stop: + body_json['force'] = True + if not self.module.check_mode: + return self.client.do('PUT', url, body_json=body_json) + + def _create_instance(self): + url = self.api_endpoint + url_params = dict() + if self.target: + url_params['target'] = self.target + if self.project: + url_params['project'] = self.project + if url_params: + url = '{0}?{1}'.format(url, urlencode(url_params)) + config = self.config.copy() + config['name'] = self.name + if not self.module.check_mode: + self.client.do('POST', 
url, config, wait_for_container=self.wait_for_container) + self.actions.append('create') + + def _start_instance(self): + self._change_state('start') + self.actions.append('start') + + def _stop_instance(self): + self._change_state('stop', self.force_stop) + self.actions.append('stop') + + def _restart_instance(self): + self._change_state('restart', self.force_stop) + self.actions.append('restart') + + def _delete_instance(self): + url = '{0}/{1}'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + if not self.module.check_mode: + self.client.do('DELETE', url) + self.actions.append('delete') + + def _freeze_instance(self): + self._change_state('freeze') + self.actions.append('freeze') + + def _unfreeze_instance(self): + self._change_state('unfreeze') + self.actions.append('unfreeze') + + def _instance_ipv4_addresses(self, ignore_devices=None): + ignore_devices = ['lo'] if ignore_devices is None else ignore_devices + data = (self._get_instance_state_json() or {}).get('metadata', None) or {} + network = dict((k, v) for k, v in (data.get('network', None) or {}).items() if k not in ignore_devices) + addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) + return addresses + + @staticmethod + def _has_all_ipv4_addresses(addresses): + return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values()) + + def _get_addresses(self): + try: + due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout) + while datetime.datetime.now() < due: + time.sleep(1) + addresses = self._instance_ipv4_addresses() + if self._has_all_ipv4_addresses(addresses) or self.module.check_mode: + self.addresses = addresses + return + except LXDClientException as e: + e.msg = 'timeout for getting IPv4 addresses' + raise + + def _started(self): + if self.old_state == 'absent': + self._create_instance() + self._start_instance() + else: + if self.old_state == 'frozen': + self._unfreeze_instance() + elif self.old_state == 'stopped': + self._start_instance() + if self._needs_to_apply_instance_configs(): + self._apply_instance_configs() + if self.wait_for_ipv4_addresses: + self._get_addresses() + + def _stopped(self): + if self.old_state == 'absent': + self._create_instance() + else: + if self.old_state == 'stopped': + if self._needs_to_apply_instance_configs(): + self._start_instance() + self._apply_instance_configs() + self._stop_instance() + else: + if self.old_state == 'frozen': + self._unfreeze_instance() + if self._needs_to_apply_instance_configs(): + self._apply_instance_configs() + self._stop_instance() + + def _restarted(self): + if self.old_state == 'absent': + self._create_instance() + self._start_instance() + else: + if self.old_state == 'frozen': + self._unfreeze_instance() + if self._needs_to_apply_instance_configs(): + self._apply_instance_configs() + self._restart_instance() + if self.wait_for_ipv4_addresses: + self._get_addresses() + + def _destroyed(self): + if self.old_state != 'absent': + if self.old_state == 'frozen': + self._unfreeze_instance() + if self.old_state != 'stopped': + self._stop_instance() + self._delete_instance() + + def _frozen(self): + if self.old_state == 'absent': + self._create_instance() + self._start_instance() + self._freeze_instance() + else: + if self.old_state == 'stopped': + self._start_instance() + if self._needs_to_apply_instance_configs(): + self._apply_instance_configs() + self._freeze_instance() + + def 
_needs_to_change_instance_config(self, key): + if key not in self.config: + return False + + if key == 'config': + # self.old_sections is already filtered for volatile keys if necessary + old_configs = dict(self.old_sections.get(key, None) or {}) + for k, v in self.config['config'].items(): + if k not in old_configs: + return True + if old_configs[k] != v: + return True + return False + else: + old_configs = self.old_sections.get(key, {}) + return self.config[key] != old_configs + + def _needs_to_apply_instance_configs(self): + for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS): + if self._needs_to_change_instance_config(param): + return True + return False + + def _apply_instance_configs(self): + old_metadata = copy.deepcopy(self.old_instance_json).get('metadata', None) or {} + body_json = {} + for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS): + if param in old_metadata: + body_json[param] = old_metadata[param] + + if self._needs_to_change_instance_config(param): + if param == 'config': + body_json['config'] = body_json.get('config', None) or {} + for k, v in self.config['config'].items(): + body_json['config'][k] = v + else: + body_json[param] = self.config[param] + self.diff['after']['instance'] = body_json + url = '{0}/{1}'.format(self.api_endpoint, self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + if not self.module.check_mode: + self.client.do('PUT', url, body_json=body_json) + self.actions.append('apply_instance_configs') + + def run(self): + """Run the main method.""" + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + self.ignore_volatile_options = self.module.params.get('ignore_volatile_options') + + self.old_instance_json = self._get_instance_json() + self.old_sections = dict( + (section, content) if not isinstance(content, dict) + else (section, dict((k, v) for k, v in content.items() + if not (self.ignore_volatile_options and k.startswith('volatile.')))) + for section, content in (self.old_instance_json.get('metadata', None) or {}).items() + if section in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS) + ) + + self.diff['before']['instance'] = self.old_sections + # preliminary, will be overwritten in _apply_instance_configs() if called + self.diff['after']['instance'] = self.config + + self.old_state = self._instance_json_to_module_state(self.old_instance_json) + self.diff['before']['state'] = self.old_state + self.diff['after']['state'] = self.state + + action = getattr(self, LXD_ANSIBLE_STATES[self.state]) + action() + + state_changed = len(self.actions) > 0 + result_json = { + 'log_verbosity': self.module._verbosity, + 'changed': state_changed, + 'old_state': self.old_state, + 'actions': self.actions, + 'diff': self.diff, + } + if self.client.debug: + result_json['logs'] = self.client.logs + if self.addresses is not None: + result_json['addresses'] = self.addresses + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions, + 'diff': self.diff, + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True, + ), + project=dict( + type='str', + ), + architecture=dict( + type='str', + ), + config=dict( + type='dict', + ), + 
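+ # The 'config' dict above is compared key-by-key against the instance metadata; 'volatile.*' keys can be excluded from that comparison via ignore_volatile_options below.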
ignore_volatile_options=dict( + type='bool', + default=False, + ), + devices=dict( + type='dict', + ), + ephemeral=dict( + type='bool', + ), + profiles=dict( + type='list', + elements='str', + ), + source=dict( + type='dict', + ), + state=dict( + choices=list(LXD_ANSIBLE_STATES.keys()), + default='started', + ), + target=dict( + type='str', + ), + timeout=dict( + type='int', + default=30 + ), + type=dict( + type='str', + default='container', + choices=['container', 'virtual-machine'], + ), + wait_for_container=dict( + type='bool', + default=False, + ), + wait_for_ipv4_addresses=dict( + type='bool', + default=False, + ), + force_stop=dict( + type='bool', + default=False, + ), + url=dict( + type='str', + default=ANSIBLE_LXD_DEFAULT_URL, + ), + snap_url=dict( + type='str', + default='unix:/var/snap/lxd/common/lxd/unix.socket', + ), + client_key=dict( + type='path', + aliases=['key_file'], + ), + client_cert=dict( + type='path', + aliases=['cert_file'], + ), + trust_password=dict(type='str', no_log=True), + ), + supports_check_mode=True, + ) + + lxd_manage = LXDContainerManagement(module=module) + lxd_manage.run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/lxd_profile.py b/ansible_collections/community/general/plugins/modules/lxd_profile.py new file mode 100644 index 000000000..45f499b78 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/lxd_profile.py @@ -0,0 +1,563 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Hiroaki Nakamura +# Copyright (c) 2020, Frank Dornheim +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: lxd_profile +short_description: Manage LXD profiles +description: + - Management of LXD profiles. +author: "Hiroaki Nakamura (@hnakamur)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of a profile. + required: true + type: str + project: + description: + - 'Project of a profile. + See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).' + type: str + required: false + version_added: 4.8.0 + description: + description: + - Description of the profile. + type: str + config: + description: + - 'The config for the profile (e.g. {"limits.memory": "4GB"}). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3).' + - If the profile already exists and its "config" value in metadata + obtained from + GET /1.0/profiles/ + U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19) + is different, then this module tries to apply the configurations. + - Not all config values can be applied to an existing profile; + you may need to delete and recreate the profile. + required: false + type: dict + devices: + description: + - 'The devices for the profile + (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}}). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3).' + required: false + type: dict + new_name: + description: + - A new name of a profile. + - If this parameter is specified, the profile will be renamed to this name.
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11). + required: false + type: str + merge_profile: + description: + - Merge the configuration of the present profile with the new desired configuration, + instead of replacing it. + required: false + default: false + type: bool + version_added: 2.1.0 + state: + choices: + - present + - absent + description: + - Define the state of a profile. + required: false + default: present + type: str + url: + description: + - The unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The unix domain socket path when LXD is installed by the snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.key). + required: false + aliases: [ key_file ] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.crt). + required: false + aliases: [ cert_file ] + type: path + trust_password: + description: + - The client trusted password. + - 'You need to set this password on the LXD server before + running this module using the following command: + C(lxc config set core.trust_password ). + See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If trust_password is set, this module sends a request for + authentication before sending any requests. + required: false + type: str +notes: + - Profiles must have a unique name. If you attempt to create a profile + with a name that already exists in the user's namespace, the module will + simply return as "unchanged". +''' + +EXAMPLES = ''' +# An example for creating a profile +- hosts: localhost + connection: local + tasks: + - name: Create a profile + community.general.lxd_profile: + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic + +# An example for creating a profile in project mytestproject +- hosts: localhost + connection: local + tasks: + - name: Create a profile + community.general.lxd_profile: + name: testprofile + project: mytestproject + state: present + config: {} + description: test profile in project mytestproject + devices: {} + +# An example for creating a profile via an https connection +- hosts: localhost + connection: local + tasks: + - name: Create macvlan profile + community.general.lxd_profile: + url: https://127.0.0.1:8443 + # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + trust_password: mypassword + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic + +# An example for modifying/merging a profile +- hosts: localhost + connection: local + tasks: + - name: Merge a profile + community.general.lxd_profile: + merge_profile: true + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic + +# An example for deleting a profile +- hosts: localhost + connection: local + tasks: + - name: Delete a profile + community.general.lxd_profile: + name: macvlan + state: absent + +# An example for renaming a profile +- hosts: localhost + connection: local + tasks: + - name: Rename a profile + community.general.lxd_profile: + name: macvlan + new_name: macvlan2 + state: present +''' + +RETURN = ''' +old_state: + description: The old state of the profile. + returned: success + type: str + sample: "absent" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the profile. + returned: success + type: list + sample: ["create"] +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +from ansible.module_utils.six.moves.urllib.parse import urlencode + +# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint +ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' + +# PROFILES_STATES is a list of supported states +PROFILES_STATES = [ + 'present', 'absent' +] + +# CONFIG_PARAMS is a list of config attribute names. +CONFIG_PARAMS = [ + 'config', 'description', 'devices' +] + + +class LXDProfileManagement(object): + def __init__(self, module): + """Management of LXC profiles via Ansible. + + :param module: Processed Ansible Module.
+ :type module: ``object`` + """ + self.module = module + self.name = self.module.params['name'] + self.project = self.module.params['project'] + self._build_config() + self.state = self.module.params['state'] + self.new_name = self.module.params.get('new_name', None) + + self.key_file = self.module.params.get('client_key') + if self.key_file is None: + self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME']) + self.cert_file = self.module.params.get('client_cert') + if self.cert_file is None: + self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME']) + self.debug = self.module._verbosity >= 4 + + try: + if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params['url'] + elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): + self.url = self.module.params['snap_url'] + else: + self.url = self.module.params['url'] + except Exception as e: + self.module.fail_json(msg=e.msg) + + try: + self.client = LXDClient( + self.url, key_file=self.key_file, cert_file=self.cert_file, + debug=self.debug + ) + except LXDClientException as e: + self.module.fail_json(msg=e.msg) + self.trust_password = self.module.params.get('trust_password', None) + self.actions = [] + + def _build_config(self): + self.config = {} + for attr in CONFIG_PARAMS: + param_val = self.module.params.get(attr, None) + if param_val is not None: + self.config[attr] = param_val + + def _get_profile_json(self): + url = '/1.0/profiles/{0}'.format(self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + return self.client.do('GET', url, ok_error_codes=[404]) + + @staticmethod + def _profile_json_to_module_state(resp_json): + if resp_json['type'] == 'error': + return 'absent' + return 'present' + + def _update_profile(self): + if self.state == 'present': + if self.old_state == 'absent': + if self.new_name is None: + self._create_profile() + else: + self.module.fail_json( + msg='new_name must not be set when the profile does not exist and the state is present', + changed=False) + else: + if self.new_name is not None and self.new_name != self.name: + self._rename_profile() + if self._needs_to_apply_profile_configs(): + self._apply_profile_configs() + elif self.state == 'absent': + if self.old_state == 'present': + if self.new_name is None: + self._delete_profile() + else: + self.module.fail_json( + msg='new_name must not be set when the profile exists and the specified state is absent', + changed=False) + + def _create_profile(self): + url = '/1.0/profiles' + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + config = self.config.copy() + config['name'] = self.name + self.client.do('POST', url, config) + self.actions.append('create') + + def _rename_profile(self): + url = '/1.0/profiles/{0}'.format(self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + config = {'name': self.new_name} + self.client.do('POST', url, config) + self.actions.append('rename') + self.name = self.new_name + + def _needs_to_change_profile_config(self, key): + if key not in self.config: + return False + old_configs = self.old_profile_json['metadata'].get(key, None) + return self.config[key] != old_configs + + def _needs_to_apply_profile_configs(self): + return ( + self._needs_to_change_profile_config('config') or + self._needs_to_change_profile_config('description') or + self._needs_to_change_profile_config('devices') + ) + + def _merge_dicts(self, source, 
destination): + """Merge two dictionaries. + + Recursively merge the source dict into the destination dict, in place. + + Args: + dict(source): source dict + dict(destination): destination dict + Kwargs: + None + Raises: + None + Returns: + dict(destination): merged dict""" + for key, value in source.items(): + if isinstance(value, dict): + # get node or create one + node = destination.setdefault(key, {}) + self._merge_dicts(value, node) + else: + destination[key] = value + return destination + + def _merge_config(self, config): + """ merge profile + + Merge the configuration of the present profile and the new desired config items + + Args: + dict(config): Dict with the old config in 'metadata' and new config in 'config' + Kwargs: + None + Raises: + None + Returns: + dict(config): new config""" + # merge or copy the sections from the existing profile to 'config' + for item in ['config', 'description', 'devices', 'name', 'used_by']: + if item in config: + config[item] = self._merge_dicts(config['metadata'][item], config[item]) + else: + config[item] = config['metadata'][item] + # merge or copy the sections from the ansible-task to 'config' + return self._merge_dicts(self.config, config) + + def _generate_new_config(self, config): + """ rebuild profile + + Rebuild the profile from the configuration provided in the play. + Existing configurations are discarded. + + This is the default behavior. + + Args: + dict(config): Dict with the old config in 'metadata' and new config in 'config' + Kwargs: + None + Raises: + None + Returns: + dict(config): new config""" + for k, v in self.config.items(): + config[k] = v + return config + + def _apply_profile_configs(self): + """ Selection of the procedure: rebuild or merge + + The standard behavior is that all information not contained + in the play is discarded. + + If "merge_profile" is provided in the play and set to true, then existing + configurations from the profile and the newly defined ones are merged.
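+ Values defined in the play take precedence over the existing profile values when merging.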
+ + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + config = self.old_profile_json.copy() + if self.module.params['merge_profile']: + config = self._merge_config(config) + else: + config = self._generate_new_config(config) + + # upload config to lxd + url = '/1.0/profiles/{0}'.format(self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + self.client.do('PUT', url, config) + self.actions.append('apply_profile_configs') + + def _delete_profile(self): + url = '/1.0/profiles/{0}'.format(self.name) + if self.project: + url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) + self.client.do('DELETE', url) + self.actions.append('delete') + + def run(self): + """Run the main method.""" + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + + self.old_profile_json = self._get_profile_json() + self.old_state = self._profile_json_to_module_state(self.old_profile_json) + self._update_profile() + + state_changed = len(self.actions) > 0 + result_json = { + 'changed': state_changed, + 'old_state': self.old_state, + 'actions': self.actions + } + if self.client.debug: + result_json['logs'] = self.client.logs + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + project=dict( + type='str', + ), + new_name=dict( + type='str', + ), + config=dict( + type='dict', + ), + description=dict( + type='str', + ), + devices=dict( + type='dict', + ), + merge_profile=dict( + type='bool', + default=False + ), + state=dict( + choices=PROFILES_STATES, + default='present' + ), + url=dict( + type='str', + default=ANSIBLE_LXD_DEFAULT_URL + ), + snap_url=dict( + type='str', + default='unix:/var/snap/lxd/common/lxd/unix.socket' + ), + client_key=dict( + type='path', + aliases=['key_file'] + ), + client_cert=dict( + type='path', + aliases=['cert_file'] + ), + trust_password=dict(type='str', no_log=True) + ), + supports_check_mode=False, + ) + + lxd_manage = LXDProfileManagement(module=module) + lxd_manage.run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/lxd_project.py b/ansible_collections/community/general/plugins/modules/lxd_project.py new file mode 100644 index 000000000..983531fa0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/lxd_project.py @@ -0,0 +1,461 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: lxd_project +short_description: Manage LXD projects +version_added: 4.8.0 +description: + - Management of LXD projects. +author: "Raymond Chang (@we10710aa)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of the project. 
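+ - The name is used as-is in the LXD API URL (C(/1.0/projects/...)).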
+ required: true + type: str + description: + description: + - Description of the project. + type: str + config: + description: + - 'The config for the project (for example C({"features.profiles": "true"})). + See U(https://linuxcontainers.org/lxd/docs/master/projects/).' + - If the project already exists and its "config" value in metadata + obtained from + C(GET /1.0/projects/) + U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_get) + is different, then this module tries to apply the configurations. + type: dict + new_name: + description: + - A new name of a project. + - If this parameter is specified, the project will be renamed to this name. + See U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_post). + required: false + type: str + merge_project: + description: + - Merge the configuration of the present project with the new desired configuration, + instead of replacing it. If the configuration is the same after merging, no change is made. + required: false + default: false + type: bool + state: + choices: + - present + - absent + description: + - Define the state of a project. + required: false + default: present + type: str + url: + description: + - The Unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The Unix domain socket path when LXD is installed by the snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.key). + required: false + aliases: [ key_file ] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.crt). + required: false + aliases: [ cert_file ] + type: path + trust_password: + description: + - The client trusted password. + - 'You need to set this password on the LXD server before + running this module using the following command: + C(lxc config set core.trust_password ). + See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If I(trust_password) is set, this module sends a request for + authentication before sending any requests. + required: false + type: str +notes: + - Projects must have a unique name. If you attempt to create a project + with a name that already exists in the user's namespace, the module will + simply return as "unchanged". +''' + +EXAMPLES = ''' +# An example for creating a project +- hosts: localhost + connection: local + tasks: + - name: Create a project + community.general.lxd_project: + name: ansible-test-project + state: present + config: {} + description: my new project + +# An example for renaming a project +- hosts: localhost + connection: local + tasks: + - name: Rename ansible-test-project to ansible-test-project-new-name + community.general.lxd_project: + name: ansible-test-project + new_name: ansible-test-project-new-name + state: present + config: {} + description: my new project +''' + +RETURN = ''' +old_state: + description: The old state of the project. + returned: success + type: str + sample: "absent" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + elements: dict + contains: + type: + description: Type of actions performed, currently only C(sent request).
+ type: str + sample: "sent request" + request: + description: HTTP request sent to LXD server. + type: dict + contains: + method: + description: Method of HTTP request. + type: str + sample: "GET" + url: + description: URL path of HTTP request. + type: str + sample: "/1.0/projects/test-project" + json: + description: JSON body of HTTP request. + type: str + sample: "(too long to be placed here)" + timeout: + description: Timeout of HTTP request, C(null) if unset. + type: int + sample: null + response: + description: HTTP response received from LXD server. + type: dict + contains: + json: + description: JSON of HTTP response. + type: str + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the project. + returned: success + type: list + elements: str + sample: ["create"] +''' + +from ansible_collections.community.general.plugins.module_utils.lxd import ( + LXDClient, LXDClientException, default_key_file, default_cert_file +) +from ansible.module_utils.basic import AnsibleModule +import os + +# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint +ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' + +# PROJECTS_STATES is a list for states supported +PROJECTS_STATES = [ + 'present', 'absent' +] + +# CONFIG_PARAMS is a list of config attribute names. +CONFIG_PARAMS = [ + 'config', 'description' +] + + +class LXDProjectManagement(object): + def __init__(self, module): + """Management of LXC projects via Ansible. + + :param module: Processed Ansible Module. + :type module: ``object`` + """ + self.module = module + self.name = self.module.params['name'] + self._build_config() + self.state = self.module.params['state'] + self.new_name = self.module.params.get('new_name', None) + + self.key_file = self.module.params.get('client_key') + if self.key_file is None: + self.key_file = default_key_file() + self.cert_file = self.module.params.get('client_cert') + if self.cert_file is None: + self.cert_file = default_cert_file() + self.debug = self.module._verbosity >= 4 + + try: + if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL: + self.url = self.module.params['url'] + elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')): + self.url = self.module.params['snap_url'] + else: + self.url = self.module.params['url'] + except Exception as e: + self.module.fail_json(msg=e.msg) + + try: + self.client = LXDClient( + self.url, key_file=self.key_file, cert_file=self.cert_file, + debug=self.debug + ) + except LXDClientException as e: + self.module.fail_json(msg=e.msg) + self.trust_password = self.module.params.get('trust_password', None) + self.actions = [] + + def _build_config(self): + self.config = {} + for attr in CONFIG_PARAMS: + param_val = self.module.params.get(attr, None) + if param_val is not None: + self.config[attr] = param_val + + def _get_project_json(self): + return self.client.do( + 'GET', '/1.0/projects/{0}'.format(self.name), + ok_error_codes=[404] + ) + + @staticmethod + def _project_json_to_module_state(resp_json): + if resp_json['type'] == 'error': + return 'absent' + return 'present' + + def _update_project(self): + if self.state == 'present': + if self.old_state == 'absent': + if self.new_name is None: + self._create_project() + else: + self.module.fail_json( + msg='new_name must not be set when the project does not exist and the state is present', + changed=False) + else: + if self.new_name is not None and self.new_name != self.name: + self._rename_project() + if self._needs_to_apply_project_configs(): + 
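+ # Only 'config' and 'description' are compared here; a rename via new_name was already handled above.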
self._apply_project_configs() + elif self.state == 'absent': + if self.old_state == 'present': + if self.new_name is None: + self._delete_project() + else: + self.module.fail_json( + msg='new_name must not be set when the project exists and the specified state is absent', + changed=False) + + def _create_project(self): + config = self.config.copy() + config['name'] = self.name + self.client.do('POST', '/1.0/projects', config) + self.actions.append('create') + + def _rename_project(self): + config = {'name': self.new_name} + self.client.do('POST', '/1.0/projects/{0}'.format(self.name), config) + self.actions.append('rename') + self.name = self.new_name + + def _needs_to_change_project_config(self, key): + if key not in self.config: + return False + old_configs = self.old_project_json['metadata'].get(key, None) + return self.config[key] != old_configs + + def _needs_to_apply_project_configs(self): + return ( + self._needs_to_change_project_config('config') or + self._needs_to_change_project_config('description') + ) + + def _merge_dicts(self, source, destination): + """ Return a new dict that merges two dicts, + with values in the source dict overwriting those in the destination dict + + Args: + dict(source): source dict + dict(destination): destination dict + Kwargs: + None + Raises: + None + Returns: + dict(destination): merged dict""" + result = destination.copy() + for key, value in source.items(): + if isinstance(value, dict): + # get node or create one, then store the merged copy back + # (the recursive call returns a new dict, so it must be reassigned) + node = result.setdefault(key, {}) + result[key] = self._merge_dicts(value, node) + else: + result[key] = value + return result + + def _apply_project_configs(self): + """ Selection of the procedure: rebuild or merge + + The standard behavior is that all information not contained + in the play is discarded. + + If "merge_project" is provided in the play and set to true, then existing + configurations from the project and the newly defined ones are merged.
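+ Values from the play take precedence; if the merged result equals the current configuration, no API request is sent.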
+ + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + old_config = dict() + old_metadata = self.old_project_json['metadata'].copy() + for attr in CONFIG_PARAMS: + old_config[attr] = old_metadata[attr] + + if self.module.params['merge_project']: + config = self._merge_dicts(self.config, old_config) + if config == old_config: + # no need to call api if merged config is the same + # as old config + return + else: + config = self.config.copy() + # upload config to lxd + self.client.do('PUT', '/1.0/projects/{0}'.format(self.name), config) + self.actions.append('apply_projects_configs') + + def _delete_project(self): + self.client.do('DELETE', '/1.0/projects/{0}'.format(self.name)) + self.actions.append('delete') + + def run(self): + """Run the main method.""" + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + + self.old_project_json = self._get_project_json() + self.old_state = self._project_json_to_module_state( + self.old_project_json) + self._update_project() + + state_changed = len(self.actions) > 0 + result_json = { + 'changed': state_changed, + 'old_state': self.old_state, + 'actions': self.actions + } + if self.client.debug: + result_json['logs'] = self.client.logs + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + new_name=dict( + type='str', + ), + config=dict( + type='dict', + ), + description=dict( + type='str', + ), + merge_project=dict( + type='bool', + default=False + ), + state=dict( + choices=PROJECTS_STATES, + default='present' + ), + url=dict( + type='str', + default=ANSIBLE_LXD_DEFAULT_URL + ), + snap_url=dict( + type='str', + default='unix:/var/snap/lxd/common/lxd/unix.socket' + ), + client_key=dict( + type='path', + aliases=['key_file'] + ), + client_cert=dict( + type='path', + aliases=['cert_file'] + ), + trust_password=dict(type='str', no_log=True) + ), + supports_check_mode=False, + ) + + lxd_manage = LXDProjectManagement(module=module) + lxd_manage.run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/macports.py b/ansible_collections/community/general/plugins/modules/macports.py new file mode 100644 index 000000000..6f40d0938 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/macports.py @@ -0,0 +1,326 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Jimmy Tang +# Based on okpg (Patrick Pelletier ), pacman +# (Afterburn) and pkgin (Shaun Zinck) modules +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: macports +author: "Jimmy Tang (@jcftang)" +short_description: Package manager for MacPorts +description: + - Manages MacPorts packages (ports) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - A list of port names. 
+ aliases: ['port'] + type: list + elements: str + selfupdate: + description: + - Update Macports and the ports tree, either prior to installing ports or as a separate step. + - Equivalent to running C(port selfupdate). + aliases: ['update_cache', 'update_ports'] + default: false + type: bool + state: + description: + - Indicates the desired state of the port. + choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed'] + default: present + type: str + upgrade: + description: + - Upgrade all outdated ports, either prior to installing ports or as a separate step. + - Equivalent to running C(port upgrade outdated). + default: false + type: bool + variant: + description: + - A port variant specification. + - 'C(variant) is only supported with state: I(installed)/I(present).' + aliases: ['variants'] + type: str +''' +EXAMPLES = ''' +- name: Install the foo port + community.general.macports: + name: foo + +- name: Install the universal, x11 variant of the foo port + community.general.macports: + name: foo + variant: +universal+x11 + +- name: Install a list of ports + community.general.macports: + name: "{{ ports }}" + vars: + ports: + - foo + - foo-tools + +- name: Update Macports and the ports tree, then upgrade all outdated ports + community.general.macports: + selfupdate: true + upgrade: true + +- name: Update Macports and the ports tree, then install the foo port + community.general.macports: + name: foo + selfupdate: true + +- name: Remove the foo port + community.general.macports: + name: foo + state: absent + +- name: Activate the foo port + community.general.macports: + name: foo + state: active + +- name: Deactivate the foo port + community.general.macports: + name: foo + state: inactive +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def selfupdate(module, port_path): + """ Update Macports and the ports tree. """ + + rc, out, err = module.run_command("%s -v selfupdate" % port_path) + + if rc == 0: + updated = any( + re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or + re.search(r'Installing new Macports release', s.strip()) + for s in out.split('\n') + if s + ) + if updated: + changed = True + msg = "Macports updated successfully" + else: + changed = False + msg = "Macports already up-to-date" + + return (changed, msg, out, err) + else: + module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err) + + +def upgrade(module, port_path): + """ Upgrade outdated ports. """ + + rc, out, err = module.run_command("%s upgrade outdated" % port_path) + + # rc is 1 when nothing to upgrade so check stdout first. + if out.strip() == "Nothing to upgrade.": + changed = False + msg = "Ports already upgraded" + return (changed, msg, out, err) + elif rc == 0: + changed = True + msg = "Outdated ports upgraded successfully" + return (changed, msg, out, err) + else: + module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err) + + +def query_port(module, port_path, name, state="present"): + """ Returns whether a port is installed or not. """ + + if state == "present": + + rc, out, err = module.run_command([port_path, "-q", "installed", name]) + + if rc == 0 and out.strip().startswith(name + " "): + return True + + return False + + elif state == "active": + + rc, out, err = module.run_command([port_path, "-q", "installed", name]) + + if rc == 0 and "(active)" in out: + return True + + return False + + +def remove_ports(module, port_path, ports, stdout, stderr): + """ Uninstalls one or more ports if installed. 
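+ Exits the module via exit_json()/fail_json() instead of returning to the caller.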
""" + + remove_c = 0 + # Using a for loop in case of error, we can report the port that failed + for port in ports: + # Query the port first, to see if we even need to remove + if not query_port(module, port_path, port): + continue + + rc, out, err = module.run_command("%s uninstall %s" % (port_path, port)) + stdout += out + stderr += err + if query_port(module, port_path, port): + module.fail_json(msg="Failed to remove %s: %s" % (port, err), stdout=stdout, stderr=stderr) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c, stdout=stdout, stderr=stderr) + + module.exit_json(changed=False, msg="Port(s) already absent", stdout=stdout, stderr=stderr) + + +def install_ports(module, port_path, ports, variant, stdout, stderr): + """ Installs one or more ports if not already installed. """ + + install_c = 0 + + for port in ports: + if query_port(module, port_path, port): + continue + + rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant)) + stdout += out + stderr += err + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to install %s: %s" % (port, err), stdout=stdout, stderr=stderr) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c), stdout=stdout, stderr=stderr) + + module.exit_json(changed=False, msg="Port(s) already present", stdout=stdout, stderr=stderr) + + +def activate_ports(module, port_path, ports, stdout, stderr): + """ Activate a port if it's inactive. """ + + activate_c = 0 + + for port in ports: + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to activate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr) + + if query_port(module, port_path, port, state="active"): + continue + + rc, out, err = module.run_command("%s activate %s" % (port_path, port)) + stdout += out + stderr += err + + if not query_port(module, port_path, port, state="active"): + module.fail_json(msg="Failed to activate %s: %s" % (port, err), stdout=stdout, stderr=stderr) + + activate_c += 1 + + if activate_c > 0: + module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c), stdout=stdout, stderr=stderr) + + module.exit_json(changed=False, msg="Port(s) already active", stdout=stdout, stderr=stderr) + + +def deactivate_ports(module, port_path, ports, stdout, stderr): + """ Deactivate a port if it's active. 
""" + + deactivated_c = 0 + + for port in ports: + if not query_port(module, port_path, port): + module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr) + + if not query_port(module, port_path, port, state="active"): + continue + + rc, out, err = module.run_command("%s deactivate %s" % (port_path, port)) + stdout += out + stderr += err + if query_port(module, port_path, port, state="active"): + module.fail_json(msg="Failed to deactivate %s: %s" % (port, err), stdout=stdout, stderr=stderr) + + deactivated_c += 1 + + if deactivated_c > 0: + module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c), stdout=stdout, stderr=stderr) + + module.exit_json(changed=False, msg="Port(s) already inactive", stdout=stdout, stderr=stderr) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', aliases=["port"]), + selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'), + state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), + upgrade=dict(default=False, type='bool'), + variant=dict(aliases=["variants"], default=None, type='str') + ) + ) + + stdout = "" + stderr = "" + + port_path = module.get_bin_path('port', True, ['/opt/local/bin']) + + p = module.params + + if p["selfupdate"]: + (changed, msg, out, err) = selfupdate(module, port_path) + stdout += out + stderr += err + if not (p["name"] or p["upgrade"]): + module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr) + + if p["upgrade"]: + (changed, msg, out, err) = upgrade(module, port_path) + stdout += out + stderr += err + if not p["name"]: + module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr) + + pkgs = p["name"] + + variant = p["variant"] + + if p["state"] in ["present", "installed"]: + install_ports(module, port_path, pkgs, variant, stdout, stderr) + + elif p["state"] in ["absent", "removed"]: + remove_ports(module, port_path, pkgs, stdout, stderr) + + elif p["state"] == "active": + activate_ports(module, port_path, pkgs, stdout, stderr) + + elif p["state"] == "inactive": + deactivate_ports(module, port_path, pkgs, stdout, stderr) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/mail.py b/ansible_collections/community/general/plugins/modules/mail.py new file mode 100644 index 000000000..feaac6923 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/mail.py @@ -0,0 +1,418 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2012, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +author: +- Dag Wieers (@dagwieers) +module: mail +short_description: Send an email +description: +- This module is useful for sending emails from playbooks. +- One may wonder why automate sending emails? In complex environments + there are from time to time processes that cannot be automated, either + because you lack the authority to make it so, or because not everyone + agrees to a common approach. 
+- If you cannot automate a specific step, but the step is non-blocking, + sending out an email to the responsible party to make them perform their + part of the bargain is an elegant way to put the responsibility in + someone else's lap. +- Of course sending out a mail can be equally useful as a way to notify + one or more people in a team that a specific action has been + (successfully) taken. +extends_documentation_fragment: +- community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + sender: + description: + - The email-address the mail is sent from. May contain address and phrase. + type: str + default: root + aliases: [ from ] + to: + description: + - The email-address(es) the mail is being sent to. + - This is a list, which may contain address and phrase portions. + type: list + elements: str + default: root + aliases: [ recipients ] + cc: + description: + - The email-address(es) the mail is being copied to. + - This is a list, which may contain address and phrase portions. + type: list + elements: str + default: [] + bcc: + description: + - The email-address(es) the mail is being 'blind' copied to. + - This is a list, which may contain address and phrase portions. + type: list + elements: str + default: [] + subject: + description: + - The subject of the email being sent. + required: true + type: str + aliases: [ msg ] + body: + description: + - The body of the email being sent. + type: str + username: + description: + - The username, if SMTP requires authentication. + type: str + password: + description: + - The password, if SMTP requires authentication. + type: str + host: + description: + - The mail server. + type: str + default: localhost + port: + description: + - The mail server port. + - This must be a valid integer between 1 and 65534. + type: int + default: 25 + attach: + description: + - A list of pathnames of files to attach to the message. + - Attached files will have their content-type set to C(application/octet-stream). + type: list + elements: path + default: [] + headers: + description: + - A list of headers which should be added to the message. + - Each individual header is specified as C(header=value) (see example below). + type: list + elements: str + default: [] + charset: + description: + - The character set of email being sent. + type: str + default: utf-8 + subtype: + description: + - The minor MIME type, can be either C(plain) or C(html). + - The major type is always C(text). + type: str + choices: [ html, plain ] + default: plain + secure: + description: + - If C(always), the connection will only send email if the connection is encrypted. + If the server doesn't accept the encrypted connection, it will fail. + - If C(try), the connection will attempt to set up a secure SSL/TLS session before trying to send. + - If C(never), the connection will not attempt to set up a secure SSL/TLS session before sending. + - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection before sending. + If it is unable to do so, it will fail. + type: str + choices: [ always, never, starttls, try ] + default: try + timeout: + description: + - Sets the timeout in seconds for connection attempts. + type: int + default: 20 + ehlohost: + description: + - Allows for manual specification of host for EHLO. + type: str + version_added: 3.8.0 +''' + +EXAMPLES = r''' +- name: Example playbook sending mail to root + community.general.mail: + subject: System {{ ansible_hostname }} has been successfully provisioned.
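+ # When 'body' is not set, the module reuses the subject as the message body.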
+ delegate_to: localhost + +- name: Sending an e-mail using Gmail SMTP servers + community.general.mail: + host: smtp.gmail.com + port: 587 + username: username@gmail.com + password: mysecret + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + delegate_to: localhost + +- name: Send e-mail to a bunch of users, attaching files + community.general.mail: + host: 127.0.0.1 + port: 2025 + subject: Ansible-report + body: Hello, this is an e-mail. I hope you like it ;-) + from: jane@example.net (Jane Jolie) + to: + - John Doe + - Suzie Something + cc: Charlie Root + attach: + - /etc/group + - /tmp/avatar2.png + headers: + - Reply-To=john@example.com + - X-Special="Something or other" + charset: us-ascii + delegate_to: localhost + +- name: Sending an e-mail using the remote machine, not the Ansible controller node + community.general.mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + +- name: Sending an e-mail using Legacy SSL to the remote machine + community.general.mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + secure: always + +- name: Sending an e-mail using StartTLS to the remote machine + community.general.mail: + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. + secure: starttls + +- name: Sending an e-mail using StartTLS, remote server, custom EHLO + community.general.mail: + host: some.smtp.host.tld + port: 25 + ehlohost: my-resolvable-hostname.tld + to: John Smith + subject: Ansible-report + body: System {{ ansible_hostname }} has been successfully provisioned. 
+ secure: starttls +''' + +import os +import smtplib +import ssl +import traceback +from email import encoders +from email.utils import parseaddr, formataddr, formatdate +from email.mime.base import MIMEBase +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +from email.header import Header + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import PY3 +from ansible.module_utils.common.text.converters import to_native + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + username=dict(type='str'), + password=dict(type='str', no_log=True), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=25), + ehlohost=dict(type='str', default=None), + sender=dict(type='str', default='root', aliases=['from']), + to=dict(type='list', elements='str', default=['root'], aliases=['recipients']), + cc=dict(type='list', elements='str', default=[]), + bcc=dict(type='list', elements='str', default=[]), + subject=dict(type='str', required=True, aliases=['msg']), + body=dict(type='str'), + attach=dict(type='list', elements='path', default=[]), + headers=dict(type='list', elements='str', default=[]), + charset=dict(type='str', default='utf-8'), + subtype=dict(type='str', default='plain', choices=['html', 'plain']), + secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']), + timeout=dict(type='int', default=20), + ), + required_together=[['password', 'username']], + ) + + username = module.params.get('username') + password = module.params.get('password') + host = module.params.get('host') + port = module.params.get('port') + local_hostname = module.params.get('ehlohost') + sender = module.params.get('sender') + recipients = module.params.get('to') + copies = module.params.get('cc') + blindcopies = module.params.get('bcc') + subject = module.params.get('subject') + body = module.params.get('body') + attach_files = module.params.get('attach') + headers = module.params.get('headers') + charset = module.params.get('charset') + subtype = module.params.get('subtype') + secure = module.params.get('secure') + timeout = module.params.get('timeout') + + code = 0 + secure_state = False + sender_phrase, sender_addr = parseaddr(sender) + + if not body: + body = subject + + try: + if secure != 'never': + try: + if PY3: + smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout) + else: + smtp = smtplib.SMTP_SSL(local_hostname=local_hostname, timeout=timeout) + code, smtpmessage = smtp.connect(host, port) + secure_state = True + except ssl.SSLError as e: + if secure == 'always': + module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' % + (host, port, to_native(e)), exception=traceback.format_exc()) + except Exception: + pass + + if not secure_state: + if PY3: + smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout) + else: + smtp = smtplib.SMTP(local_hostname=local_hostname, timeout=timeout) + code, smtpmessage = smtp.connect(host, port) + + except smtplib.SMTPException as e: + module.fail_json(rc=1, msg='Unable to Connect %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) + + try: + smtp.ehlo() + except smtplib.SMTPException as e: + module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc()) + + if int(code) > 0: + if not secure_state and secure in ('starttls', 'try'): + if smtp.has_extn('STARTTLS'): + try: + 
smtp.starttls()
+                    secure_state = True
+                except smtplib.SMTPException as e:
+                    module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+                                    (host, port, to_native(e)), exception=traceback.format_exc())
+                try:
+                    smtp.ehlo()
+                except smtplib.SMTPException as e:
+                    module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+            else:
+                if secure == 'starttls':
+                    module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))
+
+    if username and password:
+        if smtp.has_extn('AUTH'):
+            try:
+                smtp.login(username, password)
+            except smtplib.SMTPAuthenticationError:
+                module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
+            except smtplib.SMTPException:
+                module.fail_json(rc=1, msg='No suitable authentication method was found on %s:%s' % (host, port))
+        else:
+            module.fail_json(rc=1, msg="No authentication offered by the server at %s:%s" % (host, port))
+
+    if not secure_state and (username and password):
+        module.warn('Username and password were sent without encryption')
+
+    msg = MIMEMultipart(_charset=charset)
+    msg['From'] = formataddr((sender_phrase, sender_addr))
+    msg['Date'] = formatdate(localtime=True)
+    msg['Subject'] = Header(subject, charset)
+    msg.preamble = "Multipart message"
+
+    for header in headers:
+        # NOTE: Backward compatible with old syntax using '|' as delimiter
+        for hdr in [x.strip() for x in header.split('|')]:
+            try:
+                h_key, h_val = hdr.split('=', 1)
+                h_val = to_native(Header(h_val, charset))
+                msg.add_header(h_key, h_val)
+            except Exception:
+                module.warn("Skipping header '%s', unable to parse" % hdr)
+
+    if 'X-Mailer' not in msg:
+        msg.add_header('X-Mailer', 'Ansible mail module')
+
+    addr_list = []
+    for addr in [x.strip() for x in blindcopies]:
+        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
+
+    to_list = []
+    for addr in [x.strip() for x in recipients]:
+        to_list.append(formataddr(parseaddr(addr)))
+        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
+    msg['To'] = ", ".join(to_list)
+
+    cc_list = []
+    for addr in [x.strip() for x in copies]:
+        cc_list.append(formataddr(parseaddr(addr)))
+        addr_list.append(parseaddr(addr)[1])    # address only, w/o phrase
+    msg['Cc'] = ", ".join(cc_list)
+
+    part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
+    msg.attach(part)
+
+    # NOTE: Backward compatibility with the old syntax using space as delimiter is not retained;
+    #       that syntax broke filenames containing spaces :-(
+    for filename in attach_files:
+        try:
+            part = MIMEBase('application', 'octet-stream')
+            with open(filename, 'rb') as fp:
+                part.set_payload(fp.read())
+            encoders.encode_base64(part)
+            part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename))
+            msg.attach(part)
+        except Exception as e:
+            module.fail_json(rc=1, msg="Failed to send community.general.mail: can't attach file %s: %s" %
+                             (filename, to_native(e)), exception=traceback.format_exc())
+
+    composed = msg.as_string()
+
+    try:
+        result = smtp.sendmail(sender_addr, set(addr_list), composed)
+    except Exception as e:
+        module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" %
+                         (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc())
+
+    smtp.quit()
+
+    if result:
+        for key in result:
+            module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1]))
+        module.exit_json(msg='Failed to send mail to at least one recipient', result=result)
+
+
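+    # NOTE: a brief sketch of why the loop above inspects `result`:
+    # smtplib.SMTP.sendmail() returns an empty dict on full success, and a
+    # dict mapping each refused address to an (SMTP code, message) tuple
+    # when the server accepted only some recipients, e.g. (hypothetical
+    # values):
+    #
+    #     result = {'bad@example.org': (550, b'5.1.1 User unknown')}
+    #     for addr, (code, resp) in result.items():
+    #         print('refused %s: %s %s' % (addr, code, resp))
+    #
+    # If every recipient is refused, sendmail() raises
+    # smtplib.SMTPRecipientsRefused, which the generic except clause above
+    # turns into fail_json.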
module.exit_json(msg='Mail sent successfully', result=result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/make.py b/ansible_collections/community/general/plugins/modules/make.py new file mode 100644 index 000000000..ebff6cfe1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/make.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Linus Unnebäck +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: make +short_description: Run targets in a Makefile +requirements: + - make +author: Linus Unnebäck (@LinusU) +description: + - Run targets in a Makefile. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + chdir: + description: + - Change to this directory before running make. + type: path + required: true + file: + description: + - Use a custom Makefile. + type: path + jobs: + description: + - Set the number of make jobs to run concurrently. + - Typically if set, this would be the number of processors and/or threads available to the machine. + - This is not supported by all make implementations. + type: int + version_added: 2.0.0 + make: + description: + - Use a specific make binary. + type: path + version_added: '0.2.0' + params: + description: + - Any extra parameters to pass to make. + type: dict + target: + description: + - The target to run. + - Typically this would be something like C(install), C(test), or C(all). + type: str +''' + +EXAMPLES = r''' +- name: Build the default target + community.general.make: + chdir: /home/ubuntu/cool-project + +- name: Run 'install' target as root + community.general.make: + chdir: /home/ubuntu/cool-project + target: install + become: true + +- name: Build 'all' target with extra arguments + community.general.make: + chdir: /home/ubuntu/cool-project + target: all + params: + NUM_THREADS: 4 + BACKEND: lapack + +- name: Build 'all' target with a custom Makefile + community.general.make: + chdir: /home/ubuntu/cool-project + target: all + file: /some-project/Makefile +''' + +RETURN = r''' +chdir: + description: + - The value of the module parameter I(chdir). + type: str + returned: success +command: + description: + - The command built and executed by the module. + type: str + returned: success + version_added: 6.5.0 +file: + description: + - The value of the module parameter I(file). + type: str + returned: success +jobs: + description: + - The value of the module parameter I(jobs). + type: int + returned: success +params: + description: + - The value of the module parameter I(params). + type: dict + returned: success +target: + description: + - The value of the module parameter I(target). + type: str + returned: success +''' + +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils.basic import AnsibleModule + + +def run_command(command, module, check_rc=True): + """ + Run a command using the module, return + the result code and std{err,out} content. 
+ + :param command: list of command arguments + :param module: Ansible make module instance + :return: return code, stdout content, stderr content + """ + rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir']) + return rc, sanitize_output(out), sanitize_output(err) + + +def sanitize_output(output): + """ + Sanitize the output string before we + pass it to module.fail_json. Defaults + the string to empty if it is None, else + strips trailing newlines. + + :param output: output to sanitize + :return: sanitized output + """ + if output is None: + return '' + else: + return output.rstrip("\r\n") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + target=dict(type='str'), + params=dict(type='dict'), + chdir=dict(type='path', required=True), + file=dict(type='path'), + make=dict(type='path'), + jobs=dict(type='int'), + ), + supports_check_mode=True, + ) + + make_path = module.params['make'] + if make_path is None: + # Build up the invocation of `make` we are going to use + # For non-Linux OSes, prefer gmake (GNU make) over make + make_path = module.get_bin_path('gmake', required=False) + if not make_path: + # Fall back to system make + make_path = module.get_bin_path('make', required=True) + make_target = module.params['target'] + if module.params['params'] is not None: + make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])] + else: + make_parameters = [] + + # build command: + # handle any make specific arguments included in params + base_command = [make_path] + if module.params['jobs'] is not None: + jobs = str(module.params['jobs']) + base_command.extend(["-j", jobs]) + if module.params['file'] is not None: + base_command.extend(["-f", module.params['file']]) + + # add make target + base_command.append(make_target) + + # add makefile parameters + base_command.extend(make_parameters) + + # Check if the target is already up to date + rc, out, err = run_command(base_command + ['-q'], module, check_rc=False) + if module.check_mode: + # If we've been asked to do a dry run, we only need + # to report whether or not the target is up to date + changed = (rc != 0) + else: + if rc == 0: + # The target is up to date, so we don't have to + # do anything + changed = False + else: + # The target isn't up to date, so we need to run it + rc, out, err = run_command(base_command, module, + check_rc=True) + changed = True + + # We don't report the return code, as if this module failed + # we would be calling fail_json from run_command, so even if + # we had a non-zero return code, we did not fail. However, if + # we report a non-zero return code here, we will be marked as + # failed regardless of what we signal using the failed= kwarg. + module.exit_json( + changed=changed, + failed=False, + stdout=out, + stderr=err, + target=module.params['target'], + params=module.params['params'], + chdir=module.params['chdir'], + file=module.params['file'], + jobs=module.params['jobs'], + command=' '.join([shlex_quote(part) for part in base_command]), + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py b/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py new file mode 100644 index 000000000..c6cefad6a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py @@ -0,0 +1,313 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Red Hat Inc. 
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: manageiq_alert_profiles + +short_description: Configuration of alert profiles for ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Elad Alfassa (@elad661) +description: + - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + type: str + description: + - absent - alert profile should not exist, + - present - alert profile should exist, + choices: ['absent', 'present'] + default: 'present' + name: + type: str + description: + - The unique alert profile name in ManageIQ. + - Required when state is "absent" or "present". + resource_type: + type: str + description: + - The resource type for the alert profile in ManageIQ. Required when state is "present". + choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', + 'ExtManagementSystem', 'MiddlewareServer'] + alerts: + type: list + elements: str + description: + - List of alert descriptions to assign to this profile. + - Required if state is "present" + notes: + type: str + description: + - Optional notes for this profile + +''' + +EXAMPLES = ''' +- name: Add an alert profile to ManageIQ + community.general.manageiq_alert_profiles: + state: present + name: Test profile + resource_type: ContainerNode + alerts: + - Test Alert 01 + - Test Alert 02 + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: Delete an alert profile from ManageIQ + community.general.manageiq_alert_profiles: + state: absent + name: Test profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +class ManageIQAlertProfiles(object): + """ Object to execute alert profile management operations in manageiq. + """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url) + + def get_profiles(self): + """ Get all alert profiles from ManageIQ + """ + try: + response = self.client.get(self.url + '?expand=alert_definitions,resources') + except Exception as e: + self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e)) + return response.get('resources') or [] + + def get_alerts(self, alert_descriptions): + """ Get a list of alert hrefs from a list of alert descriptions + """ + alerts = [] + for alert_description in alert_descriptions: + alert = self.manageiq.find_collection_resource_or_fail("alert_definitions", + description=alert_description) + alerts.append(alert['href']) + + return alerts + + def add_profile(self, profile): + """ Add a new alert profile to ManageIQ + """ + # find all alerts to add to the profile + # we do this first to fail early if one is missing. 
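+        # A sketch of the fail-early behaviour (hypothetical data): given
+        #
+        #     profile = {'name': 'p1', 'resource_type': 'Vm', 'notes': None,
+        #                'alerts': ['cpu high', 'missing alert']}
+        #
+        # get_alerts() resolves each description via
+        # find_collection_resource_or_fail(), so the module exits through
+        # fail_json before anything is POSTed if 'missing alert' does not
+        # exist on the server.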
+        alerts = self.get_alerts(profile['alerts'])
+
+        # build the profile dict to send to the server
+
+        profile_dict = dict(name=profile['name'],
+                            description=profile['name'],
+                            mode=profile['resource_type'])
+        if profile['notes']:
+            profile_dict['set_data'] = dict(notes=profile['notes'])
+
+        # send it to the server
+        try:
+            result = self.client.post(self.url, resource=profile_dict, action="create")
+        except Exception as e:
+            self.module.fail_json(msg="Creating profile failed: {error}".format(error=e))
+
+        # now that it has been created, we can assign the alerts
+        self.assign_or_unassign(result['results'][0], alerts, "assign")
+
+        msg = "Profile {name} created successfully"
+        msg = msg.format(name=profile['name'])
+        return dict(changed=True, msg=msg)
+
+    def delete_profile(self, profile):
+        """ Delete an alert profile from ManageIQ
+        """
+        try:
+            self.client.post(profile['href'], action="delete")
+        except Exception as e:
+            self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e))
+
+        msg = "Successfully deleted profile {name}".format(name=profile['name'])
+        return dict(changed=True, msg=msg)
+
+    def get_alert_href(self, alert):
+        """ Get an absolute href for an alert
+        """
+        return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id'])
+
+    def assign_or_unassign(self, profile, resources, action):
+        """ Assign or unassign alerts to profile, and validate the result.
+        """
+        alerts = [dict(href=href) for href in resources]
+
+        subcollection_url = profile['href'] + '/alert_definitions'
+        try:
+            result = self.client.post(subcollection_url, resources=alerts, action=action)
+            if len(result['results']) != len(alerts):
+                msg = "Failed to {action} alerts to profile '{name}', " +\
+                      "expected {expected} alerts to be {action}ed, " +\
+                      "but only {changed} were {action}ed"
+                msg = msg.format(action=action,
+                                 name=profile['name'],
+                                 expected=len(alerts),
+                                 changed=len(result['results']))
+                self.module.fail_json(msg=msg)
+        except Exception as e:
+            msg = "Failed to {action} alerts to profile '{name}': {error}"
+            msg = msg.format(action=action, name=profile['name'], error=e)
+            self.module.fail_json(msg=msg)
+
+        return result['results']
+
+    def update_profile(self, old_profile, desired_profile):
+        """ Update alert profile in ManageIQ
+        """
+        changed = False
+        # we need to use client.get to query the alert definitions
+        old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions')
+
+        # figure out which alerts we need to assign / unassign
+        # alerts listed by the user:
+        desired_alerts = set(self.get_alerts(desired_profile['alerts']))
+
+        # alerts which currently exist in the profile
+        if 'alert_definitions' in old_profile:
+            # we use get_alert_href to have a direct href to the alert
+            existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']])
+        else:
+            # no alerts in this profile
+            existing_alerts = set()
+
+        to_add = list(desired_alerts - existing_alerts)
+        to_remove = list(existing_alerts - desired_alerts)
+
+        # assign / unassign the alerts, if needed
+
+        if to_remove:
+            self.assign_or_unassign(old_profile, to_remove, "unassign")
+            changed = True
+        if to_add:
+            self.assign_or_unassign(old_profile, to_add, "assign")
+            changed = True
+
+        # update other properties
+        profile_dict = dict()
+
+        if old_profile['mode'] != desired_profile['resource_type']:
+            # mode needs to be updated
+            profile_dict['mode'] = desired_profile['resource_type']
+
+        # check if notes need to be updated
+        old_notes = old_profile.get('set_data',
{}).get('notes') + + if desired_profile['notes'] != old_notes: + profile_dict['set_data'] = dict(notes=desired_profile['notes']) + + if profile_dict: + # if we have any updated values + changed = True + try: + result = self.client.post(old_profile['href'], + resource=profile_dict, + action="edit") + except Exception as e: + msg = "Updating profile '{name}' failed: {error}" + msg = msg.format(name=old_profile['name'], error=e) + self.module.fail_json(msg=msg) + + if changed: + msg = "Profile {name} updated successfully".format(name=desired_profile['name']) + else: + msg = "No update needed for profile {name}".format(name=desired_profile['name']) + return dict(changed=changed, msg=msg) + + +def main(): + argument_spec = dict( + name=dict(type='str'), + resource_type=dict(type='str', choices=['Vm', + 'ContainerNode', + 'MiqServer', + 'Host', + 'Storage', + 'EmsCluster', + 'ExtManagementSystem', + 'MiddlewareServer']), + alerts=dict(type='list', elements='str'), + notes=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule(argument_spec=argument_spec, + required_if=[('state', 'present', ['name', 'resource_type']), + ('state', 'absent', ['name'])]) + + state = module.params['state'] + name = module.params['name'] + + manageiq = ManageIQ(module) + manageiq_alert_profiles = ManageIQAlertProfiles(manageiq) + + existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles", + name=name) + + # we need to add or update the alert profile + if state == "present": + if not existing_profile: + # a profile with this name doesn't exist yet, let's create it + res_args = manageiq_alert_profiles.add_profile(module.params) + else: + # a profile with this name exists, we might need to update it + res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params) + + # this alert profile should not exist + if state == "absent": + # if we have an alert profile with this name, delete it + if existing_profile: + res_args = manageiq_alert_profiles.delete_profile(existing_profile) + else: + # This alert profile does not exist in ManageIQ, and that's okay + msg = "Alert profile '{name}' does not exist in ManageIQ" + msg = msg.format(name=name) + res_args = dict(changed=False, msg=msg) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_alerts.py b/ansible_collections/community/general/plugins/modules/manageiq_alerts.py new file mode 100644 index 000000000..518b29f1f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_alerts.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Red Hat Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: manageiq_alerts + +short_description: Configuration of alerts in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Elad Alfassa (@elad661) +description: + - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ. 
+
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - absent - alert should not exist,
+      - present - alert should exist,
+    required: false
+    choices: ['absent', 'present']
+    default: 'present'
+  description:
+    type: str
+    description:
+      - The unique alert description in ManageIQ.
+      - Required when state is "absent" or "present".
+  resource_type:
+    type: str
+    description:
+      - The entity type for the alert in ManageIQ. Required when state is "present".
+    choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+              'ExtManagementSystem', 'MiddlewareServer']
+  expression_type:
+    type: str
+    description:
+      - Expression type.
+    default: hash
+    choices: ["hash", "miq"]
+  expression:
+    type: dict
+    description:
+      - The alert expression for ManageIQ.
+      - Can either be in the "Miq Expression" format or the "Hash Expression" format.
+      - Required if state is "present".
+  enabled:
+    description:
+      - Enable or disable the alert. Required if state is "present".
+    type: bool
+  options:
+    type: dict
+    description:
+      - Additional alert options, such as notification type and frequency.
+
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert with a "hash expression" to ManageIQ
+  community.general.manageiq_alerts:
+    state: present
+    description: Test Alert 01
+    options:
+      notifications:
+        email:
+          to: ["example@example.com"]
+          from: "example@example.com"
+    resource_type: ContainerNode
+    expression:
+      eval_method: hostd_log_threshold
+      mode: internal
+      options: {}
+    enabled: true
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false
+
+- name: Add an alert with a "miq expression" to ManageIQ
+  community.general.manageiq_alerts:
+    state: present
+    description: Test Alert 02
+    options:
+      notifications:
+        email:
+          to: ["example@example.com"]
+          from: "example@example.com"
+    resource_type: Vm
+    expression_type: miq
+    expression:
+      and:
+        - CONTAINS:
+            tag: Vm.managed-environment
+            value: prod
+        - not:
+            CONTAINS:
+              tag: Vm.host.managed-environment
+              value: prod
+    enabled: true
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false
+
+- name: Delete an alert from ManageIQ
+  community.general.manageiq_alerts:
+    state: absent
+    description: Test Alert 01
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlert(object):
+    """ Represent a ManageIQ alert. Can be initialized with both the format
+        we receive from the server and the format we get from the user.
+    """
+    def __init__(self, alert):
+        self.description = alert['description']
+        self.db = alert['db']
+        self.enabled = alert['enabled']
+        self.options = alert['options']
+        self.hash_expression = None
+        self.miq_expression = None
+
+        if 'hash_expression' in alert:
+            self.hash_expression = alert['hash_expression']
+        if 'miq_expression' in alert:
+            self.miq_expression = alert['miq_expression']
+            if 'exp' in self.miq_expression:
+                # miq_expression is a field that needs a special case, because
+                # it's returned surrounded by a dict named exp even though we don't
+                # send it with that dict.
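+                # (Illustration, hypothetical payload: the server returns
+                #     {'exp': {'CONTAINS': {'tag': '...', 'value': '...'}}}
+                # while we send only the inner dict, so unwrapping keeps
+                # __eq__ comparisons between server and user alerts
+                # symmetric.)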
+                self.miq_expression = self.miq_expression['exp']
+
+    def __eq__(self, other):
+        """ Compare two ManageIQAlert objects
+        """
+        return self.__dict__ == other.__dict__
+
+
+class ManageIQAlerts(object):
+    """ Object to execute alert management operations in manageiq.
+    """
+
+    def __init__(self, manageiq):
+        self.manageiq = manageiq
+
+        self.module = self.manageiq.module
+        self.api_url = self.manageiq.api_url
+        self.client = self.manageiq.client
+        self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
+
+    def get_alerts(self):
+        """ Get all alerts from ManageIQ
+        """
+        try:
+            response = self.client.get(self.alerts_url + '?expand=resources')
+        except Exception as e:
+            self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
+        return response.get('resources', [])
+
+    def validate_hash_expression(self, expression):
+        """ Validate a 'hash expression' alert definition
+        """
+        # hash expressions must have the following fields
+        for key in ['options', 'eval_method', 'mode']:
+            if key not in expression:
+                msg = "Hash expression is missing required field {key}".format(key=key)
+                self.module.fail_json(msg=msg)
+
+    def create_alert_dict(self, params):
+        """ Create a dict representing an alert
+        """
+        if params['expression_type'] == 'hash':
+            # hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
+            self.validate_hash_expression(params['expression'])
+            expression_type = 'hash_expression'
+        else:
+            # actually miq_expression, but we call it "expression" for backwards-compatibility
+            expression_type = 'expression'
+
+        # build the alert
+        alert = dict(description=params['description'],
+                     db=params['resource_type'],
+                     options=params['options'],
+                     enabled=params['enabled'])
+
+        # add the actual expression.
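+        # (For the default hash type the resulting resource is, with
+        # hypothetical values:
+        #     {'description': 'Test Alert 01', 'db': 'Vm', 'options': {...},
+        #      'enabled': True, 'hash_expression': {...}}
+        # while expression_type=miq stores the same payload under the
+        # legacy 'expression' key.)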
+ alert.update({expression_type: params['expression']}) + + return alert + + def add_alert(self, alert): + """ Add a new alert to ManageIQ + """ + try: + result = self.client.post(self.alerts_url, action='create', resource=alert) + + msg = "Alert {description} created successfully: {details}" + msg = msg.format(description=alert['description'], details=result) + return dict(changed=True, msg=msg) + except Exception as e: + msg = "Creating alert {description} failed: {error}" + if "Resource expression needs be specified" in str(e): + # Running on an older version of ManageIQ and trying to create a hash expression + msg = msg.format(description=alert['description'], + error="Your version of ManageIQ does not support hash_expression") + else: + msg = msg.format(description=alert['description'], error=e) + self.module.fail_json(msg=msg) + + def delete_alert(self, alert): + """ Delete an alert + """ + try: + result = self.client.post('{url}/{id}'.format(url=self.alerts_url, + id=alert['id']), + action="delete") + msg = "Alert {description} deleted: {details}" + msg = msg.format(description=alert['description'], details=result) + return dict(changed=True, msg=msg) + except Exception as e: + msg = "Deleting alert {description} failed: {error}" + msg = msg.format(description=alert['description'], error=e) + self.module.fail_json(msg=msg) + + def update_alert(self, existing_alert, new_alert): + """ Update an existing alert with the values from `new_alert` + """ + new_alert_obj = ManageIQAlert(new_alert) + if new_alert_obj == ManageIQAlert(existing_alert): + # no change needed - alerts are identical + return dict(changed=False, msg="No update needed") + else: + try: + url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id']) + result = self.client.post(url, action="edit", resource=new_alert) + + # make sure that the update was indeed successful by comparing + # the result to the expected result. + if new_alert_obj == ManageIQAlert(result): + # success! 
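+                    # (The comparison above re-normalizes the server response
+                    # through ManageIQAlert, so cosmetic differences such as
+                    # the {'exp': ...} wrapper around a miq expression do not
+                    # produce a false "update failed".)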
+ msg = "Alert {description} updated successfully: {details}" + msg = msg.format(description=existing_alert['description'], details=result) + + return dict(changed=True, msg=msg) + else: + # unexpected result + msg = "Updating alert {description} failed, unexpected result {details}" + msg = msg.format(description=existing_alert['description'], details=result) + + self.module.fail_json(msg=msg) + + except Exception as e: + msg = "Updating alert {description} failed: {error}" + if "Resource expression needs be specified" in str(e): + # Running on an older version of ManageIQ and trying to update a hash expression + msg = msg.format(description=existing_alert['description'], + error="Your version of ManageIQ does not support hash_expression") + else: + msg = msg.format(description=existing_alert['description'], error=e) + self.module.fail_json(msg=msg) + + +def main(): + argument_spec = dict( + description=dict(type='str'), + resource_type=dict(type='str', choices=['Vm', + 'ContainerNode', + 'MiqServer', + 'Host', + 'Storage', + 'EmsCluster', + 'ExtManagementSystem', + 'MiddlewareServer']), + expression_type=dict(type='str', default='hash', choices=['miq', 'hash']), + expression=dict(type='dict'), + options=dict(type='dict'), + enabled=dict(type='bool'), + state=dict(required=False, default='present', + choices=['present', 'absent']), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule(argument_spec=argument_spec, + required_if=[('state', 'present', ['description', + 'resource_type', + 'expression', + 'enabled', + 'options']), + ('state', 'absent', ['description'])]) + + state = module.params['state'] + description = module.params['description'] + + manageiq = ManageIQ(module) + manageiq_alerts = ManageIQAlerts(manageiq) + + existing_alert = manageiq.find_collection_resource_by("alert_definitions", + description=description) + + # we need to add or update the alert + if state == "present": + alert = manageiq_alerts.create_alert_dict(module.params) + + if not existing_alert: + # an alert with this description doesn't exist yet, let's create it + res_args = manageiq_alerts.add_alert(alert) + else: + # an alert with this description exists, we might need to update it + res_args = manageiq_alerts.update_alert(existing_alert, alert) + + # this alert should not exist + elif state == "absent": + # if we have an alert with this description, delete it + if existing_alert: + res_args = manageiq_alerts.delete_alert(existing_alert) + else: + # it doesn't exist, and that's okay + msg = "Alert '{description}' does not exist in ManageIQ" + msg = msg.format(description=description) + res_args = dict(changed=False, msg=msg) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_group.py b/ansible_collections/community/general/plugins/modules/manageiq_group.py new file mode 100644 index 000000000..a142a939f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_group.py @@ -0,0 +1,642 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: 
manageiq_group
+
+short_description: Management of groups in ManageIQ
+extends_documentation_fragment:
+  - community.general.manageiq
+  - community.general.attributes
+
+author: Evert Mulder (@evertmulder)
+description:
+  - The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
+requirements:
+  - manageiq-client
+
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - absent - group should not exist, present - group should exist.
+    choices: ['absent', 'present']
+    default: 'present'
+  description:
+    type: str
+    description:
+      - The group description.
+    required: true
+    default: null
+  role_id:
+    type: int
+    description:
+      - The group role id.
+    required: false
+    default: null
+  role:
+    type: str
+    description:
+      - The group role name.
+      - The C(role_id) has precedence over the C(role) when supplied.
+    required: false
+    default: null
+  tenant_id:
+    type: int
+    description:
+      - The tenant for the group identified by the tenant id.
+    required: false
+    default: null
+  tenant:
+    type: str
+    description:
+      - The tenant for the group identified by the tenant name.
+      - The C(tenant_id) has precedence over the C(tenant) when supplied.
+      - Tenant names are case sensitive.
+    required: false
+    default: null
+  managed_filters:
+    description: The tag values per category.
+    type: dict
+    required: false
+    default: null
+  managed_filters_merge_mode:
+    type: str
+    description:
+      - In merge mode existing categories are kept or updated, new categories are added.
+      - In replace mode all categories will be replaced with the supplied C(managed_filters).
+    choices: [ merge, replace ]
+    default: replace
+  belongsto_filters:
+    description: A list of strings with a reference to the allowed host, cluster or folder.
+    type: list
+    elements: str
+    required: false
+    default: null
+  belongsto_filters_merge_mode:
+    type: str
+    description:
+      - In merge mode existing settings are merged with the supplied C(belongsto_filters).
+      - In replace mode current values are replaced with the supplied C(belongsto_filters).
+    choices: [ merge, replace ]
+    default: replace
+'''
+
+EXAMPLES = '''
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
+  community.general.manageiq_group:
+    description: 'MyGroup-user'
+    role: 'EvmRole-user'
+    tenant: 'my_tenant'
+    manageiq_connection:
+      url: 'https://manageiq_server'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false
+
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
+  community.general.manageiq_group:
+    description: 'MyGroup-user'
+    role: 'EvmRole-user'
+    tenant_id: 4
+    manageiq_connection:
+      url: 'https://manageiq_server'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false
+
+- name: >-
+    Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant,
+    apply 3 prov_max_cpu and 2 department tags to the group,
+    and limit access to a cluster for the group.
+ community.general.manageiq_group: + description: 'MyGroup-user' + role: 'EvmRole-user' + tenant: my_tenant + managed_filters: + prov_max_cpu: + - '1' + - '2' + - '4' + department: + - defense + - engineering + managed_filters_merge_mode: replace + belongsto_filters: + - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name" + belongsto_filters_merge_mode: merge + manageiq_connection: + url: 'https://manageiq_server' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: Delete a group in ManageIQ + community.general.manageiq_group: + state: 'absent' + description: 'MyGroup-user' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + +- name: Delete a group in ManageIQ using a token + community.general.manageiq_group: + state: 'absent' + description: 'MyGroup-user' + manageiq_connection: + url: 'http://127.0.0.1:3000' + token: 'sometoken' +''' + +RETURN = ''' +group: + description: The group. + returned: success + type: complex + contains: + description: + description: The group description + returned: success + type: str + id: + description: The group id + returned: success + type: int + group_type: + description: The group type, system or user + returned: success + type: str + role: + description: The group role name + returned: success + type: str + tenant: + description: The group tenant name + returned: success + type: str + managed_filters: + description: The tag values per category + returned: success + type: dict + belongsto_filters: + description: A list of strings with a reference to the allowed host, cluster or folder + returned: success + type: list + created_on: + description: Group creation date + returned: success + type: str + sample: "2018-08-12T08:37:55+00:00" + updated_on: + description: Group update date + returned: success + type: int + sample: "2018-08-12T08:37:55+00:00" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +class ManageIQgroup(object): + """ + Object to execute group management operations in manageiq. + """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + def group(self, description): + """ Search for group object by description. + Returns: + the group, or None if group was not found. 
+ """ + groups = self.client.collections.groups.find_by(description=description) + if len(groups) == 0: + return None + else: + return groups[0] + + def tenant(self, tenant_id, tenant_name): + """ Search for tenant entity by name or id + Returns: + the tenant entity, None if no id or name was supplied + """ + + if tenant_id: + tenant = self.client.get_entity('tenants', tenant_id) + if not tenant: + self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id)) + return tenant + else: + if tenant_name: + tenant_res = self.client.collections.tenants.find_by(name=tenant_name) + if not tenant_res: + self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name) + if len(tenant_res) > 1: + self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name) + tenant = tenant_res[0] + return tenant + else: + # No tenant name or tenant id supplied + return None + + def role(self, role_id, role_name): + """ Search for a role object by name or id. + Returns: + the role entity, None no id or name was supplied + + the role, or send a module Fail signal if role not found. + """ + if role_id: + role = self.client.get_entity('roles', role_id) + if not role: + self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id)) + return role + else: + if role_name: + role_res = self.client.collections.roles.find_by(name=role_name) + if not role_res: + self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name) + if len(role_res) > 1: + self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name) + return role_res[0] + else: + # No role name or role id supplied + return None + + @staticmethod + def merge_dict_values(norm_current_values, norm_updated_values): + """ Create an merged update object for manageiq group filters. + + The input dict contain the tag values per category. + If the new values contain the category, all tags for that category are replaced + If the new values do not contain the category, the existing tags are kept + + Returns: + the nested array with the merged values, used in the update post body + """ + + # If no updated values are supplied, in merge mode, the original values must be returned + # otherwise the existing tag filters will be removed. + if norm_current_values and (not norm_updated_values): + return norm_current_values + + # If no existing tag filters exist, use the user supplied values + if (not norm_current_values) and norm_updated_values: + return norm_updated_values + + # start with norm_current_values's keys and values + res = norm_current_values.copy() + # replace res with norm_updated_values's keys and values + res.update(norm_updated_values) + return res + + def delete_group(self, group): + """ Deletes a group from manageiq. + + Returns: + a dict of: + changed: boolean indicating if the entity was updated. + msg: a short message describing the operation executed. 
+ """ + try: + url = '%s/groups/%s' % (self.api_url, group['id']) + result = self.client.post(url, action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e))) + + if result['success'] is False: + self.module.fail_json(msg=result['message']) + + return dict( + changed=True, + msg="deleted group %s with id %s" % (group['description'], group['id'])) + + def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode): + """ Edit a manageiq group. + + Returns: + a dict of: + changed: boolean indicating if the entity was updated. + msg: a short message describing the operation executed. + """ + + if role or norm_managed_filters or belongsto_filters: + group.reload(attributes=['miq_user_role_name', 'entitlement']) + + try: + current_role = group['miq_user_role_name'] + except AttributeError: + current_role = None + + changed = False + resource = {} + + if description and group['description'] != description: + resource['description'] = description + changed = True + + if tenant and group['tenant_id'] != tenant['id']: + resource['tenant'] = dict(id=tenant['id']) + changed = True + + if role and current_role != role['name']: + resource['role'] = dict(id=role['id']) + changed = True + + if norm_managed_filters or belongsto_filters: + + # Only compare if filters are supplied + entitlement = group['entitlement'] + + if 'filters' not in entitlement: + # No existing filters exist, use supplied filters + managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) + resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} + changed = True + else: + current_filters = entitlement['filters'] + new_filters = self.edit_group_edit_filters(current_filters, + norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode) + if new_filters: + resource['filters'] = new_filters + changed = True + + if not changed: + return dict( + changed=False, + msg="group %s is not changed." % group['description']) + + # try to update group + try: + self.client.post(group['href'], action='edit', resource=resource) + changed = True + except Exception as e: + self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e))) + + return dict( + changed=changed, + msg="successfully updated the group %s with id %s" % (group['description'], group['id'])) + + def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode): + """ Edit a manageiq group filters. 
+
+        Returns:
+            None if the group filters were not updated, or the 'filters'
+            part of the edit post body if they were.
+        """
+        filters_updated = False
+        new_filters_resource = {}
+
+        current_belongsto_set = current_filters.get('belongsto', set())
+
+        if belongsto_filters:
+            new_belongsto_set = set(belongsto_filters)
+        else:
+            new_belongsto_set = set()
+
+        if current_belongsto_set == new_belongsto_set:
+            new_filters_resource['belongsto'] = current_filters['belongsto']
+        else:
+            if belongsto_filters_merge_mode == 'merge':
+                current_belongsto_set.update(new_belongsto_set)
+                new_filters_resource['belongsto'] = list(current_belongsto_set)
+            else:
+                new_filters_resource['belongsto'] = list(new_belongsto_set)
+            filters_updated = True
+
+        # Process managed filter tags.
+        # The user supplies a dict mapping each category to a list of tags;
+        # ManageIQ (current_filters['managed']) uses an array of arrays, one
+        # array per category. We normalize both the user input and the
+        # current ManageIQ value to a dict of sorted arrays so they can be
+        # compared.
+        norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
+
+        if norm_current_filters == norm_managed_filters:
+            if 'managed' in current_filters:
+                new_filters_resource['managed'] = current_filters['managed']
+        else:
+            if managed_filters_merge_mode == 'merge':
+                merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
+                new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
+            else:
+                new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+            filters_updated = True
+
+        if not filters_updated:
+            return None
+
+        return new_filters_resource
+
+    def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
+        """ Creates the group in manageiq.
+
+        Returns:
+            a dict with changed=True, a success message and the id of the
+            created group.
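+
+        A sketch of the POST body this method builds (hypothetical ids
+        and tags):
+
+            {'description': 'MyGroup-user',
+             'role': {'id': 12},
+             'tenant': {'id': 4},
+             'filters': {'managed': [['/managed/department/eng']],
+                         'belongsto': []}}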
+ """ + # check for required arguments + for key, value in dict(description=description).items(): + if value in (None, ''): + self.module.fail_json(msg="missing required argument: %s" % key) + + url = '%s/groups' % self.api_url + + resource = {'description': description} + + if role is not None: + resource['role'] = dict(id=role['id']) + + if tenant is not None: + resource['tenant'] = dict(id=tenant['id']) + + if norm_managed_filters or belongsto_filters: + managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters) + resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters} + + try: + result = self.client.post(url, action='create', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e))) + + return dict( + changed=True, + msg="successfully created group %s" % description, + group_id=result['results'][0]['id'] + ) + + @staticmethod + def normalized_managed_tag_filters_to_miq(norm_managed_filters): + if not norm_managed_filters: + return None + + return list(norm_managed_filters.values()) + + @staticmethod + def manageiq_filters_to_sorted_dict(current_filters): + current_managed_filters = current_filters.get('managed') + if not current_managed_filters: + return None + + res = {} + for tag_list in current_managed_filters: + tag_list.sort() + key = tag_list[0].split('/')[2] + res[key] = tag_list + + return res + + @staticmethod + def normalize_user_managed_filters_to_sorted_dict(managed_filters, module): + if not managed_filters: + return None + + res = {} + for cat_key in managed_filters: + cat_array = [] + if not isinstance(managed_filters[cat_key], list): + module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key)) + for tags in managed_filters[cat_key]: + miq_managed_tag = "/managed/" + cat_key + "/" + tags + cat_array.append(miq_managed_tag) + # Do not add empty categories. 
ManageIQ will remove all categories that are not supplied + if cat_array: + cat_array.sort() + res[cat_key] = cat_array + return res + + @staticmethod + def create_result_group(group): + """ Creates the ansible result object from a manageiq group entity + + Returns: + a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on + """ + try: + role_name = group['miq_user_role_name'] + except AttributeError: + role_name = None + + managed_filters = None + belongsto_filters = None + if 'filters' in group['entitlement']: + filters = group['entitlement']['filters'] + belongsto_filters = filters.get('belongsto') + group_managed_filters = filters.get('managed') + if group_managed_filters: + managed_filters = {} + for tag_list in group_managed_filters: + key = tag_list[0].split('/')[2] + tags = [] + for t in tag_list: + tags.append(t.split('/')[3]) + managed_filters[key] = tags + + return dict( + id=group['id'], + description=group['description'], + role=role_name, + tenant=group['tenant']['name'], + managed_filters=managed_filters, + belongsto_filters=belongsto_filters, + group_type=group['group_type'], + created_on=group['created_on'], + updated_on=group['updated_on'], + ) + + +def main(): + argument_spec = dict( + description=dict(required=True, type='str'), + state=dict(choices=['absent', 'present'], default='present'), + role_id=dict(required=False, type='int'), + role=dict(required=False, type='str'), + tenant_id=dict(required=False, type='int'), + tenant=dict(required=False, type='str'), + managed_filters=dict(required=False, type='dict'), + managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), + belongsto_filters=dict(required=False, type='list', elements='str'), + belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec + ) + + description = module.params['description'] + state = module.params['state'] + role_id = module.params['role_id'] + role_name = module.params['role'] + tenant_id = module.params['tenant_id'] + tenant_name = module.params['tenant'] + managed_filters = module.params['managed_filters'] + managed_filters_merge_mode = module.params['managed_filters_merge_mode'] + belongsto_filters = module.params['belongsto_filters'] + belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode'] + + manageiq = ManageIQ(module) + manageiq_group = ManageIQgroup(manageiq) + + group = manageiq_group.group(description) + + # group should not exist + if state == "absent": + # if we have a group, delete it + if group: + res_args = manageiq_group.delete_group(group) + # if we do not have a group, nothing to do + else: + res_args = dict( + changed=False, + msg="group '%s' does not exist in manageiq" % description) + + # group should exist + if state == "present": + + tenant = manageiq_group.tenant(tenant_id, tenant_name) + role = manageiq_group.role(role_id, role_name) + norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module) + # if we have a group, edit it + if group: + res_args = manageiq_group.edit_group(group, description, role, tenant, + norm_managed_filters, managed_filters_merge_mode, + belongsto_filters, belongsto_filters_merge_mode) + + # if we do not have a group, create it + else: + res_args = 
manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters) + group = manageiq.client.get_entity('groups', res_args['group_id']) + + group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement']) + res_args['group'] = manageiq_group.create_result_group(group) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_policies.py b/ansible_collections/community/general/plugins/modules/manageiq_policies.py new file mode 100644 index 000000000..061168f7f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_policies.py @@ -0,0 +1,202 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: manageiq_policies + +short_description: Management of resource policy_profiles in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Daniel Korn (@dkorn) +description: + - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + type: str + description: + - C(absent) - policy_profiles should not exist, + - C(present) - policy_profiles should exist, + - > + C(list) - list current policy_profiles and policies. + This state is deprecated and will be removed 8.0.0. + Please use the module M(community.general.manageiq_policies_info) instead. + choices: ['absent', 'present', 'list'] + default: 'present' + policy_profiles: + type: list + elements: dict + description: + - List of dictionaries, each includes the policy_profile C(name) key. + - Required if I(state) is C(present) or C(absent). + resource_type: + type: str + description: + - The type of the resource to which the profile should be [un]assigned. + required: true + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', + 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] + resource_name: + type: str + description: + - The name of the resource to which the profile should be [un]assigned. + - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. + resource_id: + type: int + description: + - The ID of the resource to which the profile should be [un]assigned. + - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. 
+ version_added: 2.2.0 +''' + +EXAMPLES = ''' +- name: Assign new policy_profile for a provider in ManageIQ + community.general.manageiq_policies: + resource_name: 'EngLab' + resource_type: 'provider' + policy_profiles: + - name: openscap profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: Unassign a policy_profile for a provider in ManageIQ + community.general.manageiq_policies: + state: absent + resource_name: 'EngLab' + resource_type: 'provider' + policy_profiles: + - name: openscap profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: List current policy_profile and policies for a provider in ManageIQ + community.general.manageiq_policies: + state: list + resource_name: 'EngLab' + resource_type: 'provider' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false +''' + +RETURN = ''' +manageiq_policies: + description: + - List current policy_profile and policies for a provider in ManageIQ + returned: always + type: dict + sample: '{ + "changed": false, + "profiles": [ + { + "policies": [ + { + "active": true, + "description": "OpenSCAP", + "name": "openscap policy" + }, + { + "active": true, + "description": "Analyse incoming container images", + "name": "analyse incoming container images" + }, + { + "active": true, + "description": "Schedule compliance after smart state analysis", + "name": "schedule compliance after smart state analysis" + } + ], + "profile_description": "OpenSCAP profile", + "profile_name": "openscap profile" + } + ] + }' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities + + +def main(): + actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} + argument_spec = dict( + policy_profiles=dict(type='list', elements='dict'), + resource_id=dict(type='int'), + resource_name=dict(type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + state=dict(required=False, type='str', + choices=['present', 'absent', 'list'], default='present'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + required_if=[ + ('state', 'present', ['policy_profiles']), + ('state', 'absent', ['policy_profiles']) + ], + ) + + policy_profiles = module.params['policy_profiles'] + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + state = module.params['state'] + + if state == "list": + module.deprecate( + 'The value "list" for "state" is deprecated. 
Please use community.general.manageiq_policies_info instead.', + version='8.0.0', + collection_name='community.general' + ) + + # get the action and resource type + action = actions[state] + resource_type = manageiq_entities()[resource_type_key] + + manageiq = ManageIQ(module) + manageiq_policies = manageiq.policies(resource_id, resource_type, resource_name) + + if action == 'list': + # return a list of current profiles for this object + current_profiles = manageiq_policies.query_resource_profiles() + res_args = dict(changed=False, profiles=current_profiles) + else: + # assign or unassign the profiles + res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py b/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py new file mode 100644 index 000000000..8a75ef646 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2022, Alexei Znamensky +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: manageiq_policies_info +version_added: 5.8.0 + +short_description: Listing of resource policy_profiles in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + - community.general.attributes.info_module + +author: Alexei Znamensky (@russoz) +description: + - The manageiq_policies_info module supports listing policy_profiles in ManageIQ. + +options: + resource_type: + type: str + description: + - The type of the resource to obtain the profile for. + required: true + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', + 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] + resource_name: + type: str + description: + - The name of the resource to obtain the profile for. + - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. + resource_id: + type: int + description: + - The ID of the resource to obtain the profile for. + - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. +''' + +EXAMPLES = ''' +- name: List current policy_profile and policies for a provider in ManageIQ + community.general.manageiq_policies_info: + resource_name: 'EngLab' + resource_type: 'provider' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + register: result +''' + +RETURN = ''' +profiles: + description: + - List of current policy_profiles and policies for the resource in ManageIQ.
+ returned: always + type: list + elements: dict + sample: + - policies: + - active: true + description: OpenSCAP + name: openscap policy + - active: true + description: Analyse incoming container images + name: analyse incoming container images + - active: true + description: Schedule compliance after smart state analysis + name: schedule compliance after smart state analysis + profile_description: OpenSCAP profile + profile_name: openscap profile +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities + + +def main(): + argument_spec = dict( + resource_id=dict(required=False, type='int'), + resource_name=dict(required=False, type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + supports_check_mode=True, + ) + + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + + # get the resource type + resource_type = manageiq_entities()[resource_type_key] + + manageiq_policies = ManageIQ(module).policies(resource_id, resource_type, resource_name) + + # return a list of current profiles for this object + current_profiles = manageiq_policies.query_resource_profiles() + res_args = dict(changed=False, profiles=current_profiles) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_provider.py b/ansible_collections/community/general/plugins/modules/manageiq_provider.py new file mode 100644 index 000000000..bbc27214b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_provider.py @@ -0,0 +1,939 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: manageiq_provider +short_description: Management of providers in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Daniel Korn (@dkorn) +description: + - The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + type: str + description: + - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed + choices: ['absent', 'present', 'refresh'] + default: 'present' + name: + type: str + description: The provider's name. + required: true + type: + type: str + description: The provider's type. + choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE'] + zone: + type: str + description: The ManageIQ zone name that will manage the provider. + default: 'default' + provider_region: + type: str + description: The provider region name to connect to (e.g.
AWS region for Amazon). + host_default_vnc_port_start: + type: str + description: The first port in the host VNC range. defaults to None. + host_default_vnc_port_end: + type: str + description: The last port in the host VNC range. defaults to None. + subscription: + type: str + description: Microsoft Azure subscription ID. defaults to None. + project: + type: str + description: Google Compute Engine Project ID. defaults to None. + azure_tenant_id: + type: str + description: Tenant ID. defaults to None. + aliases: [ keystone_v3_domain_id ] + tenant_mapping_enabled: + type: bool + default: false + description: Whether to enable mapping of existing tenants. defaults to False. + api_version: + type: str + description: The OpenStack Keystone API version. defaults to None. + choices: ['v2', 'v3'] + + provider: + description: Default endpoint connection information, required if state is true. + suboptions: + hostname: + type: str + description: The provider's api hostname. + required: true + port: + type: int + description: The provider's api port. + userid: + type: str + description: Provider's api endpoint authentication userid. defaults to None. + password: + type: str + description: Provider's api endpoint authentication password. defaults to None. + auth_key: + type: str + description: Provider's api endpoint authentication bearer token. defaults to None. + validate_certs: + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + type: bool + default: true + aliases: [ verify_ssl ] + security_protocol: + type: str + description: How SSL certificates should be used for HTTPS requests. defaults to None. + choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] + certificate_authority: + type: str + description: The CA bundle string with custom certificates. defaults to None. + + metrics: + description: Metrics endpoint connection information. + suboptions: + hostname: + type: str + description: The provider's api hostname. + required: true + port: + type: int + description: The provider's api port. + userid: + type: str + description: Provider's api endpoint authentication userid. defaults to None. + password: + type: str + description: Provider's api endpoint authentication password. defaults to None. + auth_key: + type: str + description: Provider's api endpoint authentication bearer token. defaults to None. + validate_certs: + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + type: bool + default: true + aliases: [ verify_ssl ] + security_protocol: + type: str + choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] + description: How SSL certificates should be used for HTTPS requests. defaults to None. + certificate_authority: + type: str + description: The CA bundle string with custom certificates. defaults to None. + path: + type: str + description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history). + + alerts: + description: Alerts endpoint connection information. + suboptions: + hostname: + type: str + description: The provider's api hostname. + required: true + port: + type: int + description: The provider's api port. + userid: + type: str + description: Provider's api endpoint authentication userid. defaults to None. + password: + type: str + description: Provider's api endpoint authentication password. defaults to None. 
+ auth_key: + type: str + description: Provider's api endpoint authentication bearer token. defaults to None. + validate_certs: + type: bool + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + default: true + aliases: [ verify_ssl ] + security_protocol: + type: str + choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl'] + description: How SSL certificates should be used for HTTPS requests. defaults to None. + certificate_authority: + type: str + description: The CA bundle string with custom certificates. defaults to None. + + ssh_keypair: + description: SSH key pair used for SSH connections to all hosts in this provider. + suboptions: + hostname: + type: str + description: Director hostname. + required: true + userid: + type: str + description: SSH username. + auth_key: + type: str + description: SSH private key. + validate_certs: + description: + - Whether certificates should be verified for connections. + type: bool + default: true + aliases: [ verify_ssl ] +''' + +EXAMPLES = ''' +- name: Create a new provider in ManageIQ ('Hawkular' metrics) + community.general.manageiq_provider: + name: 'EngLab' + type: 'OpenShift' + state: 'present' + provider: + auth_key: 'topSecret' + hostname: 'example.com' + port: 8443 + validate_certs: true + security_protocol: 'ssl-with-validation-custom-ca' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + metrics: + auth_key: 'topSecret' + role: 'hawkular' + hostname: 'example.com' + port: 443 + validate_certs: true + security_protocol: 'ssl-with-validation-custom-ca' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + 
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + manageiq_connection: + url: 'https://127.0.0.1:80' + username: 'admin' + password: 'password' + validate_certs: true + + +- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics) + community.general.manageiq_provider: + name: 'EngLab' + type: 'Openshift' + state: 'present' + provider: + auth_key: 'topSecret' + hostname: 'next.example.com' + port: 8443 + validate_certs: true + security_protocol: 'ssl-with-validation-custom-ca' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + metrics: + auth_key: 'topSecret' + hostname: 'next.example.com' + port: 443 + validate_certs: true + security_protocol: 'ssl-with-validation-custom-ca' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + manageiq_connection: + url: 'https://127.0.0.1' + username: 'admin' + password: 'password' + validate_certs: true + + +- name: Delete a provider in ManageIQ + community.general.manageiq_provider: + name: 'EngLab' + type: 'Openshift' + state: 'absent' + manageiq_connection: + url: 'https://127.0.0.1' + username: 'admin' + password: 
'password' + validate_certs: true + + +- name: Create a new Amazon provider in ManageIQ using token authentication + community.general.manageiq_provider: + name: 'EngAmazon' + type: 'Amazon' + state: 'present' + provider: + hostname: 'amazon.example.com' + userid: 'hello' + password: 'world' + manageiq_connection: + url: 'https://127.0.0.1' + token: 'VeryLongToken' + validate_certs: true + + +- name: Create a new oVirt provider in ManageIQ + community.general.manageiq_provider: + name: 'RHEV' + type: 'oVirt' + state: 'present' + provider: + hostname: 'rhev01.example.com' + userid: 'admin@internal' + password: 'password' + validate_certs: true + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + metrics: + hostname: 'metrics.example.com' + path: 'ovirt_engine_history' + userid: 'user_id_metrics' + password: 'password_metrics' + validate_certs: true + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + manageiq_connection: + url: 'https://127.0.0.1' + username: 'admin' + password: 'password' + validate_certs: true + +- name: Create a new VMware provider in ManageIQ + community.general.manageiq_provider: + name: 'EngVMware' + type: 'VMware' + state: 'present' + provider: + hostname: 'vcenter.example.com' + host_default_vnc_port_start: 5800 + host_default_vnc_port_end: 5801 + userid: 'root' + password: 'password' + manageiq_connection: + url: 'https://127.0.0.1' + token: 
'VeryLongToken' + validate_certs: true + +- name: Create a new Azure provider in ManageIQ + community.general.manageiq_provider: + name: 'EngAzure' + type: 'Azure' + provider_region: 'northeurope' + subscription: 'e272bd74-f661-484f-b223-88dd128a4049' + azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048' + state: 'present' + provider: + hostname: 'azure.example.com' + userid: 'e272bd74-f661-484f-b223-88dd128a4049' + password: 'password' + manageiq_connection: + url: 'https://cf-6af0.rhpds.opentlc.com' + username: 'admin' + password: 'password' + validate_certs: false + +- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair + community.general.manageiq_provider: + name: 'EngDirector' + type: 'Director' + api_version: 'v3' + state: 'present' + provider: + hostname: 'director.example.com' + userid: 'admin' + password: 'password' + security_protocol: 'ssl-with-validation' + validate_certs: 'true' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + ssh_keypair: + hostname: director.example.com + userid: heat-admin + auth_key: 'SecretSSHPrivateKey' + +- name: Create a new OpenStack provider in ManageIQ with amqp metrics + community.general.manageiq_provider: + name: 'EngOpenStack' + type: 'OpenStack' + api_version: 'v3' + state: 'present' + provider_region: 'europe' + tenant_mapping_enabled: 'False' + keystone_v3_domain_id: 'mydomain' + provider: + hostname: 'openstack.example.com' + userid: 'admin' + password: 'password' + security_protocol: 'ssl-with-validation' + validate_certs: 'true' + certificate_authority: | + -----BEGIN CERTIFICATE----- + FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu + c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw + MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw + ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S + ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm + AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw + Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa + z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ + ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ + AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG + SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI + QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA + aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 + 
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA + qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o + XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + -----END CERTIFICATE----- + metrics: + role: amqp + hostname: 'amqp.example.com' + security_protocol: 'non-ssl' + port: 5666 + userid: admin + password: password + + +- name: Create a new GCE provider in ManageIQ + community.general.manageiq_provider: + name: 'EngGoogle' + type: 'GCE' + provider_region: 'europe-west1' + project: 'project1' + state: 'present' + provider: + hostname: 'gce.example.com' + auth_key: 'google_json_key' + validate_certs: 'false' +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +def supported_providers(): + return dict( + Openshift=dict( + class_name='ManageIQ::Providers::Openshift::ContainerManager', + authtype='bearer', + default_role='default', + metrics_role='prometheus', + alerts_role='prometheus_alerts', + ), + Amazon=dict( + class_name='ManageIQ::Providers::Amazon::CloudManager', + ), + oVirt=dict( + class_name='ManageIQ::Providers::Redhat::InfraManager', + default_role='default', + metrics_role='metrics', + ), + VMware=dict( + class_name='ManageIQ::Providers::Vmware::InfraManager', + ), + Azure=dict( + class_name='ManageIQ::Providers::Azure::CloudManager', + ), + Director=dict( + class_name='ManageIQ::Providers::Openstack::InfraManager', + ssh_keypair_role="ssh_keypair" + ), + OpenStack=dict( + class_name='ManageIQ::Providers::Openstack::CloudManager', + ), + GCE=dict( + class_name='ManageIQ::Providers::Google::CloudManager', + ), + ) + + +def endpoint_list_spec(): + return dict( + provider=dict(type='dict', options=endpoint_argument_spec()), + metrics=dict(type='dict', options=endpoint_argument_spec()), + alerts=dict(type='dict', options=endpoint_argument_spec()), + ssh_keypair=dict(type='dict', options=endpoint_argument_spec(), no_log=False), + ) + + +def endpoint_argument_spec(): + return dict( + role=dict(), + hostname=dict(required=True), + port=dict(type='int'), + validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']), + certificate_authority=dict(), + security_protocol=dict( + choices=[ + 'ssl-with-validation', + 'ssl-with-validation-custom-ca', + 'ssl-without-validation', + 'non-ssl', + ], + ), + userid=dict(), + password=dict(no_log=True), + auth_key=dict(no_log=True), + subscription=dict(no_log=True), + project=dict(), + uid_ems=dict(), + path=dict(), + ) + + +def delete_nulls(h): + """ Remove null entries from a hash + + Returns: + a hash without nulls + """ + if isinstance(h, list): + return [delete_nulls(i) for i in h] + if isinstance(h, dict): + return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None) + + return h + + +class ManageIQProvider(object): + """ + Object to execute provider management operations in manageiq. + """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + def class_name_to_type(self, class_name): + """ Convert class_name to type + + Returns: + the type + """ + out = [k for k, v in supported_providers().items() if v['class_name'] == class_name] + if len(out) == 1: + return out[0] + + return None + + def zone_id(self, name): + """ Search for zone id by zone name. + + Returns: + the zone id, or send a module Fail signal if zone not found. 
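+
+        Example (illustrative, assuming a zone named 'default' exists):
+
+            self.zone_id('default')  # -> the numeric id of that zone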
+ """ + zone = self.manageiq.find_collection_resource_by('zones', name=name) + if not zone: # zone doesn't exist + self.module.fail_json( + msg="zone %s does not exist in manageiq" % (name)) + + return zone['id'] + + def provider(self, name): + """ Search for provider object by name. + + Returns: + the provider, or None if provider not found. + """ + return self.manageiq.find_collection_resource_by('providers', name=name) + + def build_connection_configurations(self, provider_type, endpoints): + """ Build "connection_configurations" objects from + requested endpoints provided by user + + Returns: + the user requested provider endpoints list + """ + connection_configurations = [] + endpoint_keys = endpoint_list_spec().keys() + provider_defaults = supported_providers().get(provider_type, {}) + + # get endpoint defaults + endpoint = endpoints.get('provider') + default_auth_key = endpoint.get('auth_key') + + # build a connection_configuration object for each endpoint + for endpoint_key in endpoint_keys: + endpoint = endpoints.get(endpoint_key) + if endpoint: + # get role and authtype + role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default') + if role == 'default': + authtype = provider_defaults.get('authtype') or role + else: + authtype = role + + # set a connection_configuration + connection_configurations.append({ + 'endpoint': { + 'role': role, + 'hostname': endpoint.get('hostname'), + 'port': endpoint.get('port'), + 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)], + 'security_protocol': endpoint.get('security_protocol'), + 'certificate_authority': endpoint.get('certificate_authority'), + 'path': endpoint.get('path'), + }, + 'authentication': { + 'authtype': authtype, + 'userid': endpoint.get('userid'), + 'password': endpoint.get('password'), + 'auth_key': endpoint.get('auth_key') or default_auth_key, + } + }) + + return connection_configurations + + def delete_provider(self, provider): + """ Deletes a provider from manageiq. + + Returns: + a short message describing the operation executed. + """ + try: + url = '%s/providers/%s' % (self.api_url, provider['id']) + result = self.client.post(url, action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e))) + + return dict(changed=True, msg=result['message']) + + def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version): + """ Edit a provider from manageiq. + + Returns: + a short message describing the operation executed. + """ + url = '%s/providers/%s' % (self.api_url, provider['id']) + + resource = dict( + name=name, + zone={'id': zone_id}, + provider_region=provider_region, + connection_configurations=endpoints, + host_default_vnc_port_start=host_default_vnc_port_start, + host_default_vnc_port_end=host_default_vnc_port_end, + subscription=subscription, + project=project, + uid_ems=uid_ems, + tenant_mapping_enabled=tenant_mapping_enabled, + api_version=api_version, + ) + + # NOTE: we do not check for diff's between requested and current + # provider, we always submit endpoints with password or auth_keys, + # since we can not compare with current password or auth_key, + # every edit request is sent to ManageIQ API without comparing + # it to current state. 
+ + # clean nulls, we do not send nulls to the api + resource = delete_nulls(resource) + + # try to update provider + try: + result = self.client.post(url, action='edit', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e))) + + return dict( + changed=True, + msg="successfully updated the provider %s: %s" % (provider['name'], result)) + + def create_provider(self, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version): + """ Creates the provider in manageiq. + + Returns: + a short message describing the operation executed. + """ + resource = dict( + name=name, + zone={'id': zone_id}, + provider_region=provider_region, + host_default_vnc_port_start=host_default_vnc_port_start, + host_default_vnc_port_end=host_default_vnc_port_end, + subscription=subscription, + project=project, + uid_ems=uid_ems, + tenant_mapping_enabled=tenant_mapping_enabled, + api_version=api_version, + connection_configurations=endpoints, + ) + + # clean nulls, we do not send nulls to the api + resource = delete_nulls(resource) + + # try to create a new provider + try: + url = '%s/providers' % (self.api_url) + result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource) + except Exception as e: + self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e))) + + return dict( + changed=True, + msg="successfully created the provider %s: %s" % (name, result['results'])) + + def refresh(self, provider, name): + """ Trigger provider refresh. + + Returns: + a short message describing the operation executed. + """ + try: + url = '%s/providers/%s' % (self.api_url, provider['id']) + result = self.client.post(url, action='refresh') + except Exception as e: + self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e))) + + return dict( + changed=True, + msg="refreshing provider %s" % name) + + +def main(): + zone_id = None + endpoints = [] + argument_spec = dict( + state=dict(choices=['absent', 'present', 'refresh'], default='present'), + name=dict(required=True), + zone=dict(default='default'), + provider_region=dict(), + host_default_vnc_port_start=dict(), + host_default_vnc_port_end=dict(), + subscription=dict(), + project=dict(), + azure_tenant_id=dict(aliases=['keystone_v3_domain_id']), + tenant_mapping_enabled=dict(default=False, type='bool'), + api_version=dict(choices=['v2', 'v3']), + type=dict(choices=list(supported_providers().keys())), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + # add the endpoint arguments to the arguments + argument_spec.update(endpoint_list_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['provider']), + ('state', 'refresh', ['name'])], + required_together=[ + ['host_default_vnc_port_start', 'host_default_vnc_port_end'] + ], + ) + + name = module.params['name'] + zone_name = module.params['zone'] + provider_type = module.params['type'] + raw_endpoints = module.params + provider_region = module.params['provider_region'] + host_default_vnc_port_start = module.params['host_default_vnc_port_start'] + host_default_vnc_port_end = module.params['host_default_vnc_port_end'] + subscription = module.params['subscription'] + uid_ems = module.params['azure_tenant_id'] + project = 
module.params['project'] + tenant_mapping_enabled = module.params['tenant_mapping_enabled'] + api_version = module.params['api_version'] + state = module.params['state'] + + manageiq = ManageIQ(module) + manageiq_provider = ManageIQProvider(manageiq) + + provider = manageiq_provider.provider(name) + + # provider should not exist + if state == "absent": + # if we have a provider, delete it + if provider: + res_args = manageiq_provider.delete_provider(provider) + # if we do not have a provider, nothing to do + else: + res_args = dict( + changed=False, + msg="provider %s does not exist in manageiq" % (name)) + + # provider should exist + if state == "present": + # get data user did not explicitly give + if zone_name: + zone_id = manageiq_provider.zone_id(zone_name) + + # if we do not have a provider_type, use the current provider_type + if provider and not provider_type: + provider_type = manageiq_provider.class_name_to_type(provider['type']) + + # fail if we still do not have a provider_type + if not provider_type: + manageiq_provider.module.fail_json( + msg="missing required argument: type") + + # check supported_providers types + if provider_type not in supported_providers().keys(): + manageiq_provider.module.fail_json( + msg="provider_type %s is not supported" % (provider_type)) + + # build "connection_configurations" objects from user requested endpoints + # "provider" is a required endpoint, if we have it, we have endpoints + if raw_endpoints.get("provider"): + endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints) + + # if we have a provider, edit it + if provider: + res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version) + # if we do not have a provider, create it + else: + res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region, + host_default_vnc_port_start, host_default_vnc_port_end, + subscription, project, uid_ems, tenant_mapping_enabled, api_version) + + # refresh provider (trigger sync) + if state == "refresh": + if provider: + res_args = manageiq_provider.refresh(provider, name) + else: + res_args = dict( + changed=False, + msg="provider %s does not exist in manageiq" % (name)) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tags.py b/ansible_collections/community/general/plugins/modules/manageiq_tags.py new file mode 100644 index 000000000..7e190d49c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_tags.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: manageiq_tags + +short_description: Management of resource tags in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Daniel Korn (@dkorn) +description: + - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
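+# For orientation (tag path layout inferred from the managed-filter handling in
+# manageiq_group earlier in this patch): a pair such as
+#   {category: environment, name: prod}
+# corresponds to a managed tag path of the form /managed/environment/prod.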
+ +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + type: str + description: + - C(absent) - tags should not exist. + - C(present) - tags should exist. + - C(list) - list current tags. + choices: ['absent', 'present', 'list'] + default: 'present' + tags: + type: list + elements: dict + description: + - C(tags) - list of dictionaries, each including C(name) and C(category) keys. + - Required if I(state) is C(present) or C(absent). + resource_type: + type: str + description: + - The relevant resource type in ManageIQ. + required: true + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', + 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] + resource_name: + type: str + description: + - The name of the resource at which tags will be controlled. + - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. + resource_id: + description: + - The ID of the resource at which tags will be controlled. + - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. + type: int + version_added: 2.2.0 +''' + +EXAMPLES = ''' +- name: Create new tags for a provider in ManageIQ. + community.general.manageiq_tags: + resource_name: 'EngLab' + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: Create new tags for a provider in ManageIQ by resource ID. + community.general.manageiq_tags: + resource_id: 23000000790497 + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: Remove tags for a provider in ManageIQ. + community.general.manageiq_tags: + state: absent + resource_name: 'EngLab' + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: List current tags for a provider in ManageIQ.
+ community.general.manageiq_tags: + state: list + resource_name: 'EngLab' + resource_type: 'provider' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ( + ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities +) + + +def main(): + actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} + argument_spec = dict( + tags=dict(type='list', elements='dict'), + resource_id=dict(type='int'), + resource_name=dict(type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + state=dict(required=False, type='str', + choices=['present', 'absent', 'list'], default='present'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + required_if=[ + ('state', 'present', ['tags']), + ('state', 'absent', ['tags']) + ], + ) + + tags = module.params['tags'] + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + state = module.params['state'] + + # get the action and resource type + action = actions[state] + resource_type = manageiq_entities()[resource_type_key] + + manageiq = ManageIQ(module) + + # query resource id, fail if resource does not exist + if resource_id is None: + resource_id = manageiq.query_resource_id(resource_type, resource_name) + + manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id) + + if action == 'list': + # return a list of current tags for this object + current_tags = manageiq_tags.query_resource_tags() + res_args = dict(changed=False, tags=current_tags) + else: + # assign or unassign the tags + res_args = manageiq_tags.assign_or_unassign_tags(tags, action) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py b/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py new file mode 100644 index 000000000..af71e150c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: manageiq_tags_info +version_added: 5.8.0 +short_description: Retrieve resource tags in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + - community.general.attributes.info_module + +author: Alexei Znamensky (@russoz) +description: + - This module supports retrieving resource tags from ManageIQ. + +options: + resource_type: + type: str + description: + - The relevant resource type in ManageIQ. 
+ required: true + choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', + 'data store', 'group', 'resource pool', 'service', 'service template', + 'template', 'tenant', 'user'] + resource_name: + type: str + description: + - The name of the resource at which tags will be controlled. + - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. + resource_id: + description: + - The ID of the resource at which tags will be controlled. + - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. + type: int +''' + +EXAMPLES = ''' +- name: List current tags for a provider in ManageIQ. + community.general.manageiq_tags_info: + resource_name: 'EngLab' + resource_type: 'provider' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + register: result +''' + +RETURN = ''' +tags: + description: List of tags associated with the resource. + returned: on success + type: list + elements: dict +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ( + ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities +) + + +def main(): + argument_spec = dict( + resource_id=dict(type='int'), + resource_name=dict(type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + supports_check_mode=True, + ) + + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + + # get the action and resource type + resource_type = manageiq_entities()[resource_type_key] + + manageiq = ManageIQ(module) + + # query resource id, fail if resource does not exist + if resource_id is None: + resource_id = manageiq.query_resource_id(resource_type, resource_name) + + manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id) + + # return a list of current tags for this object + current_tags = manageiq_tags.query_resource_tags() + res_args = dict(changed=False, tags=current_tags) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tenant.py b/ansible_collections/community/general/plugins/modules/manageiq_tenant.py new file mode 100644 index 000000000..d68e26a73 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/manageiq_tenant.py @@ -0,0 +1,550 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + +module: manageiq_tenant + +short_description: Management of tenants in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Evert Mulder (@evertmulder) +description: + - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ. 
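+# Parent resolution (see ManageIQTenant.tenant() below): C(parent_id) takes
+# precedence over C(parent); with neither supplied, the root tenant (the one
+# with ancestry=None) is used.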
+ +requirements: + - manageiq-client +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - absent - tenant should not exist, present - tenant should exist. + choices: ['absent', 'present'] + default: 'present' + name: + type: str + description: + - The tenant name. + required: true + default: null + description: + type: str + description: + - The tenant description. + required: true + default: null + parent_id: + type: int + description: + - The ID of the parent tenant. If not supplied the root tenant is used. + - C(parent_id) takes precedence over C(parent) when both are supplied. + required: false + default: null + parent: + type: str + description: + - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used. + required: false + default: null + quotas: + type: dict + description: + - The tenant quotas. + - All parameters are case sensitive. + - 'Valid attributes are:' + - ' - C(cpu_allocated) (int): use null to remove the quota.' + - ' - C(mem_allocated) (GB): use null to remove the quota.' + - ' - C(storage_allocated) (GB): use null to remove the quota.' + - ' - C(vms_allocated) (int): use null to remove the quota.' + - ' - C(templates_allocated) (int): use null to remove the quota.' + required: false + default: {} +''' + +EXAMPLES = ''' +- name: Update the root tenant in ManageIQ + community.general.manageiq_tenant: + name: 'My Company' + description: 'My company name' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: Create a tenant in ManageIQ + community.general.manageiq_tenant: + name: 'Dep1' + description: 'Manufacturing department' + parent_id: 1 + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: Delete a tenant in ManageIQ + community.general.manageiq_tenant: + state: 'absent' + name: 'Dep1' + parent_id: 1 + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + +- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated + community.general.manageiq_tenant: + name: 'Dep1' + parent_id: 1 + quotas: + cpu_allocated: 100 + mem_allocated: 50 + vms_allocated: null + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false + + +- name: Delete a tenant in ManageIQ using a token + community.general.manageiq_tenant: + state: 'absent' + name: 'Dep1' + parent_id: 1 + manageiq_connection: + url: 'http://127.0.0.1:3000' + token: 'sometoken' + validate_certs: false +''' + +RETURN = ''' +tenant: + description: The tenant. + returned: success + type: complex + contains: + id: + description: The tenant id + returned: success + type: int + name: + description: The tenant name + returned: success + type: str + description: + description: The tenant description + returned: success + type: str + parent_id: + description: The ID of the parent tenant + returned: success + type: int + quotas: + description: List of tenant quotas + returned: success + type: list + sample: + cpu_allocated: 100 + mem_allocated: 50 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec + + +class ManageIQTenant(object): + """ + Object to execute tenant management operations in manageiq.
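+
+    A minimal usage sketch (assuming an AnsibleModule wired with the manageiq
+    connection arguments, as done in main() below):
+
+        manageiq = ManageIQ(module)
+        parent, tenant = ManageIQTenant(manageiq).tenant('Dep1', 1, None)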
+ """ + + def __init__(self, manageiq): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + def tenant(self, name, parent_id, parent): + """ Search for tenant object by name and parent_id or parent + or the root tenant if no parent or parent_id is supplied. + Returns: + the parent tenant, None for the root tenant + the tenant or None if tenant was not found. + """ + + if parent_id: + parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id) + if not parent_tenant_res: + self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id)) + parent_tenant = parent_tenant_res[0] + tenants = self.client.collections.tenants.find_by(name=name) + + for tenant in tenants: + try: + ancestry = tenant['ancestry'] + except AttributeError: + ancestry = None + + if ancestry: + tenant_parent_id = int(ancestry.split("/")[-1]) + if int(tenant_parent_id) == parent_id: + return parent_tenant, tenant + + return parent_tenant, None + else: + if parent: + parent_tenant_res = self.client.collections.tenants.find_by(name=parent) + if not parent_tenant_res: + self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent) + + if len(parent_tenant_res) > 1: + self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent) + + parent_tenant = parent_tenant_res[0] + parent_id = int(parent_tenant['id']) + tenants = self.client.collections.tenants.find_by(name=name) + + for tenant in tenants: + try: + ancestry = tenant['ancestry'] + except AttributeError: + ancestry = None + + if ancestry: + tenant_parent_id = int(ancestry.split("/")[-1]) + if tenant_parent_id == parent_id: + return parent_tenant, tenant + + return parent_tenant, None + else: + # No parent or parent id supplied we select the root tenant + return None, self.client.collections.tenants.find_by(ancestry=None)[0] + + def compare_tenant(self, tenant, name, description): + """ Compare tenant fields with new field values. + + Returns: + false if tenant fields have some difference from new fields, true o/w. + """ + found_difference = ( + (name and tenant['name'] != name) or + (description and tenant['description'] != description) + ) + + return not found_difference + + def delete_tenant(self, tenant): + """ Deletes a tenant from manageiq. + + Returns: + dict with `msg` and `changed` + """ + try: + url = '%s/tenants/%s' % (self.api_url, tenant['id']) + result = self.client.post(url, action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e))) + + if result['success'] is False: + self.module.fail_json(msg=result['message']) + + return dict(changed=True, msg=result['message']) + + def edit_tenant(self, tenant, name, description): + """ Edit a manageiq tenant. + + Returns: + dict with `msg` and `changed` + """ + resource = dict(name=name, description=description, use_config_for_attributes=False) + + # check if we need to update ( compare_tenant is true is no difference found ) + if self.compare_tenant(tenant, name, description): + return dict( + changed=False, + msg="tenant %s is not changed." 
% tenant['name'], + tenant=tenant['_data']) + + # try to update tenant + try: + result = self.client.post(tenant['href'], action='edit', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e))) + + return dict( + changed=True, + msg="successfully updated the tenant with id %s" % (tenant['id'])) + + def create_tenant(self, name, description, parent_tenant): + """ Creates the tenant in manageiq. + + Returns: + dict with `msg`, `changed` and `tenant_id` + """ + parent_id = parent_tenant['id'] + # check for required arguments + for key, value in dict(name=name, description=description, parent_id=parent_id).items(): + if value in (None, ''): + self.module.fail_json(msg="missing required argument: %s" % key) + + url = '%s/tenants' % self.api_url + + resource = {'name': name, 'description': description, 'parent': {'id': parent_id}} + + try: + result = self.client.post(url, action='create', resource=resource) + tenant_id = result['results'][0]['id'] + except Exception as e: + self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e))) + + return dict( + changed=True, + msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id), + tenant_id=tenant_id) + + def tenant_quota(self, tenant, quota_key): + """ Search for tenant quota object by tenant and quota_key. + Returns: + the quota for the tenant, or None if the tenant quota was not found. + """ + + tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key)) + + return tenant_quotas['resources'] + + def tenant_quotas(self, tenant): + """ Search for tenant quotas object by tenant. + Returns: + the quotas for the tenant, or None if no tenant quotas were not found. + """ + + tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href'])) + + return tenant_quotas['resources'] + + def update_tenant_quotas(self, tenant, quotas): + """ Creates the tenant quotas in manageiq. + + Returns: + dict with `msg` and `changed` + """ + + changed = False + messages = [] + for quota_key, quota_value in quotas.items(): + current_quota_filtered = self.tenant_quota(tenant, quota_key) + if current_quota_filtered: + current_quota = current_quota_filtered[0] + else: + current_quota = None + + if quota_value: + # Change the byte values to GB + if quota_key in ['storage_allocated', 'mem_allocated']: + quota_value_int = int(quota_value) * 1024 * 1024 * 1024 + else: + quota_value_int = int(quota_value) + if current_quota: + res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int) + else: + res = self.create_tenant_quota(tenant, quota_key, quota_value_int) + else: + if current_quota: + res = self.delete_tenant_quota(tenant, current_quota) + else: + res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key) + + if res['changed']: + changed = True + + messages.append(res['msg']) + + return dict( + changed=changed, + msg=', '.join(messages)) + + def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value): + """ Update the tenant quotas in manageiq. 
+ + Returns: + result + """ + + if current_quota['value'] == quota_value: + return dict( + changed=False, + msg="tenant quota %s already has value %s" % (quota_key, quota_value)) + else: + + url = '%s/quotas/%s' % (tenant['href'], current_quota['id']) + resource = {'value': quota_value} + try: + self.client.post(url, action='edit', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e))) + + return dict( + changed=True, + msg="successfully updated tenant quota %s" % quota_key) + + def create_tenant_quota(self, tenant, quota_key, quota_value): + """ Creates the tenant quotas in manageiq. + + Returns: + result + """ + url = '%s/quotas' % (tenant['href']) + resource = {'name': quota_key, 'value': quota_value} + try: + self.client.post(url, action='create', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e))) + + return dict( + changed=True, + msg="successfully created tenant quota %s" % quota_key) + + def delete_tenant_quota(self, tenant, quota): + """ deletes the tenant quotas in manageiq. + + Returns: + result + """ + try: + result = self.client.post(quota['href'], action='delete') + except Exception as e: + self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e))) + + return dict(changed=True, msg=result['message']) + + def create_tenant_response(self, tenant, parent_tenant): + """ Creates the ansible result object from a manageiq tenant entity + + Returns: + a dict with the tenant id, name, description, parent id, + quota's + """ + tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas']) + + try: + ancestry = tenant['ancestry'] + tenant_parent_id = ancestry.split("/")[-1] + except AttributeError: + # The root tenant does not return the ancestry attribute + tenant_parent_id = None + + return dict( + id=tenant['id'], + name=tenant['name'], + description=tenant['description'], + parent_id=tenant_parent_id, + quotas=tenant_quotas + ) + + @staticmethod + def create_tenant_quotas_response(tenant_quotas): + """ Creates the ansible result object from a manageiq tenant_quotas entity + + Returns: + a dict with the applied quotas, name and value + """ + + if not tenant_quotas: + return {} + + result = {} + for quota in tenant_quotas: + if quota['unit'] == 'bytes': + value = float(quota['value']) / (1024 * 1024 * 1024) + else: + value = quota['value'] + result[quota['name']] = value + return result + + +def main(): + argument_spec = dict( + name=dict(required=True, type='str'), + description=dict(required=True, type='str'), + parent_id=dict(required=False, type='int'), + parent=dict(required=False, type='str'), + state=dict(choices=['absent', 'present'], default='present'), + quotas=dict(type='dict', default={}) + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec + ) + + name = module.params['name'] + description = module.params['description'] + parent_id = module.params['parent_id'] + parent = module.params['parent'] + state = module.params['state'] + quotas = module.params['quotas'] + + manageiq = ManageIQ(module) + manageiq_tenant = ManageIQTenant(manageiq) + + parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent) + + # tenant should not exist + if state == "absent": + # if we have a tenant, delete it + if tenant: + res_args = 
+        # if we do not have a tenant, nothing to do
+        else:
+            if parent_id:
+                msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
+            else:
+                msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
+
+            res_args = dict(
+                changed=False,
+                msg=msg)
+
+    # tenant should exist
+    if state == "present":
+        # if we have a tenant, edit it
+        if tenant:
+            res_args = manageiq_tenant.edit_tenant(tenant, name, description)
+
+        # if we do not have a tenant, create it
+        else:
+            res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
+            tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
+
+        # quotas are supplied and we have a tenant
+        if quotas:
+            tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
+            if tenant_quotas_res['changed']:
+                res_args['changed'] = True
+                res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
+
+        tenant.reload(expand='resources', attributes=['tenant_quotas'])
+        res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
+
+    module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_user.py b/ansible_collections/community/general/plugins/modules/manageiq_user.py
new file mode 100644
index 000000000..0d3d8718b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_user.py
@@ -0,0 +1,325 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Daniel Korn
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_user
+
+short_description: Management of users in ManageIQ
+extends_documentation_fragment:
+  - community.general.manageiq
+  - community.general.attributes
+
+author: Daniel Korn (@dkorn)
+description:
+  - The manageiq_user module supports adding, updating and deleting users in ManageIQ.
+
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - C(absent) - user should not exist, C(present) - user should exist.
+    choices: ['absent', 'present']
+    default: 'present'
+  userid:
+    type: str
+    description:
+      - The unique userid in manageiq, often referred to as the username.
+    required: true
+  name:
+    type: str
+    description:
+      - The user's full name.
+  password:
+    type: str
+    description:
+      - The user's password.
+  group:
+    type: str
+    description:
+      - The name of the group to which the user belongs.
+  email:
+    type: str
+    description:
+      - The user's email address.
+  update_password:
+    type: str
+    default: always
+    choices: ['always', 'on_create']
+    description:
+      - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
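+      - When C(on_create) is used against an existing user, a supplied I(password) is ignored rather than applied.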
+'''
+
+EXAMPLES = '''
+- name: Create a new user in ManageIQ
+  community.general.manageiq_user:
+    userid: 'jdoe'
+    name: 'Jane Doe'
+    password: 'VerySecret'
+    group: 'EvmGroup-user'
+    email: 'jdoe@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false
+
+- name: Create a new user in ManageIQ using a token
+  community.general.manageiq_user:
+    userid: 'jdoe'
+    name: 'Jane Doe'
+    password: 'VerySecret'
+    group: 'EvmGroup-user'
+    email: 'jdoe@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: false
+
+- name: Delete a user in ManageIQ
+  community.general.manageiq_user:
+    state: 'absent'
+    userid: 'jdoe'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false
+
+- name: Delete a user in ManageIQ using a token
+  community.general.manageiq_user:
+    state: 'absent'
+    userid: 'jdoe'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: false
+
+- name: Update email of user in ManageIQ
+  community.general.manageiq_user:
+    userid: 'jdoe'
+    email: 'jaustine@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: false
+
+- name: Update email of user in ManageIQ using a token
+  community.general.manageiq_user:
+    userid: 'jdoe'
+    email: 'jaustine@example.com'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      token: 'sometoken'
+      validate_certs: false
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQUser(object):
+    """
+    Object to execute user management operations in manageiq.
+    """
+
+    def __init__(self, manageiq):
+        self.manageiq = manageiq
+
+        self.module = self.manageiq.module
+        self.api_url = self.manageiq.api_url
+        self.client = self.manageiq.client
+
+    def group_id(self, description):
+        """ Search for a group id by group description.
+
+        Returns:
+            the group id, or fails the module if the group is not found.
+        """
+        group = self.manageiq.find_collection_resource_by('groups', description=description)
+        if not group:  # group doesn't exist
+            self.module.fail_json(
+                msg="group %s does not exist in manageiq" % (description))
+
+        return group['id']
+
+    def user(self, userid):
+        """ Search for a user object by userid.
+
+        Returns:
+            the user, or None if the user was not found.
+        """
+        return self.manageiq.find_collection_resource_by('users', userid=userid)
+
+    def compare_user(self, user, name, group_id, password, email):
+        """ Compare user fields with new field values.
+
+        Returns:
+            False if any user field differs from the new field values, True otherwise.
+        """
+        found_difference = (
+            (name and user['name'] != name) or
+            (password is not None) or
+            (email and user['email'] != email) or
+            (group_id and user['current_group_id'] != group_id)
+        )
+
+        return not found_difference
+
+    def delete_user(self, user):
+        """ Deletes a user from manageiq.
+
+        Returns:
+            a short message describing the operation executed.
+        """
+        try:
+            url = '%s/users/%s' % (self.api_url, user['id'])
+            result = self.client.post(url, action='delete')
+        except Exception as e:
+            self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))
+
+        return dict(changed=True, msg=result['message'])
+
+    def edit_user(self, user, name, group, password, email):
+        """ Edit a user in manageiq.
+
+        Returns:
+            a short message describing the operation executed.
+        """
+        group_id = None
+        url = '%s/users/%s' % (self.api_url, user['id'])
+
+        resource = dict(userid=user['userid'])
+        if group is not None:
+            group_id = self.group_id(group)
+            resource['group'] = dict(id=group_id)
+        if name is not None:
+            resource['name'] = name
+        if email is not None:
+            resource['email'] = email
+
+        # if there is a password param, but 'update_password' is 'on_create'
+        # then discard the password (since we're editing an existing user)
+        if self.module.params['update_password'] == 'on_create':
+            password = None
+        if password is not None:
+            resource['password'] = password
+
+        # check if we need to update (compare_user returns True if no difference is found)
+        if self.compare_user(user, name, group_id, password, email):
+            return dict(
+                changed=False,
+                msg="user %s is not changed." % (user['userid']))
+
+        # try to update user
+        try:
+            result = self.client.post(url, action='edit', resource=resource)
+        except Exception as e:
+            self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))
+
+        return dict(
+            changed=True,
+            msg="successfully updated the user %s: %s" % (user['userid'], result))
+
+    def create_user(self, userid, name, group, password, email):
+        """ Creates the user in manageiq.
+
+        Returns:
+            a short message describing the operation executed.
+ """ + # check for required arguments + for key, value in dict(name=name, group=group, password=password).items(): + if value in (None, ''): + self.module.fail_json(msg="missing required argument: %s" % (key)) + + group_id = self.group_id(group) + url = '%s/users' % (self.api_url) + + resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}} + if email is not None: + resource['email'] = email + + # try to create a new user + try: + result = self.client.post(url, action='create', resource=resource) + except Exception as e: + self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e))) + + return dict( + changed=True, + msg="successfully created the user %s: %s" % (userid, result['results'])) + + +def main(): + argument_spec = dict( + userid=dict(required=True, type='str'), + name=dict(), + password=dict(no_log=True), + group=dict(), + email=dict(), + state=dict(choices=['absent', 'present'], default='present'), + update_password=dict(choices=['always', 'on_create'], + default='always'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + userid = module.params['userid'] + name = module.params['name'] + password = module.params['password'] + group = module.params['group'] + email = module.params['email'] + state = module.params['state'] + + manageiq = ManageIQ(module) + manageiq_user = ManageIQUser(manageiq) + + user = manageiq_user.user(userid) + + # user should not exist + if state == "absent": + # if we have a user, delete it + if user: + res_args = manageiq_user.delete_user(user) + # if we do not have a user, nothing to do + else: + res_args = dict( + changed=False, + msg="user %s: does not exist in manageiq" % (userid)) + + # user should exist + if state == "present": + # if we have a user, edit it + if user: + res_args = manageiq_user.edit_user(user, name, group, password, email) + # if we do not have a user, create it + else: + res_args = manageiq_user.create_user(userid, name, group, password, email) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/mas.py b/ansible_collections/community/general/plugins/modules/mas.py new file mode 100644 index 000000000..5b8958beb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/mas.py @@ -0,0 +1,301 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Lukas Bestle +# Copyright (c) 2017, Michael Heap +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: mas +short_description: Manage Mac App Store applications with mas-cli +description: + - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli). +version_added: '0.2.0' +author: + - Michael Heap (@mheap) + - Lukas Bestle (@lukasbestle) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - The Mac App Store identifier of the app(s) you want to manage. + - This can be found by running C(mas search APP_NAME) on your machine. 
+ type: list + elements: int + state: + description: + - Desired state of the app installation. + - The C(absent) value requires root permissions, also see the examples. + type: str + choices: + - absent + - latest + - present + default: present + upgrade_all: + description: + - Upgrade all installed Mac App Store apps. + type: bool + default: false + aliases: ["upgrade"] +requirements: + - macOS 10.11+ + - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path" + - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)). +''' + +EXAMPLES = ''' +- name: Install Keynote + community.general.mas: + id: 409183694 + state: present + +- name: Install Divvy with command mas installed in /usr/local/bin + community.general.mas: + id: 413857545 + state: present + environment: + PATH: /usr/local/bin:{{ ansible_facts.env.PATH }} + +- name: Install a list of apps + community.general.mas: + id: + - 409183694 # Keynote + - 413857545 # Divvy + state: present + +- name: Ensure the latest Keynote version is installed + community.general.mas: + id: 409183694 + state: latest + +- name: Upgrade all installed Mac App Store apps + community.general.mas: + upgrade_all: true + +- name: Install specific apps and also upgrade all others + community.general.mas: + id: + - 409183694 # Keynote + - 413857545 # Divvy + state: present + upgrade_all: true + +- name: Uninstall Divvy + community.general.mas: + id: 413857545 + state: absent + become: true # Uninstallation requires root permissions +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule +import os + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class Mas(object): + + def __init__(self, module): + self.module = module + + # Initialize data properties + self.mas_path = self.module.get_bin_path('mas') + self._checked_signin = False + self._installed = None # Populated only if needed + self._outdated = None # Populated only if needed + self.count_install = 0 + self.count_upgrade = 0 + self.count_uninstall = 0 + self.result = { + 'changed': False + } + + self.check_mas_tool() + + def app_command(self, command, id): + ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' ''' + + if not self.module.check_mode: + if command != 'uninstall': + self.check_signin() + + rc, out, err = self.run([command, str(id)]) + if rc != 0: + self.module.fail_json( + msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip()) + ) + + # No error or dry run + self.__dict__['count_' + command] += 1 + + def check_mas_tool(self): + ''' Verifies that the `mas` tool is available in a recent version ''' + + # Is the `mas` tool available at all? + if not self.mas_path: + self.module.fail_json(msg='Required `mas` tool is not installed') + + # Is the version recent enough? 
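+        # (assumption: `mas version` prints a bare version string such as "1.8.6",
+        # which is compared against the required minimum below)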
+ rc, out, err = self.run(['version']) + if rc != 0 or not out.strip() or LooseVersion(out.strip()) < LooseVersion('1.5.0'): + self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip()) + + def check_signin(self): + ''' Verifies that the user is signed in to the Mac App Store ''' + + # Only check this once per execution + if self._checked_signin: + return + + rc, out, err = self.run(['account']) + if out.split("\n", 1)[0].rstrip() == 'Not signed in': + self.module.fail_json(msg='You must be signed in to the Mac App Store') + + self._checked_signin = True + + def exit(self): + ''' Exit with the data we have collected over time ''' + + msgs = [] + if self.count_install > 0: + msgs.append('Installed {0} app(s)'.format(self.count_install)) + if self.count_upgrade > 0: + msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade)) + if self.count_uninstall > 0: + msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall)) + + if msgs: + self.result['changed'] = True + self.result['msg'] = ', '.join(msgs) + + self.module.exit_json(**self.result) + + def get_current_state(self, command): + ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' ''' + + rc, raw_apps, err = self.run([command]) + rows = raw_apps.split("\n") + if rows[0] == "No installed apps found": + rows = [] + apps = [] + for r in rows: + # Format: "123456789 App Name" + r = r.split(' ', 1) + if len(r) == 2: + apps.append(int(r[0])) + + return apps + + def installed(self): + ''' Returns the list of installed apps ''' + + # Populate cache if not already done + if self._installed is None: + self._installed = self.get_current_state('list') + + return self._installed + + def is_installed(self, id): + ''' Checks whether the given app is installed ''' + + return int(id) in self.installed() + + def is_outdated(self, id): + ''' Checks whether the given app is installed, but outdated ''' + + return int(id) in self.outdated() + + def outdated(self): + ''' Returns the list of installed, but outdated apps ''' + + # Populate cache if not already done + if self._outdated is None: + self._outdated = self.get_current_state('outdated') + + return self._outdated + + def run(self, cmd): + ''' Runs a command of the `mas` tool ''' + + cmd.insert(0, self.mas_path) + return self.module.run_command(cmd, False) + + def upgrade_all(self): + ''' Upgrades all installed apps and sets the correct result data ''' + + outdated = self.outdated() + + if not self.module.check_mode: + self.check_signin() + + rc, out, err = self.run(['upgrade']) + if rc != 0: + self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip()) + + self.count_upgrade += len(outdated) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='list', elements='int'), + state=dict(type='str', default='present', choices=['absent', 'latest', 'present']), + upgrade_all=dict(type='bool', default=False, aliases=['upgrade']), + ), + supports_check_mode=True + ) + mas = Mas(module) + + if module.params['id']: + apps = module.params['id'] + else: + apps = [] + + state = module.params['state'] + upgrade = module.params['upgrade_all'] + + # Run operations on the given app IDs + for app in sorted(set(apps)): + if state == 'present': + if not mas.is_installed(app): + mas.app_command('install', app) + + elif state == 'absent': + if mas.is_installed(app): + # Ensure we are root + if os.getuid() != 0: + module.fail_json(msg="Uninstalling apps requires root permissions ('become: true')") + + 
mas.app_command('uninstall', app) + + elif state == 'latest': + if not mas.is_installed(app): + mas.app_command('install', app) + elif mas.is_outdated(app): + mas.app_command('upgrade', app) + + # Upgrade all apps if requested + mas._outdated = None # Clear cache + if upgrade and mas.outdated(): + mas.upgrade_all() + + # Exit with the collected data + mas.exit() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/matrix.py b/ansible_collections/community/general/plugins/modules/matrix.py new file mode 100644 index 000000000..0b419c8d9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/matrix.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# coding: utf-8 + +# Copyright (c) 2018, Jan Christian Grünhage +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: "Jan Christian Grünhage (@jcgruenhage)" +module: matrix +short_description: Send notifications to matrix +description: + - This module sends html formatted notifications to matrix rooms. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + msg_plain: + type: str + description: + - Plain text form of the message to send to matrix, usually markdown + required: true + msg_html: + type: str + description: + - HTML form of the message to send to matrix + required: true + room_id: + type: str + description: + - ID of the room to send the notification to + required: true + hs_url: + type: str + description: + - URL of the homeserver, where the CS-API is reachable + required: true + token: + type: str + description: + - Authentication token for the API call. 
If provided, user_id and password are not required.
+  user_id:
+    type: str
+    description:
+      - The user id of the user.
+  password:
+    type: str
+    description:
+      - The password to log in with.
+requirements:
+  - matrix-client (Python library)
+'''
+
+EXAMPLES = '''
+- name: Send matrix notification with token
+  community.general.matrix:
+    msg_plain: "**hello world**"
+    msg_html: "<b>hello world</b>"
+    room_id: "!12345678:server.tld"
+    hs_url: "https://matrix.org"
+    token: "{{ matrix_auth_token }}"
+
+- name: Send matrix notification with user_id and password
+  community.general.matrix:
+    msg_plain: "**hello world**"
+    msg_html: "<b>hello world</b>"
+    room_id: "!12345678:server.tld"
+    hs_url: "https://matrix.org"
+    user_id: "ansible_notification_bot"
+    password: "{{ matrix_auth_password }}"
+'''
+
+RETURN = '''
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MATRIX_IMP_ERR = None
+try:
+    from matrix_client.client import MatrixClient
+except ImportError:
+    MATRIX_IMP_ERR = traceback.format_exc()
+    matrix_found = False
+else:
+    matrix_found = True
+
+
+def run_module():
+    module_args = dict(
+        msg_plain=dict(type='str', required=True),
+        msg_html=dict(type='str', required=True),
+        room_id=dict(type='str', required=True),
+        hs_url=dict(type='str', required=True),
+        token=dict(type='str', required=False, no_log=True),
+        user_id=dict(type='str', required=False),
+        password=dict(type='str', required=False, no_log=True),
+    )
+
+    result = dict(
+        changed=False,
+        message=''
+    )
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        mutually_exclusive=[['password', 'token']],
+        required_one_of=[['password', 'token']],
+        required_together=[['user_id', 'password']],
+        supports_check_mode=True
+    )
+
+    if not matrix_found:
+        module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
+
+    if module.check_mode:
+        return result
+
+    # create a client object
+    client = MatrixClient(module.params['hs_url'])
+    if module.params['token'] is not None:
+        client.api.token = module.params['token']
+    else:
+        client.login(module.params['user_id'], module.params['password'], sync=False)
+
+    # make sure we are in the given room and get a room object for it
+    room = client.join_room(module.params['room_id'])
+    # send an HTML formatted message
+    room.send_html(module.params['msg_html'], module.params['msg_plain'])
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/mattermost.py b/ansible_collections/community/general/plugins/modules/mattermost.py
new file mode 100644
index 000000000..29894c3a7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/mattermost.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Benjamin Jolivot
+# Inspired by slack module:
+#   # Copyright (c) 2017, Steve Pletcher
+#   # Copyright (c) 2016, René Moser
+#   # Copyright (c) 2015, Stefan Berggren
+#   # Copyright (c) 2014, Ramon de la Fuente
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mattermost
+short_description: Send Mattermost notifications
+description:
+  - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
+author: "Benjamin Jolivot (@bjolivot)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  url:
+    type: str
+    description:
+      - Mattermost url (for example C(http://mattermost.yourcompany.com)).
+    required: true
+  api_key:
+    type: str
+    description:
+      - Mattermost webhook api key. Log into your Mattermost site and go to
+        Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
+        This gives you the full URL; I(api_key) is the last part, as in
+        http://mattermost.example.com/hooks/C(API_KEY).
+    required: true
+  text:
+    type: str
+    description:
+      - Text to send. Note that the module does not handle escaping characters.
+      - Required when I(attachments) is not set.
+  attachments:
+    type: list
+    elements: dict
+    description:
+      - Define a list of attachments.
+      - For more information, see U(https://developers.mattermost.com/integrate/admin-guide/admin-message-attachments/).
+      - Required when I(text) is not set.
+    version_added: 4.3.0
+  channel:
+    type: str
+    description:
+      - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
+  username:
+    type: str
+    description:
+      - This is the sender of the message (Username Override needs to be enabled by the Mattermost admin, see the Mattermost documentation).
+    default: Ansible
+  icon_url:
+    type: str
+    description:
+      - URL for the message sender's icon.
+    default: https://docs.ansible.com/favicon.ico
+  validate_certs:
+    description:
+      - If C(false), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    default: true
+    type: bool
+'''
+
+EXAMPLES = """
+- name: Send notification message via Mattermost
+  community.general.mattermost:
+    url: http://mattermost.example.com
+    api_key: my_api_key
+    text: '{{ inventory_hostname }} completed'
+
+- name: Send notification message via Mattermost with all options
+  community.general.mattermost:
+    url: http://mattermost.example.com
+    api_key: my_api_key
+    text: '{{ inventory_hostname }} completed'
+    channel: notifications
+    username: 'Ansible on {{ inventory_hostname }}'
+    icon_url: http://www.example.com/some-image-file.png
+
+- name: Send attachments message via Mattermost
+  community.general.mattermost:
+    url: http://mattermost.example.com
+    api_key: my_api_key
+    attachments:
+      - text: Display my system load on host A and B
+        color: '#ff00dd'
+        title: System load
+        fields:
+          - title: System A
+            value: "load average: 0,74, 0,66, 0,63"
+            short: true
+          - title: System B
+            value: 'load average: 5,16, 4,64, 2,43'
+            short: true
+"""

+RETURN = '''
+payload:
+  description: Mattermost payload
+  returned: success
+  type: str
+webhook_url:
+  description: URL the webhook is sent to
+  returned: success
+  type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+    module = AnsibleModule(
+        supports_check_mode=True,
+        argument_spec=dict(
+            url=dict(type='str', required=True),
+            api_key=dict(type='str', required=True, no_log=True),
+            text=dict(type='str'),
+            channel=dict(type='str', default=None),
+            username=dict(type='str', default='Ansible'),
+            icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'),
+            validate_certs=dict(default=True, type='bool'),
+            attachments=dict(type='list', elements='dict'),
+        ),
+        required_one_of=[
+            ('text', 'attachments'),
+        ],
+    )
+    # init return dict
+    result = dict(changed=False, msg="OK")
+
+    # define webhook
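+    # for example url=http://mattermost.example.com and api_key=my_api_key
+    # yield http://mattermost.example.com/hooks/my_api_key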
+    webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
+    result['webhook_url'] = webhook_url
+
+    # define payload
+    payload = {}
+    for param in ['text', 'channel', 'username', 'icon_url', 'attachments']:
+        if module.params[param] is not None:
+            payload[param] = module.params[param]
+
+    payload = module.jsonify(payload)
+    result['payload'] = payload
+
+    # http headers
+    headers = {
+        'Content-Type': 'application/json',
+        'Accept': 'application/json',
+    }
+
+    # note: nothing is done in check mode; the task passes even if the server
+    # is down and/or the token is invalid, since there is no reliable way to
+    # verify the webhook without actually sending the message.
+
+    # send request if not in check mode
+    if module.check_mode is False:
+        response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
+
+        # something's wrong
+        if info['status'] != 200:
+            result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
+            module.fail_json(**result)
+
+    # Looks good
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/maven_artifact.py b/ansible_collections/community/general/plugins/modules/maven_artifact.py
new file mode 100644
index 000000000..3f9defa52
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/maven_artifact.py
@@ -0,0 +1,762 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Chris Schmidt
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+description:
+  - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+  - Can retrieve snapshots or release versions of the artifact and will resolve the latest available
+    version if no version is specified.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+  - lxml
+  - boto3 if using a S3 repository (s3://...)
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  group_id:
+    type: str
+    description:
+      - The Maven groupId coordinate.
+    required: true
+  artifact_id:
+    type: str
+    description:
+      - The maven artifactId coordinate.
+    required: true
+  version:
+    type: str
+    description:
+      - The maven version coordinate.
+      - Mutually exclusive with I(version_by_spec).
+  version_by_spec:
+    type: str
+    description:
+      - The maven dependency version ranges.
+      - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
+      - The range types "(,1.0],[1.2,)" and "(,1.1),(1.1,)" are not supported.
+      - Mutually exclusive with I(version).
+    version_added: '0.2.0'
+  classifier:
+    type: str
+    description:
+      - The maven classifier coordinate.
+    default: ''
+  extension:
+    type: str
+    description:
+      - The maven type/extension coordinate.
+    default: jar
+  repository_url:
+    type: str
+    description:
+      - The URL of the Maven Repository to download from.
+      - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
+      - Use file://... if the repository is local, added in version 2.6.
+    default: https://repo1.maven.org/maven2
+  username:
+    type: str
+    description:
+      - The username to authenticate as to the Maven Repository. Use the AWS access key if the repository is hosted on S3.
+    aliases: [ "aws_secret_key" ]
+  password:
+    type: str
+    description:
+      - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
+    aliases: [ "aws_secret_access_key" ]
+  headers:
+    description:
+      - Add custom HTTP headers to a request in hash/dict format.
+    type: dict
+  force_basic_auth:
+    description:
+      - httplib2, the library used by the uri module, only sends authentication information when a webservice
+        responds to an initial request with a 401 status. Since some basic auth services do not properly
+        send a 401, logins will fail. This option forces the sending of the Basic authentication header
+        upon initial request.
+    default: false
+    type: bool
+    version_added: '0.2.0'
+  dest:
+    type: path
+    description:
+      - The path where the artifact should be written to.
+      - If file mode or ownerships are specified and the destination path already exists, they affect the downloaded file.
+    required: true
+  state:
+    type: str
+    description:
+      - The desired state of the artifact.
+    default: present
+    choices: [present,absent]
+  timeout:
+    type: int
+    description:
+      - Specifies a timeout in seconds for the connection attempt.
+    default: 10
+  validate_certs:
+    description:
+      - If C(false), SSL certificates will not be validated. This should only be set to C(false) when no other option exists.
+    type: bool
+    default: true
+  client_cert:
+    description:
+      - PEM formatted certificate chain file to be used for SSL client authentication.
+      - This file can also include the key, and if the key is included, I(client_key) is not required.
+    type: path
+    version_added: '1.3.0'
+  client_key:
+    description:
+      - PEM formatted file that contains your private key to be used for SSL client authentication.
+      - If I(client_cert) contains both the certificate and key, this option is not required.
+    type: path
+    version_added: '1.3.0'
+  keep_name:
+    description:
+      - If C(true), the downloaded artifact's name is preserved, i.e. the version number remains part of it.
+      - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec)
+        is defined.
+    type: bool
+    default: false
+  verify_checksum:
+    type: str
+    description:
+      - If C(never), the MD5/SHA1 checksum will never be downloaded and verified.
+      - If C(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default.
+      - If C(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exists,
+        to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe)
+        downloading the artifact, and since some repository software, when acting as a proxy/cache, returns a 404 error
+        if the artifact has not been cached yet, it may fail unexpectedly.
+        If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
+        use it to verify integrity after download.
+      - C(always) combines C(download) and C(change).
+    required: false
+    default: 'download'
+    choices: ['never', 'download', 'change', 'always']
+  checksum_alg:
+    type: str
+    description:
+      - If C(md5), checksums will use the MD5 algorithm. This is the default.
+      - If C(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use
+        FIPS-compliant algorithms, since MD5 will be blocked on such systems.
+    default: 'md5'
+    choices: ['md5', 'sha1']
+    version_added: 3.2.0
+  unredirected_headers:
+    type: list
+    elements: str
+    version_added: 5.2.0
+    description:
+      - A list of headers that should not be included in the redirection. These headers are sent to the C(fetch_url) function.
+      - On ansible-core version 2.12 or later, the default of this option is C([Authorization, Cookie]).
+      - Useful if the redirection URL does not need to have sensitive headers in the request.
+      - Requires ansible-core version 2.12 or later.
+  directory_mode:
+    type: str
+    description:
+      - Filesystem permission mode applied recursively to I(dest) when it is a directory.
+extends_documentation_fragment:
+  - ansible.builtin.files
+  - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Download the latest version of the JUnit framework artifact from Maven Central
+  community.general.maven_artifact:
+    group_id: junit
+    artifact_id: junit
+    dest: /tmp/junit-latest.jar
+
+- name: Download JUnit 4.11 from Maven Central
+  community.general.maven_artifact:
+    group_id: junit
+    artifact_id: junit
+    version: 4.11
+    dest: /tmp/junit-4.11.jar
+
+- name: Download an artifact from a private repository requiring authentication
+  community.general.maven_artifact:
+    group_id: com.company
+    artifact_id: library-name
+    repository_url: 'https://repo.company.com/maven'
+    username: user
+    password: pass
+    dest: /tmp/library-name-latest.jar
+
+- name: Download an artifact from a private repository requiring certificate authentication
+  community.general.maven_artifact:
+    group_id: com.company
+    artifact_id: library-name
+    repository_url: 'https://repo.company.com/maven'
+    client_cert: /path/to/cert.pem
+    client_key: /path/to/key.pem
+    dest: /tmp/library-name-latest.jar
+
+- name: Download a WAR File to the Tomcat webapps directory to be deployed
+  community.general.maven_artifact:
+    group_id: com.company
+    artifact_id: web-app
+    extension: war
+    repository_url: 'https://repo.company.com/maven'
+    dest: /var/lib/tomcat7/webapps/web-app.war
+
+- name: Keep a downloaded artifact's name, i.e.
retain the version + community.general.maven_artifact: + version: latest + artifact_id: spring-core + group_id: org.springframework + dest: /tmp/ + keep_name: true + +- name: Download the latest version of the JUnit framework artifact from Maven local + community.general.maven_artifact: + group_id: junit + artifact_id: junit + dest: /tmp/junit-latest.jar + repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository" + +- name: Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central + community.general.maven_artifact: + group_id: junit + artifact_id: junit + version_by_spec: "[3.8,4.0)" + dest: /tmp/ +''' + +import hashlib +import os +import posixpath +import shutil +import io +import tempfile +import traceback +import re + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible.module_utils.ansible_release import __version__ as ansible_version +from re import match + +LXML_ETREE_IMP_ERR = None +try: + from lxml import etree + HAS_LXML_ETREE = True +except ImportError: + LXML_ETREE_IMP_ERR = traceback.format_exc() + HAS_LXML_ETREE = False + +BOTO_IMP_ERR = None +try: + import boto3 + HAS_BOTO = True +except ImportError: + BOTO_IMP_ERR = traceback.format_exc() + HAS_BOTO = False + +SEMANTIC_VERSION_IMP_ERR = None +try: + from semantic_version import Version, Spec + HAS_SEMANTIC_VERSION = True +except ImportError: + SEMANTIC_VERSION_IMP_ERR = traceback.format_exc() + HAS_SEMANTIC_VERSION = False + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text + + +def split_pre_existing_dir(dirname): + ''' + Return the first pre-existing directory and a list of the new directories that will be created. 
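+    For example, with only /tmp existing, split_pre_existing_dir('/tmp/a/b')
+    returns ('/tmp', ['a', 'b']).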
+ ''' + head, tail = os.path.split(dirname) + b_head = to_bytes(head, errors='surrogate_or_strict') + if not os.path.exists(b_head): + if head == dirname: + return None, [head] + else: + (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head) + else: + return head, [tail] + new_directory_list.append(tail) + return pre_existing_dir, new_directory_list + + +def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): + ''' + Walk the new directories list and make sure that permissions are as we would expect + ''' + if new_directory_list: + first_sub_dir = new_directory_list.pop(0) + if not pre_existing_dir: + working_dir = first_sub_dir + else: + working_dir = os.path.join(pre_existing_dir, first_sub_dir) + directory_args['path'] = working_dir + changed = module.set_fs_attributes_if_different(directory_args, changed) + changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed) + return changed + + +class Artifact(object): + def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'): + if not group_id: + raise ValueError("group_id must be set") + if not artifact_id: + raise ValueError("artifact_id must be set") + + self.group_id = group_id + self.artifact_id = artifact_id + self.version = version + self.version_by_spec = version_by_spec + self.classifier = classifier + + if not extension: + self.extension = "jar" + else: + self.extension = extension + + def is_snapshot(self): + return self.version and self.version.endswith("SNAPSHOT") + + def path(self, with_version=True): + base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id) + if with_version and self.version: + timestamp_version_match = re.match("^(.*-)?([0-9]{8}\\.[0-9]{6}-[0-9]+)$", self.version) + if timestamp_version_match: + base = posixpath.join(base, timestamp_version_match.group(1) + "SNAPSHOT") + else: + base = posixpath.join(base, self.version) + return base + + def _generate_filename(self): + filename = self.artifact_id + "-" + self.classifier + "." + self.extension + if not self.classifier: + filename = self.artifact_id + "." 
+ self.extension
+        return filename
+
+    def get_filename(self, filename=None):
+        if not filename:
+            filename = self._generate_filename()
+        elif os.path.isdir(filename):
+            filename = os.path.join(filename, self._generate_filename())
+        return filename
+
+    def __str__(self):
+        result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+        if self.classifier:
+            result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+        elif self.extension != "jar":
+            result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+        return result
+
+    @staticmethod
+    def parse(input):
+        parts = input.split(":")
+        if len(parts) >= 3:
+            g = parts[0]
+            a = parts[1]
+            v = parts[-1]
+            t = None
+            c = None
+            if len(parts) == 4:
+                t = parts[2]
+            if len(parts) == 5:
+                t = parts[2]
+                c = parts[3]
+            return Artifact(g, a, v, c, t)
+        else:
+            return None
+
+
+class MavenDownloader:
+    def __init__(self, module, base, local=False, headers=None):
+        self.module = module
+        if base.endswith("/"):
+            base = base.rstrip("/")
+        self.base = base
+        self.local = local
+        self.headers = headers
+        self.user_agent = "Ansible {0} maven_artifact".format(ansible_version)
+        self.latest_version_found = None
+        self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"
+
+    def find_version_by_spec(self, artifact):
+        path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+        content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+        xml = etree.fromstring(content)
+        original_versions = xml.xpath("/metadata/versioning/versions/version/text()")
+        versions = []
+        for version in original_versions:
+            try:
+                versions.append(Version.coerce(version))
+            except ValueError:
+                # This means that the version string is not valid semantic versioning
+                pass
+
+        parse_versions_syntax = {
+            # example -> (,1.0]
+            r"^\(,(?P<upper_bound>[0-9.]*)]$": "<={upper_bound}",
+            # example -> 1.0
+            r"^(?P<version>[0-9.]*)$": "~={version}",
+            # example -> [1.0]
+            r"^\[(?P<version>[0-9.]*)\]$": "=={version}",
+            # example -> [1.2, 1.3]
+            r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]*)\]$": ">={lower_bound},<={upper_bound}",
+            # example -> [1.2, 1.3)
+            r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]+)\)$": ">={lower_bound},<{upper_bound}",
+            # example -> [1.5,)
+            r"^\[(?P<lower_bound>[0-9.]*),\)$": ">={lower_bound}",
+        }
+
+        for regex, spec_format in parse_versions_syntax.items():
+            regex_result = match(regex, artifact.version_by_spec)
+            if regex_result:
+                spec = Spec(spec_format.format(**regex_result.groupdict()))
+                selected_version = spec.select(versions)
+
+                if not selected_version:
+                    raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec))
+
+                # To deal with repos on maven that don't have a patch number on the first build (e.g. 3.8 instead of 3.8.0)
+                if str(selected_version) not in original_versions:
+                    selected_version.patch = None
+
+                return str(selected_version)
+
+        raise ValueError("The spec version {0} is not supported!".format(artifact.version_by_spec))
+
+    def find_latest_version_available(self, artifact):
+        if self.latest_version_found:
+            return self.latest_version_found
+        path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+        content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+        xml = etree.fromstring(content)
+        v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+        if v:
+            self.latest_version_found = v[0]
+            return v[0]
+
+    def find_uri_for_artifact(self, artifact):
+        if artifact.version_by_spec:
+            artifact.version = self.find_version_by_spec(artifact)
+
+        if artifact.version == "latest":
+            artifact.version = self.find_latest_version_available(artifact)
+
+        if artifact.is_snapshot():
+            if self.local:
+                return self._uri_for_artifact(artifact, artifact.version)
+            path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
+            content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+            xml = etree.fromstring(content)
+
+            for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+                classifier = snapshotArtifact.xpath("classifier/text()")
+                artifact_classifier = classifier[0] if classifier else ''
+                extension = snapshotArtifact.xpath("extension/text()")
+                artifact_extension = extension[0] if extension else ''
+                if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
+                    return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
+            timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
+            if timestamp_xmlpath:
+                timestamp = timestamp_xmlpath[0]
+                build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+                return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))
+
+        return self._uri_for_artifact(artifact, artifact.version)
+
+    def _uri_for_artifact(self, artifact, version=None):
+        if artifact.is_snapshot() and not version:
+            raise ValueError("Expected a unique version for snapshot artifact " + str(artifact))
+        elif not artifact.is_snapshot():
+            version = artifact.version
+        if artifact.classifier:
+            return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+        return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+    # for small files, directly get the full content
+    def _getContent(self, url, failmsg, force=True):
+        if self.local:
+            parsed_url = urlparse(url)
+            if os.path.isfile(parsed_url.path):
+                with io.open(parsed_url.path, 'rb') as f:
+                    return f.read()
+            if force:
+                raise ValueError(failmsg + " because the file could not be found: " + url)
+            return None
+        response = self._request(url, failmsg, force)
+        if response:
+            return response.read()
+        return None
+
+    # only for HTTP requests
+    def _request(self, url, failmsg, force=True):
+        url_to_use = url
+        parsed_url = urlparse(url)
+
+        if parsed_url.scheme == 's3':
+            parsed_url = urlparse(url)
+            bucket_name = parsed_url.netloc
+            key_name = parsed_url.path[1:]
+            client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+            url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
+
+        req_timeout = self.module.params.get('timeout')
+
+        # Hack to add parameters in the way that fetch_url expects
+        self.module.params['url_username'] = self.module.params.get('username', '')
+        self.module.params['url_password'] = self.module.params.get('password', '')
+        self.module.params['http_agent'] = self.user_agent
+
+        kwargs = {}
+        if self.module.params['unredirected_headers']:
+            kwargs['unredirected_headers'] = self.module.params['unredirected_headers']
+
+        response, info = fetch_url(
+            self.module,
+            url_to_use,
+            timeout=req_timeout,
+            headers=self.headers,
+            **kwargs
+        )
+
+        if info['status'] == 200:
+            return response
+        if force:
+            raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
+        return None
+
+    def download(self, tmpdir, artifact, verify_download, filename=None, checksum_alg='md5'):
+        if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
+            artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
+                                artifact.classifier, artifact.extension)
+        url = self.find_uri_for_artifact(artifact)
+        tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
+
+        try:
+            # copy to temp file
+            if self.local:
+                parsed_url = urlparse(url)
+                if os.path.isfile(parsed_url.path):
+                    shutil.copy2(parsed_url.path, tempname)
+                else:
+                    return "Can not find local file: " + parsed_url.path
+            else:
+                response = self._request(url, "Failed to download artifact " + str(artifact))
+                with os.fdopen(tempfd, 'wb') as f:
+                    shutil.copyfileobj(response, f)
+
+            if verify_download:
+                invalid_checksum = self.is_invalid_checksum(tempname, url, checksum_alg)
+                if invalid_checksum:
+                    # if verify_change was set, the previous file would be deleted
+                    os.remove(tempname)
+                    return invalid_checksum
+        except Exception as e:
+            os.remove(tempname)
+            raise e
+
+        # all good, now copy temp file to target
+        shutil.move(tempname, artifact.get_filename(filename))
+        return None
+
+    def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'):
+        if os.path.exists(file):
+            local_checksum = self._local_checksum(checksum_alg, file)
+            if self.local:
+                parsed_url = urlparse(remote_url)
+                remote_checksum = self._local_checksum(checksum_alg, parsed_url.path)
+            else:
+                try:
+                    remote_checksum = to_text(self._getContent(remote_url + '.'
+ checksum_alg, "Failed to retrieve checksum", False), errors='strict') + except UnicodeError as e: + return "Cannot retrieve a valid %s checksum from %s: %s" % (checksum_alg, remote_url, to_native(e)) + if not remote_checksum: + return "Cannot find %s checksum from %s" % (checksum_alg, remote_url) + try: + # Check if remote checksum only contains md5/sha1 or md5/sha1 + filename + _remote_checksum = remote_checksum.split(None, 1)[0] + remote_checksum = _remote_checksum + # remote_checksum is empty so we continue and keep original checksum string + # This should not happen since we check for remote_checksum before + except IndexError: + pass + if local_checksum.lower() == remote_checksum.lower(): + return None + else: + return "Checksum does not match: we computed " + local_checksum + " but the repository states " + remote_checksum + + return "Path does not exist: " + file + + def _local_checksum(self, checksum_alg, file): + if checksum_alg.lower() == 'md5': + hash = hashlib.md5() + elif checksum_alg.lower() == 'sha1': + hash = hashlib.sha1() + else: + raise ValueError("Unknown checksum_alg %s" % checksum_alg) + with io.open(file, 'rb') as f: + for chunk in iter(lambda: f.read(8192), b''): + hash.update(chunk) + return hash.hexdigest() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + group_id=dict(required=True), + artifact_id=dict(required=True), + version=dict(default=None), + version_by_spec=dict(default=None), + classifier=dict(default=''), + extension=dict(default='jar'), + repository_url=dict(default='https://repo1.maven.org/maven2'), + username=dict(default=None, aliases=['aws_secret_key']), + password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']), + headers=dict(type='dict'), + force_basic_auth=dict(default=False, type='bool'), + state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state + timeout=dict(default=10, type='int'), + dest=dict(type="path", required=True), + validate_certs=dict(required=False, default=True, type='bool'), + client_cert=dict(type="path", required=False), + client_key=dict(type="path", required=False), + keep_name=dict(required=False, default=False, type='bool'), + verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']), + checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']), + unredirected_headers=dict(type='list', elements='str', required=False), + directory_mode=dict(type='str'), + ), + add_file_common_args=True, + mutually_exclusive=([('version', 'version_by_spec')]) + ) + + if LooseVersion(ansible_version) < LooseVersion("2.12") and module.params['unredirected_headers']: + module.fail_json(msg="Unredirected Headers parameter provided, but your ansible-core version does not support it. 
Minimum version is 2.12") + + if LooseVersion(ansible_version) >= LooseVersion("2.12") and module.params['unredirected_headers'] is None: + # if the user did not supply unredirected params, we use the default, ONLY on ansible core 2.12 and above + module.params['unredirected_headers'] = ['Authorization', 'Cookie'] + + if not HAS_LXML_ETREE: + module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) + + if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION: + module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR) + + repository_url = module.params["repository_url"] + if not repository_url: + repository_url = "https://repo1.maven.org/maven2" + try: + parsed_url = urlparse(repository_url) + except AttributeError as e: + module.fail_json(msg='url parsing went wrong %s' % e) + + local = parsed_url.scheme == "file" + + if parsed_url.scheme == 's3' and not HAS_BOTO: + module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'), + exception=BOTO_IMP_ERR) + + group_id = module.params["group_id"] + artifact_id = module.params["artifact_id"] + version = module.params["version"] + version_by_spec = module.params["version_by_spec"] + classifier = module.params["classifier"] + extension = module.params["extension"] + headers = module.params['headers'] + state = module.params["state"] + dest = module.params["dest"] + b_dest = to_bytes(dest, errors='surrogate_or_strict') + keep_name = module.params["keep_name"] + verify_checksum = module.params["verify_checksum"] + verify_download = verify_checksum in ['download', 'always'] + verify_change = verify_checksum in ['change', 'always'] + checksum_alg = module.params["checksum_alg"] + + downloader = MavenDownloader(module, repository_url, local, headers) + + if not version_by_spec and not version: + version = "latest" + + try: + artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension) + except ValueError as e: + module.fail_json(msg=e.args[0]) + + changed = False + prev_state = "absent" + + if dest.endswith(os.sep): + b_dest = to_bytes(dest, errors='surrogate_or_strict') + if not os.path.exists(b_dest): + (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest) + os.makedirs(b_dest) + directory_args = module.load_file_common_arguments(module.params) + directory_mode = module.params["directory_mode"] + if directory_mode is not None: + directory_args['mode'] = directory_mode + else: + directory_args['mode'] = None + changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) + + if os.path.isdir(b_dest): + version_part = version + if version == 'latest': + version_part = downloader.find_latest_version_available(artifact) + elif version_by_spec: + version_part = downloader.find_version_by_spec(artifact) + + filename = "{artifact_id}{version_part}{classifier}.{extension}".format( + artifact_id=artifact_id, + version_part="-{0}".format(version_part) if keep_name else "", + classifier="-{0}".format(classifier) if classifier else "", + extension=extension + ) + dest = posixpath.join(dest, filename) + + b_dest = to_bytes(dest, errors='surrogate_or_strict') + + if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_checksum(dest, downloader.find_uri_for_artifact(artifact), checksum_alg)): + prev_state = "present" + + if prev_state == "absent": + try: + download_error = downloader.download(module.tmpdir, artifact, 
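When `dest` is a directory, `main()` derives the target filename from the artifact coordinates; note that the version is embedded only when `keep_name` is true, and the classifier only when non-empty. A standalone sketch of that rule, with illustrative values:

```python
# Sketch of the destination-filename rule from main() above.
import posixpath


def dest_filename(dest_dir, artifact_id, version_part, classifier, extension, keep_name):
    filename = "{artifact_id}{version_part}{classifier}.{extension}".format(
        artifact_id=artifact_id,
        version_part="-{0}".format(version_part) if keep_name else "",
        classifier="-{0}".format(classifier) if classifier else "",
        extension=extension,
    )
    return posixpath.join(dest_dir, filename)


# dest_filename('/tmp/libs', 'app', '1.0', '', 'jar', keep_name=True)  -> '/tmp/libs/app-1.0.jar'
# dest_filename('/tmp/libs', 'app', '1.0', '', 'jar', keep_name=False) -> '/tmp/libs/app.jar'
```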
verify_download, b_dest, checksum_alg) + if download_error is None: + changed = True + else: + module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error) + except ValueError as e: + module.fail_json(msg=e.args[0]) + + file_args = module.load_file_common_arguments(module.params, path=dest) + changed = module.set_fs_attributes_if_different(file_args, changed) + if changed: + module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, + extension=extension, repository_url=repository_url, changed=changed) + else: + module.exit_json(state=state, dest=dest, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/memset_dns_reload.py b/ansible_collections/community/general/plugins/modules/memset_dns_reload.py new file mode 100644 index 000000000..a1168724f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/memset_dns_reload.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: memset_dns_reload +author: "Simon Weald (@glitchcrab)" +short_description: Request reload of Memset's DNS infrastructure, +notes: + - DNS reload requests are a best-effort service provided by Memset; these generally + happen every 15 minutes by default, however you can request an immediate reload if + later tasks rely on the records being created. An API key generated via the + Memset customer control panel is required with the following minimum scope - + I(dns.reload). If you wish to poll the job status to wait until the reload has + completed, then I(job.status) is also required. +description: + - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + poll: + default: false + type: bool + description: + - Boolean value, if set will poll the reload job's status and return + when the job has completed (unless the 30 second timeout is reached first). + If the timeout is reached then the task will not be marked as failed, but + stderr will indicate that the polling failed. +''' + +EXAMPLES = ''' +- name: Submit DNS reload and poll + community.general.memset_dns_reload: + api_key: 5eb86c9196ab03919abcf03857163741 + poll: true + delegate_to: localhost +''' + +RETURN = ''' +--- +memset_api: + description: Raw response from the Memset API. + returned: always + type: complex + contains: + error: + description: Whether the job ended in error state. + returned: always + type: bool + sample: true + finished: + description: Whether the job completed before the result was returned. + returned: always + type: bool + sample: true + id: + description: Job ID. + returned: always + type: str + sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8" + status: + description: Job status. + returned: always + type: str + sample: "DONE" + type: + description: Job type. 
+ returned: always + type: str + sample: "dns" +''' + +from time import sleep + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def poll_reload_status(api_key=None, job_id=None, payload=None): + ''' + We poll the `job.status` endpoint every 5 seconds up to a + maximum of 6 times. This is a relatively arbitrary choice of + timeout, however requests rarely take longer than 15 seconds + to complete. + ''' + memset_api, stderr, msg = None, None, None + payload['id'] = job_id + + api_method = 'job.status' + _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload) + + while not response.json()['finished']: + counter = 0 + while counter < 6: + sleep(5) + _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload) + counter += 1 + if response.json()['error']: + # the reload job was submitted but polling failed. Don't return this as an overall task failure. + stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status." + else: + memset_api = response.json() + msg = None + + return memset_api, msg, stderr + + +def reload_dns(args=None): + ''' + DNS reloads are a single API call and therefore there's not much + which can go wrong outside of auth errors. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + memset_api, msg, stderr = None, None, None + + api_method = 'dns.reload' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + if response.status_code is not None: + retvals['memset_api'] = response.json() + else: + retvals['stderr'] = response.stderr + retvals['msg'] = msg + return retvals + + # set changed to true if the reload request was accepted. + has_changed = True + memset_api = msg + # empty msg var as we don't want to return the API's json response twice. + msg = None + + if args['poll']: + # hand off to the poll function. + job_id = response.json()['id'] + memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload) + + # assemble return variables. + retvals['failed'] = has_failed + retvals['changed'] = has_changed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + poll=dict(required=False, default=False, type='bool') + ), + supports_check_mode=False + ) + + # populate the dict with the user-provided vars. 
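The `poll_reload_status()` helper above nests two loops, but its documented intent is a bounded poll: re-query `job.status` every 5 seconds, up to 6 times, which yields roughly the 30-second window mentioned in the option docs. That intent can be expressed more directly; the sketch below is a deliberate simplification, with `fetch_status` as a hypothetical stand-in for the `job.status` API call:

```python
# Bounded-poll shape of poll_reload_status() above (simplified; fetch_status
# is a hypothetical callable returning the job.status JSON as a dict).
from time import sleep


def poll_until_finished(fetch_status, attempts=6, interval=5):
    status = fetch_status()
    tries = 0
    while not status['finished'] and tries < attempts:
        sleep(interval)
        status = fetch_status()
        tries += 1
    return status                              # caller inspects 'finished'/'error'
```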
+ args = dict() + for key, arg in module.params.items(): + args[key] = arg + + retvals = reload_dns(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/memset_memstore_info.py b/ansible_collections/community/general/plugins/modules/memset_memstore_info.py new file mode 100644 index 000000000..5fc9d79e1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/memset_memstore_info.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: memset_memstore_info +author: "Simon Weald (@glitchcrab)" +short_description: Retrieve Memstore product usage information +notes: + - An API key generated via the Memset customer control panel is needed with the + following minimum scope - I(memstore.usage). +description: + - Retrieve Memstore product usage information. + - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + name: + required: true + type: str + description: + - The Memstore product name (i.e. C(mstestyaa1)). 
+''' + +EXAMPLES = ''' +- name: Get usage for mstestyaa1 + community.general.memset_memstore_info: + name: mstestyaa1 + api_key: 5eb86c9896ab03919abcf03857163741 + delegate_to: localhost +''' + +RETURN = ''' +--- +memset_api: + description: Info from the Memset API + returned: always + type: complex + contains: + cdn_bandwidth: + description: Dictionary of CDN bandwidth facts + returned: always + type: complex + contains: + bytes_out: + description: Outbound CDN bandwidth for the last 24 hours in bytes + returned: always + type: int + sample: 1000 + requests: + description: Number of requests in the last 24 hours + returned: always + type: int + sample: 10 + bytes_in: + description: Inbound CDN bandwidth for the last 24 hours in bytes + returned: always + type: int + sample: 1000 + containers: + description: Number of containers + returned: always + type: int + sample: 10 + bytes: + description: Space used in bytes + returned: always + type: int + sample: 3860997965 + objs: + description: Number of objects + returned: always + type: int + sample: 1000 + bandwidth: + description: Dictionary of CDN bandwidth facts + returned: always + type: complex + contains: + bytes_out: + description: Outbound bandwidth for the last 24 hours in bytes + returned: always + type: int + sample: 1000 + requests: + description: Number of requests in the last 24 hours + returned: always + type: int + sample: 10 + bytes_in: + description: Inbound bandwidth for the last 24 hours in bytes + returned: always + type: int + sample: 1000 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def get_facts(args=None): + ''' + Performs a simple API call and returns a JSON blob. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + payload['name'] = args['name'] + + api_method = 'memstore.usage' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = "{0}" . format(response.stderr) + return retvals + + # we don't want to return the same thing twice + msg = None + memset_api = response.json() + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + + # populate the dict with the user-provided vars. 
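Several of these memset modules assemble their return payload with `retvals[val] = eval(val)`, where `eval()` looks up the local variable whose name is in the string. Because the names come from a fixed literal list this is safe here, but the same effect is available without `eval()`; a sketch of an explicit-mapping equivalent:

```python
# eval()-free equivalent of the return-value assembly used in get_facts()
# above and in the other memset modules in this patch.
def assemble_retvals(changed, failed, msg=None, memset_api=None, stderr=None):
    retvals = {'changed': changed, 'failed': failed}
    for key, value in (('msg', msg), ('memset_api', memset_api), ('stderr', stderr)):
        if value is not None:
            retvals[key] = value
    return retvals
```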
+ args = dict() + for key, arg in module.params.items(): + args[key] = arg + + retvals = get_facts(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/memset_server_info.py b/ansible_collections/community/general/plugins/modules/memset_server_info.py new file mode 100644 index 000000000..ecc0375eb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/memset_server_info.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: memset_server_info +author: "Simon Weald (@glitchcrab)" +short_description: Retrieve server information +notes: + - An API key generated via the Memset customer control panel is needed with the + following minimum scope - I(server.info). +description: + - Retrieve server information. + - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + name: + required: true + type: str + description: + - The server product name (i.e. C(testyaa1)). +''' + +EXAMPLES = ''' +- name: Get details for testyaa1 + community.general.memset_server_info: + name: testyaa1 + api_key: 5eb86c9896ab03919abcf03857163741 + delegate_to: localhost +''' + +RETURN = ''' +--- +memset_api: + description: Info from the Memset API + returned: always + type: complex + contains: + backups: + description: Whether this server has a backup service. + returned: always + type: bool + sample: true + control_panel: + description: Whether the server has a control panel (i.e. cPanel). + returned: always + type: str + sample: 'cpanel' + data_zone: + description: The data zone the server is in. + returned: always + type: str + sample: 'Memset Public Cloud' + expiry_date: + description: Current expiry date of the server. + returned: always + type: str + sample: '2018-08-10' + firewall_rule_group: + description: Details about the firewall group this server is in. + returned: always + type: dict + sample: { + "default_outbound_policy": "RETURN", + "name": "testyaa-fw1", + "nickname": "testyaa cPanel rules", + "notes": "", + "public": false, + "rules": { + "51d7db54d39c3544ef7c48baa0b9944f": { + "action": "ACCEPT", + "comment": "", + "dest_ip6s": "any", + "dest_ips": "any", + "dest_ports": "any", + "direction": "Inbound", + "ip_version": "any", + "ordering": 2, + "protocols": "icmp", + "rule_group_name": "testyaa-fw1", + "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", + "source_ip6s": "any", + "source_ips": "any", + "source_ports": "any" + } + } + } + firewall_type: + description: The type of firewall the server has (i.e. self-managed, managed). + returned: always + type: str + sample: 'managed' + host_name: + description: The server's hostname. 
+ returned: always + type: str + sample: 'testyaa1.miniserver.com' + ignore_monitoring_off: + description: When true, Memset won't remind the customer that monitoring is disabled. + returned: always + type: bool + sample: true + ips: + description: List of dictionaries of all IP addresses assigned to the server. + returned: always + type: list + sample: [ + { + "address": "1.2.3.4", + "bytes_in_today": 1000.0, + "bytes_in_yesterday": 2000.0, + "bytes_out_today": 1000.0, + "bytes_out_yesterday": 2000.0 + } + ] + monitor: + description: Whether the server has monitoring enabled. + returned: always + type: bool + sample: true + monitoring_level: + description: The server's monitoring level (i.e. basic). + returned: always + type: str + sample: 'basic' + name: + description: Server name (same as the service name). + returned: always + type: str + sample: 'testyaa1' + network_zones: + description: The network zone(s) the server is in. + returned: always + type: list + sample: [ 'reading' ] + nickname: + description: Customer-set nickname for the server. + returned: always + type: str + sample: 'database server' + no_auto_reboot: + description: Whether or not to reboot the server if monitoring detects it down. + returned: always + type: bool + sample: true + no_nrpe: + description: Whether Memset should use NRPE to monitor this server. + returned: always + type: bool + sample: true + os: + description: The server's Operating System. + returned: always + type: str + sample: 'debian_stretch_64' + penetration_patrol: + description: Intrusion detection support level for this server. + returned: always + type: str + sample: 'managed' + penetration_patrol_alert_level: + description: The alert level at which notifications are sent. + returned: always + type: int + sample: 10 + primary_ip: + description: Server's primary IP. + returned: always + type: str + sample: '1.2.3.4' + renewal_price_amount: + description: Renewal cost for the server. + returned: always + type: str + sample: '30.00' + renewal_price_currency: + description: Currency for renewal payments. + returned: always + type: str + sample: 'GBP' + renewal_price_vat: + description: VAT rate for renewal payments + returned: always + type: str + sample: '20' + start_date: + description: Server's start date. + returned: always + type: str + sample: '2013-04-10' + status: + description: Current status of the server (i.e. live, onhold). + returned: always + type: str + sample: 'LIVE' + support_level: + description: Support level included with the server. + returned: always + type: str + sample: 'managed' + type: + description: What this server is (i.e. dedicated) + returned: always + type: str + sample: 'miniserver' + vlans: + description: Dictionary of tagged and untagged VLANs this server is in. + returned: always + type: dict + sample: { + tagged: [], + untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ] + } + vulnscan: + description: Vulnerability scanning level. + returned: always + type: str + sample: 'basic' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def get_facts(args=None): + ''' + Performs a simple API call and returns a JSON blob. 
+ ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + payload['name'] = args['name'] + + api_method = 'server.info' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = "{0}" . format(response.stderr) + return retvals + + # we don't want to return the same thing twice + msg = None + memset_api = response.json() + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, type='str') + ), + supports_check_mode=True, + ) + + # populate the dict with the user-provided vars. + args = dict() + for key, arg in module.params.items(): + args[key] = arg + + retvals = get_facts(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/memset_zone.py b/ansible_collections/community/general/plugins/modules/memset_zone.py new file mode 100644 index 000000000..e17472e39 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/memset_zone.py @@ -0,0 +1,323 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: memset_zone +author: "Simon Weald (@glitchcrab)" +short_description: Creates and deletes Memset DNS zones +notes: + - Zones can be thought of as a logical group of domains, all of which share the + same DNS records (i.e. they point to the same IP). An API key generated via the + Memset customer control panel is needed with the following minimum scope - + I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list). +description: + - Manage DNS zones in a Memset account. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + required: true + description: + - Indicates desired state of resource. + type: str + choices: [ absent, present ] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + name: + required: true + description: + - The zone nickname; usually the same as the main domain. Ensure this + value has at most 250 characters. + type: str + aliases: [ nickname ] + ttl: + description: + - The default TTL for all records created in the zone. This must be a + valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). 
+ type: int + default: 0 + choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] + force: + required: false + default: false + type: bool + description: + - Forces deletion of a zone and all zone domains/zone records it contains. +''' + +EXAMPLES = ''' +# Create the zone 'test' +- name: Create zone + community.general.memset_zone: + name: test + state: present + api_key: 5eb86c9196ab03919abcf03857163741 + ttl: 300 + delegate_to: localhost + +# Force zone deletion +- name: Force delete zone + community.general.memset_zone: + name: test + state: absent + api_key: 5eb86c9196ab03919abcf03857163741 + force: true + delegate_to: localhost +''' + +RETURN = ''' +memset_api: + description: Zone info from the Memset API + returned: when state == present + type: complex + contains: + domains: + description: List of domains in this zone + returned: always + type: list + sample: [] + id: + description: Zone id + returned: always + type: str + sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" + nickname: + description: Zone name + returned: always + type: str + sample: "example.com" + records: + description: List of DNS records for domains in this zone + returned: always + type: list + sample: [] + ttl: + description: Default TTL for domains in this zone + returned: always + type: int + sample: 300 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import check_zone +from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def api_validation(args=None): + ''' + Perform some validation which will be enforced by Memset's API (see: + https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) + ''' + # zone domain length must be less than 250 chars. + if len(args['name']) > 250: + stderr = 'Zone name must be less than 250 characters in length.' + module.fail_json(failed=True, msg=stderr, stderr=stderr) + + +def check(args=None): + ''' + Support for running with check mode. + ''' + retvals = dict() + + api_method = 'dns.zone_list' + has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + zone_exists, counter = check_zone(data=response, name=args['name']) + + # set changed to true if the operation would cause a change. + has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present')) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + + return retvals + + +def create_zone(args=None, zone_exists=None, payload=None): + ''' + At this point we already know whether the zone exists, so we + just need to make the API reflect the desired state. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + if not zone_exists: + payload['ttl'] = args['ttl'] + payload['nickname'] = args['name'] + api_method = 'dns.zone_create' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + else: + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + for zone in response.json(): + if zone['nickname'] == args['name']: + break + if zone['ttl'] != args['ttl']: + # update the zone if the desired TTL is different. 
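The `check()` helper above reduces check-mode support to a single predicate: a change would occur exactly when the zone's existence disagrees with the requested state. Extracted as a standalone function:

```python
# The check-mode predicate used by check() above, extracted for clarity.
def would_change(exists, state):
    return (exists and state == 'absent') or (not exists and state == 'present')


assert would_change(True, 'absent')
assert would_change(False, 'present')
assert not would_change(True, 'present')
assert not would_change(False, 'absent')
```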
+ payload['id'] = zone['id'] + payload['ttl'] = args['ttl'] + api_method = 'dns.zone_update' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + + # populate return var with zone info. + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) + + if zone_exists: + payload = dict() + payload['id'] = zone_id + api_method = 'dns.zone_info' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + memset_api = response.json() + + return has_failed, has_changed, memset_api, msg + + +def delete_zone(args=None, zone_exists=None, payload=None): + ''' + Deletion requires extra sanity checking as the zone cannot be + deleted if it contains domains or records. Setting force=true + will override this behaviour. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + if zone_exists: + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + counter = 0 + for zone in response.json(): + if zone['nickname'] == args['name']: + counter += 1 + if counter == 1: + for zone in response.json(): + if zone['nickname'] == args['name']: + zone_id = zone['id'] + domain_count = len(zone['domains']) + record_count = len(zone['records']) + if (domain_count > 0 or record_count > 0) and args['force'] is False: + # we need to fail out if force was not explicitly set. + stderr = 'Zone contains domains or records and force was not used.' + has_failed = True + has_changed = False + module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1) + api_method = 'dns.zone_delete' + payload['id'] = zone_id + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice. + memset_api = msg + msg = None + else: + # zone names are not unique, so we cannot safely delete the requested + # zone at this time. + has_failed = True + has_changed = False + msg = 'Unable to delete zone as multiple zones with the same name exist.' + else: + has_failed, has_changed = False, False + + return has_failed, has_changed, memset_api, msg + + +def create_or_delete(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete. + ''' + retvals, payload = dict(), dict() + has_failed, has_changed = False, False + msg, memset_api, stderr = None, None, None + + # get the zones and check if the relevant zone exists. + api_method = 'dns.zone_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + if _has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. 
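`delete_zone()` above guards against two hazards: zone nicknames are not unique, so an ambiguous name must not be deleted, and non-empty zones are only removed when `force=true`. Its decision logic, distilled into a pure function:

```python
# Distilled decision logic of delete_zone() above: refuse ambiguous names,
# and require force for zones that still contain domains or records.
def can_delete(zones, name, force):
    matches = [z for z in zones if z['nickname'] == name]
    if not matches:
        return False, None                     # zone absent: nothing to do
    if len(matches) > 1:
        return False, 'Unable to delete zone as multiple zones with the same name exist.'
    zone = matches[0]
    if (zone['domains'] or zone['records']) and not force:
        return False, 'Zone contains domains or records and force was not used.'
    return True, None
```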
+ retvals['failed'] = _has_failed + retvals['msg'] = _msg + + if response.stderr is not None: + retvals['stderr'] = response.stderr + + return retvals + + zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) + + if args['state'] == 'present': + has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload) + + elif args['state'] == 'absent': + has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload) + + retvals['failed'] = has_failed + retvals['changed'] = has_changed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['present', 'absent'], type='str'), + api_key=dict(required=True, type='str', no_log=True), + name=dict(required=True, aliases=['nickname'], type='str'), + ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), + force=dict(required=False, default=False, type='bool') + ), + supports_check_mode=True + ) + + # populate the dict with the user-provided vars. + args = dict() + for key, arg in module.params.items(): + args[key] = arg + args['check_mode'] = module.check_mode + + # validate some API-specific limitations. + api_validation(args=args) + + if module.check_mode: + retvals = check(args) + else: + retvals = create_or_delete(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/memset_zone_domain.py b/ansible_collections/community/general/plugins/modules/memset_zone_domain.py new file mode 100644 index 000000000..172a48be2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/memset_zone_domain.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: memset_zone_domain +author: "Simon Weald (@glitchcrab)" +short_description: Create and delete domains in Memset DNS zones +notes: + - Zone domains can be thought of as a collection of domains, all of which share the + same DNS records (i.e. they point to the same IP). An API key generated via the + Memset customer control panel is needed with the following minimum scope - + I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list). + - Currently this module can only create one domain at a time. Multiple domains should + be created using C(with_items). +description: + - Manage DNS zone domains in a Memset account. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + default: present + description: + - Indicates desired state of resource. + type: str + choices: [ absent, present ] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + domain: + required: true + description: + - The zone domain name. Ensure this value has at most 250 characters. 
+ type: str + aliases: ['name'] + zone: + required: true + description: + - The zone to add the domain to (this must already exist). + type: str +''' + +EXAMPLES = ''' +# Create the zone domain 'test.com' +- name: Create zone domain + community.general.memset_zone_domain: + domain: test.com + zone: testzone + state: present + api_key: 5eb86c9196ab03919abcf03857163741 + delegate_to: localhost +''' + +RETURN = ''' +memset_api: + description: Domain info from the Memset API + returned: when changed or state == present + type: complex + contains: + domain: + description: Domain name + returned: always + type: str + sample: "example.com" + id: + description: Domain ID + returned: always + type: str + sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id +from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def api_validation(args=None): + ''' + Perform some validation which will be enforced by Memset's API (see: + https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create) + ''' + # zone domain length must be less than 250 chars + if len(args['domain']) > 250: + stderr = 'Zone domain must be less than 250 characters in length.' + module.fail_json(failed=True, msg=stderr) + + +def check(args=None): + ''' + Support for running with check mode. + ''' + retvals = dict() + has_changed = False + + api_method = 'dns.zone_domain_list' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + domain_exists = check_zone_domain(data=response, domain=args['domain']) + + # set changed to true if the operation would cause a change. + has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present')) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + + return retvals + + +def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None): + ''' + At this point we already know whether the containing zone exists, + so we just need to create the domain (or exit if it already exists). + ''' + has_changed, has_failed = False, False + msg = None + + api_method = 'dns.zone_domain_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + for zone_domain in response.json(): + if zone_domain['domain'] == args['domain']: + # zone domain already exists, nothing to change. + has_changed = False + break + else: + # we need to create the domain + api_method = 'dns.zone_domain_create' + payload['domain'] = args['domain'] + payload['zone_id'] = zone_id + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + + return has_failed, has_changed, msg + + +def delete_zone_domain(args=None, payload=None): + ''' + Deletion is pretty simple, domains are always unique so we + don't need to do any sanity checking to avoid deleting the + wrong thing.
+ ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + api_method = 'dns.zone_domain_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + domain_exists = check_zone_domain(data=response, domain=args['domain']) + + if domain_exists: + api_method = 'dns.zone_domain_delete' + payload['domain'] = args['domain'] + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = response.json() + # unset msg as we don't want to return unnecessary info to the user. + msg = None + + return has_failed, has_changed, memset_api, msg + + +def create_or_delete_domain(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete. + ''' + retvals, payload = dict(), dict() + has_changed, has_failed = False, False + msg, stderr, memset_api = None, None, None + + # get the zones and check if the relevant zone exists. + api_method = 'dns.zone_list' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + if has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = has_failed + retvals['msg'] = msg + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = response.stderr + return retvals + + zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) + + if not zone_exists: + # the zone needs to be unique - this isn't a requirement of Memset's API but it + # makes sense in the context of this module. + has_failed = True + if counter == 0: + stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone']) + elif counter > 1: + stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone']) + + retvals['failed'] = has_failed + retvals['msg'] = stderr + return retvals + + if args['state'] == 'present': + has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload) + + if args['state'] == 'absent': + has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + api_key=dict(required=True, type='str', no_log=True), + domain=dict(required=True, aliases=['name'], type='str'), + zone=dict(required=True, type='str') + ), + supports_check_mode=True + ) + + # populate the dict with the user-provided vars. + args = dict() + for key, arg in module.params.items(): + args[key] = arg + args['check_mode'] = module.check_mode + + # validate some API-specific limitations. + api_validation(args=args) + + if module.check_mode: + retvals = check(args) + else: + retvals = create_or_delete_domain(args) + + # we would need to populate the return values with the API's response + # in several places so it's easier to do it at the end instead. 
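`create_zone_domain()` earlier relies on Python's `for`/`else` construct: the `else` branch runs only when the loop finishes without hitting `break`, that is, when no existing zone domain matched, and only then is the create call issued. A self-contained illustration of the same control flow (the list append is a hypothetical stand-in for `dns.zone_domain_create`):

```python
# for/else control flow as used by create_zone_domain() above; the append
# stands in for the dns.zone_domain_create API call.
def ensure_domain(existing, wanted):
    for domain in existing:
        if domain == wanted:
            created = False                    # already present: no change
            break
    else:                                      # loop ended without break
        existing.append(wanted)
        created = True
    return created


assert ensure_domain(['a.com'], 'a.com') is False
assert ensure_domain(['a.com'], 'b.com') is True
```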
+ if not retvals['failed']: + if args['state'] == 'present' and not module.check_mode: + payload = dict() + payload['domain'] = args['domain'] + api_method = 'dns.zone_domain_info' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + retvals['memset_api'] = response.json() + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/memset_zone_record.py b/ansible_collections/community/general/plugins/modules/memset_zone_record.py new file mode 100644 index 000000000..4e56a11ca --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/memset_zone_record.py @@ -0,0 +1,393 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Simon Weald +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: memset_zone_record +author: "Simon Weald (@glitchcrab)" +short_description: Create and delete records in Memset DNS zones +notes: + - Zones can be thought of as a logical group of domains, all of which share the + same DNS records (i.e. they point to the same IP). An API key generated via the + Memset customer control panel is needed with the following minimum scope - + I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list). + - Currently this module can only create one DNS record at a time. Multiple records + should be created using C(with_items). +description: + - Manage DNS records in a Memset account. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + default: present + description: + - Indicates desired state of resource. + type: str + choices: [ absent, present ] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + address: + required: true + description: + - The address for this record (can be IP or text string depending on record type). + type: str + aliases: [ ip, data ] + priority: + description: + - C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive). + type: int + default: 0 + record: + required: false + description: + - The subdomain to create. + type: str + default: '' + type: + required: true + description: + - The type of DNS record to create. + choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ] + type: str + relative: + type: bool + default: false + description: + - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS) + and C(SRV)record types. + ttl: + description: + - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a + valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create). + default: 0 + choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] + type: int + zone: + required: true + description: + - The name of the zone to which to add the record to. 
+ type: str +''' + +EXAMPLES = ''' +# Create DNS record for www.domain.com +- name: Create DNS record + community.general.memset_zone_record: + api_key: dcf089a2896940da9ffefb307ef49ccd + state: present + zone: domain.com + type: A + record: www + address: 1.2.3.4 + ttl: 300 + relative: false + delegate_to: localhost + +# create an SPF record for domain.com +- name: Create SPF record for domain.com + community.general.memset_zone_record: + api_key: dcf089a2896940da9ffefb307ef49ccd + state: present + zone: domain.com + type: TXT + address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all" + delegate_to: localhost + +# create multiple DNS records +- name: Create multiple DNS records + community.general.memset_zone_record: + api_key: dcf089a2896940da9ffefb307ef49ccd + zone: "{{ item.zone }}" + type: "{{ item.type }}" + record: "{{ item.record }}" + address: "{{ item.address }}" + delegate_to: localhost + with_items: + - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' } + - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' } +''' + +RETURN = ''' +memset_api: + description: Record info from the Memset API. + returned: when state == present + type: complex + contains: + address: + description: Record content (may be an IP, string or blank depending on record type). + returned: always + type: str + sample: 1.1.1.1 + id: + description: Record ID. + returned: always + type: str + sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" + priority: + description: Priority for C(MX) and C(SRV) records. + returned: always + type: int + sample: 10 + record: + description: Name of record. + returned: always + type: str + sample: "www" + relative: + description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types. + returned: always + type: bool + sample: false + ttl: + description: Record TTL. + returned: always + type: int + sample: 10 + type: + description: Record type. + returned: always + type: str + sample: AAAA + zone_id: + description: Zone ID. + returned: always + type: str + sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id +from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call + + +def api_validation(args=None): + ''' + Perform some validation which will be enforced by Memset's API (see: + https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) + ''' + failed_validation = False + + # priority can only be integer 0 > 999 + if not 0 <= args['priority'] <= 999: + failed_validation = True + error = 'Priority must be in the range 0 > 999 (inclusive).' + # data value must be max 250 chars + if len(args['address']) > 250: + failed_validation = True + error = "Address must be less than 250 characters in length." + # record value must be max 250 chars + if args['record']: + if len(args['record']) > 63: + failed_validation = True + error = "Record must be less than 63 characters in length." + # relative isn't used for all record types + if args['relative']: + if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']: + failed_validation = True + error = "Relative is only valid for CNAME, MX, NS and SRV record types." 
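`api_validation()` above front-loads the limits Memset's API would otherwise reject server-side: priority 0 to 999 inclusive, address at most 250 characters, record at most 63 characters, and `relative` only for CNAME/MX/NS/SRV. The same checks, reshaped as a pure function that returns the first error found (the upstream version keeps the last one, but the accepted inputs are identical):

```python
# api_validation() above, reshaped as a pure predicate returning the first
# validation error, or None when the record is acceptable.
def validate_record(priority, address, record, relative, rtype):
    if not 0 <= priority <= 999:
        return 'Priority must be in the range 0 > 999 (inclusive).'
    if len(address) > 250:
        return 'Address must be less than 250 characters in length.'
    if record and len(record) > 63:
        return 'Record must be less than 63 characters in length.'
    if relative and rtype not in ('CNAME', 'MX', 'NS', 'SRV'):
        return 'Relative is only valid for CNAME, MX, NS and SRV record types.'
    return None
```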
+ # if any of the above failed then fail early + if failed_validation: + module.fail_json(failed=True, msg=error) + + +def create_zone_record(args=None, zone_id=None, records=None, payload=None): + ''' + Sanity checking has already occurred prior to this function being + called, so we can go ahead and either create or update the record. + As defaults are defined for all values in the argument_spec, this + may cause some changes to occur as the defaults are enforced (if + the user has only configured required variables). + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + # assemble the new record. + new_record = dict() + new_record['zone_id'] = zone_id + for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']: + new_record[arg] = args[arg] + + # if we have any matches, update them. + if records: + for zone_record in records: + # record exists, add ID to payload. + new_record['id'] = zone_record['id'] + if zone_record == new_record: + # nothing to do; record is already correct so we populate + # the return var with the existing record's details. + memset_api = zone_record + return has_changed, has_failed, memset_api, msg + else: + # merge dicts ensuring we change any updated values + payload = zone_record.copy() + payload.update(new_record) + api_method = 'dns.zone_record_update' + if args['check_mode']: + has_changed = True + # return the new record to the user in the returned var. + memset_api = new_record + return has_changed, has_failed, memset_api, msg + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = new_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + else: + # no record found, so we need to create it + api_method = 'dns.zone_record_create' + payload = new_record + if args['check_mode']: + has_changed = True + # populate the return var with the new record's details. + memset_api = new_record + return has_changed, has_failed, memset_api, msg + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = new_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + + return has_changed, has_failed, memset_api, msg + + +def delete_zone_record(args=None, records=None, payload=None): + ''' + Matching records can be cleanly deleted without affecting other + resource types, so this is pretty simple to achieve. + ''' + has_changed, has_failed = False, False + msg, memset_api = None, None + + # if we have any matches, delete them. + if records: + for zone_record in records: + if args['check_mode']: + has_changed = True + return has_changed, has_failed, memset_api, msg + payload['id'] = zone_record['id'] + api_method = 'dns.zone_record_delete' + has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) + if not has_failed: + has_changed = True + memset_api = zone_record + # empty msg as we don't want to return a boatload of json to the user. + msg = None + + return has_changed, has_failed, memset_api, msg + + +def create_or_delete(args=None): + ''' + We need to perform some initial sanity checking and also look + up required info before handing it off to create or delete functions. + Check mode is integrated into the create or delete functions. 
+ ''' + has_failed, has_changed = False, False + msg, memset_api, stderr = None, None, None + retvals, payload = dict(), dict() + + # get the zones and check if the relevant zone exists. + api_method = 'dns.zone_list' + _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + if _has_failed: + # this is the first time the API is called; incorrect credentials will + # manifest themselves at this point so we need to ensure the user is + # informed of the reason. + retvals['failed'] = _has_failed + retvals['msg'] = msg + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = response.stderr + return retvals + + zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) + + if not zone_exists: + has_failed = True + if counter == 0: + stderr = "DNS zone {0} does not exist." . format(args['zone']) + elif counter > 1: + stderr = "{0} matches multiple zones." . format(args['zone']) + retvals['failed'] = has_failed + retvals['msg'] = stderr + retvals['stderr'] = stderr + return retvals + + # get a list of all records ( as we can't limit records by zone) + api_method = 'dns.zone_record_list' + _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method) + + # find any matching records + records = [record for record in response.json() if record['zone_id'] == zone_id + and record['record'] == args['record'] and record['type'] == args['type']] + + if args['state'] == 'present': + has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload) + + if args['state'] == 'absent': + has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload) + + retvals['changed'] = has_changed + retvals['failed'] = has_failed + for val in ['msg', 'stderr', 'memset_api']: + if val is not None: + retvals[val] = eval(val) + + return retvals + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), + api_key=dict(required=True, type='str', no_log=True), + zone=dict(required=True, type='str'), + type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'), + address=dict(required=True, aliases=['ip', 'data'], type='str'), + record=dict(required=False, default='', type='str'), + ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), + priority=dict(required=False, default=0, type='int'), + relative=dict(required=False, default=False, type='bool') + ), + supports_check_mode=True + ) + + # populate the dict with the user-provided vars. 
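Record matching in `create_or_delete()` above keys on the `(zone_id, record, type)` triple, and `create_zone_record()` then treats a field-for-field equal dict as "no change", merging otherwise so that fields not supplied by the user keep their current values. A condensed sketch of both steps:

```python
# Condensed matching/idempotency logic from create_or_delete() and
# create_zone_record() above.
def find_matches(all_records, zone_id, record, rtype):
    return [r for r in all_records
            if r['zone_id'] == zone_id and r['record'] == record and r['type'] == rtype]


def merged_update(existing, desired):
    payload = existing.copy()
    payload.update(desired)                    # desired fields win
    changed = payload != existing              # equal dicts mean no-op
    return changed, payload
```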
+ args = dict() + for key, arg in module.params.items(): + args[key] = arg + args['check_mode'] = module.check_mode + + # perform some Memset API-specific validation + api_validation(args=args) + + retvals = create_or_delete(args) + + if retvals['failed']: + module.fail_json(**retvals) + else: + module.exit_json(**retvals) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/mksysb.py b/ansible_collections/community/general/plugins/modules/mksysb.py new file mode 100644 index 000000000..8272dbf7d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/mksysb.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Alexei Znamensky (@russoz) +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +author: Kairo Araujo (@kairoaraujo) +module: mksysb +short_description: Generates AIX mksysb rootvg backups +description: + - This module manages a basic AIX mksysb (image) of rootvg. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + backup_crypt_files: + description: + - Backup encrypted files. + type: bool + default: true + backup_dmapi_fs: + description: + - Back up DMAPI filesystem files. + type: bool + default: true + create_map_files: + description: + - Creates a new MAP files. + type: bool + default: false + exclude_files: + description: + - Excludes files using C(/etc/rootvg.exclude). + type: bool + default: false + exclude_wpar_files: + description: + - Excludes WPAR files. + type: bool + default: false + extended_attrs: + description: + - Backup extended attributes. + type: bool + default: true + name: + type: str + description: + - Backup name + required: true + new_image_data: + description: + - Creates a new file data. + type: bool + default: true + software_packing: + description: + - Exclude files from packing option listed in + C(/etc/exclude_packing.rootvg). + type: bool + default: false + storage_path: + type: str + description: + - Storage path where the mksysb will stored. + required: true + use_snapshot: + description: + - Creates backup using snapshots. + type: bool + default: false +''' + +EXAMPLES = ''' +- name: Running a backup image mksysb + community.general.mksysb: + name: myserver + storage_path: /repository/images + exclude_files: true + exclude_wpar_files: true +''' + +RETURN = ''' +changed: + description: Return changed for mksysb actions as true or false. + returned: always + type: bool +msg: + description: Return message regarding the action. 
+ returned: always + type: str +''' + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +class MkSysB(ModuleHelper): + module = dict( + argument_spec=dict( + backup_crypt_files=dict(type='bool', default=True), + backup_dmapi_fs=dict(type='bool', default=True), + create_map_files=dict(type='bool', default=False), + exclude_files=dict(type='bool', default=False), + exclude_wpar_files=dict(type='bool', default=False), + extended_attrs=dict(type='bool', default=True), + name=dict(type='str', required=True), + new_image_data=dict(type='bool', default=True), + software_packing=dict(type='bool', default=False), + storage_path=dict(type='str', required=True), + use_snapshot=dict(type='bool', default=False) + ), + supports_check_mode=True, + ) + command_args_formats = dict( + create_map_files=cmd_runner_fmt.as_bool("-m"), + use_snapshot=cmd_runner_fmt.as_bool("-T"), + exclude_files=cmd_runner_fmt.as_bool("-e"), + exclude_wpar_files=cmd_runner_fmt.as_bool("-G"), + new_image_data=cmd_runner_fmt.as_bool("-i"), + software_packing=cmd_runner_fmt.as_bool_not("-p"), + extended_attrs=cmd_runner_fmt.as_bool("-a"), + backup_crypt_files=cmd_runner_fmt.as_bool_not("-Z"), + backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"), + combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])), + ) + + def __init_module__(self): + if not os.path.isdir(self.vars.storage_path): + self.do_raise("Storage path %s is not valid." % self.vars.storage_path) + + def __run__(self): + def process(rc, out, err): + if rc != 0: + self.do_raise("mksysb failed: {0}".format(out)) + + runner = CmdRunner( + self.module, + ['mksysb', '-X'], + self.command_args_formats, + ) + with runner(['create_map_files', 'use_snapshot', 'exclude_files', 'exclude_wpar_files', 'software_packing', + 'extended_attrs', 'backup_crypt_files', 'backup_dmapi_fs', 'new_image_data', 'combined_path'], + output_process=process, check_mode_skip=True) as ctx: + ctx.run(combined_path=[self.vars.storage_path, self.vars.name]) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + self.changed = True + + +def main(): + MkSysB.execute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/modprobe.py b/ansible_collections/community/general/plugins/modules/modprobe.py new file mode 100644 index 000000000..6389d758d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/modprobe.py @@ -0,0 +1,320 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, David Stygstra +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: modprobe +short_description: Load or unload kernel modules +author: + - David Stygstra (@stygstra) + - Julien Dauphant (@jdauphant) + - Matt Jeffery (@mattjeffery) +description: + - Load or unload kernel modules. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + required: true + description: + - Name of kernel module to manage. 
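+# (At runtime, state=present corresponds roughly to 'modprobe <name> [params]'
+# and state=absent to 'modprobe -r <name>'; see load_module/unload_module below.)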
+ state: + type: str + description: + - Whether the module should be present or absent. + choices: [ absent, present ] + default: present + params: + type: str + description: + - Modules parameters. + default: '' + persistent: + type: str + choices: [ disabled, absent, present ] + default: disabled + description: + - Persistency between reboots for configured module. + - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots. + - If C(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot. + - If C(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module will not be + loaded on next reboot. + - If C(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is. + - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded in the + kernel modules themselves instead of configuration like this. + - In fact, most modern kernel modules are prepared for automatic loading already. + - "B(Note:) This option works only with distributions that use C(systemd) when set to values other than C(disabled)." +''' + +EXAMPLES = ''' +- name: Add the 802.1q module + community.general.modprobe: + name: 8021q + state: present + +- name: Add the dummy module + community.general.modprobe: + name: dummy + state: present + params: 'numdummies=2' + +- name: Add the dummy module and make sure it is loaded after reboots + community.general.modprobe: + name: dummy + state: present + params: 'numdummies=2' + persistent: present +''' + +import os.path +import platform +import shlex +import traceback +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +RELEASE_VER = platform.release() +MODULES_LOAD_LOCATION = '/etc/modules-load.d' +PARAMETERS_FILES_LOCATION = '/etc/modprobe.d' + + +class Modprobe(object): + + def __init__(self, module): + self.module = module + self.modprobe_bin = module.get_bin_path('modprobe', True) + + self.check_mode = module.check_mode + self.desired_state = module.params['state'] + self.name = module.params['name'] + self.params = module.params['params'] + self.persistent = module.params['persistent'] + + self.changed = False + + self.re_find_module = re.compile(r'^ *{0} *(?:[#;].*)?\n?\Z'.format(self.name)) + self.re_find_params = re.compile(r'^options {0} \w+=\S+ *(?:[#;].*)?\n?\Z'.format(self.name)) + self.re_get_params_and_values = re.compile(r'^options {0} (\w+=\S+) *(?:[#;].*)?\n?\Z'.format(self.name)) + + def load_module(self): + command = [self.modprobe_bin] + if self.check_mode: + command.append('-n') + command.extend([self.name] + shlex.split(self.params)) + + rc, out, err = self.module.run_command(command) + + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + if self.check_mode or self.module_loaded(): + self.changed = True + else: + rc, stdout, stderr = self.module.run_command( + [self.modprobe_bin, '-n', '--first-time', self.name] + shlex.split(self.params) + ) + if rc != 0: + self.module.warn(stderr) + + @property + def module_is_loaded_persistently(self): + for module_file in self.modules_files: + with open(module_file) as file: + for line in file: + if self.re_find_module.match(line): + return True + + 
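+        # No file under /etc/modules-load.d/ mentions the module, so it is
+        # not configured to load persistently.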
return False + + @property + def params_is_set(self): + desired_params = set(self.params.split()) + + return desired_params == self.permanent_params + + @property + def permanent_params(self): + params = set() + + for modprobe_file in self.modprobe_files: + with open(modprobe_file) as file: + for line in file: + match = self.re_get_params_and_values.match(line) + if match: + params.add(match.group(1)) + + return params + + def create_module_file(self): + file_path = os.path.join(MODULES_LOAD_LOCATION, + self.name + '.conf') + with open(file_path, 'w') as file: + file.write(self.name + '\n') + + @property + def module_options_file_content(self): + file_content = ['options {0} {1}'.format(self.name, param) + for param in self.params.split()] + return '\n'.join(file_content) + '\n' + + def create_module_options_file(self): + new_file_path = os.path.join(PARAMETERS_FILES_LOCATION, + self.name + '.conf') + with open(new_file_path, 'w') as file: + file.write(self.module_options_file_content) + + def disable_old_params(self): + + for modprobe_file in self.modprobe_files: + with open(modprobe_file) as file: + file_content = file.readlines() + + content_changed = False + for index, line in enumerate(file_content): + if self.re_find_params.match(line): + file_content[index] = '#' + line + content_changed = True + + if content_changed: + with open(modprobe_file, 'w') as file: + file.write('\n'.join(file_content)) + + def disable_module_permanent(self): + + for module_file in self.modules_files: + with open(module_file) as file: + file_content = file.readlines() + + content_changed = False + for index, line in enumerate(file_content): + if self.re_find_module.match(line): + file_content[index] = '#' + line + content_changed = True + + if content_changed: + with open(module_file, 'w') as file: + file.write('\n'.join(file_content)) + + def load_module_permanent(self): + + if not self.module_is_loaded_persistently: + self.create_module_file() + self.changed = True + + if not self.params_is_set: + self.disable_old_params() + self.create_module_options_file() + self.changed = True + + def unload_module_permanent(self): + if self.module_is_loaded_persistently: + self.disable_module_permanent() + self.changed = True + + if self.permanent_params: + self.disable_old_params() + self.changed = True + + @property + def modules_files(self): + modules_paths = [os.path.join(MODULES_LOAD_LOCATION, path) + for path in os.listdir(MODULES_LOAD_LOCATION)] + return [path for path in modules_paths if os.path.isfile(path)] + + @property + def modprobe_files(self): + modules_paths = [os.path.join(PARAMETERS_FILES_LOCATION, path) + for path in os.listdir(PARAMETERS_FILES_LOCATION)] + return [path for path in modules_paths if os.path.isfile(path)] + + def module_loaded(self): + is_loaded = False + try: + with open('/proc/modules') as modules: + module_name = self.name.replace('-', '_') + ' ' + for line in modules: + if line.startswith(module_name): + is_loaded = True + break + + if not is_loaded: + module_file = '/' + self.name + '.ko' + builtin_path = os.path.join('/lib/modules/', RELEASE_VER, 'modules.builtin') + with open(builtin_path) as builtins: + for line in builtins: + if line.rstrip().endswith(module_file): + is_loaded = True + break + except (IOError, OSError) as e: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **self.result) + + return is_loaded + + def unload_module(self): + command = [self.modprobe_bin, '-r', self.name] + if self.check_mode: + command.append('-n') + + rc, out, err = 
self.module.run_command(command) + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + self.changed = True + + @property + def result(self): + return { + 'changed': self.changed, + 'name': self.name, + 'params': self.params, + 'state': self.desired_state, + } + + +def build_module(): + return AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + persistent=dict(type='str', default='disabled', choices=['disabled', 'present', 'absent']), + ), + supports_check_mode=True, + ) + + +def main(): + module = build_module() + + modprobe = Modprobe(module) + + if modprobe.desired_state == 'present' and not modprobe.module_loaded(): + modprobe.load_module() + elif modprobe.desired_state == 'absent' and modprobe.module_loaded(): + modprobe.unload_module() + + if modprobe.persistent == 'present' and not (modprobe.module_is_loaded_persistently and modprobe.params_is_set): + modprobe.load_module_permanent() + elif modprobe.persistent == 'absent' and (modprobe.module_is_loaded_persistently or modprobe.permanent_params): + modprobe.unload_module_permanent() + + module.exit_json(**modprobe.result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/monit.py b/ansible_collections/community/general/plugins/modules/monit.py new file mode 100644 index 000000000..d2a160678 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/monit.py @@ -0,0 +1,349 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Darryl Stoflet +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: monit +short_description: Manage the state of a program monitored via Monit +description: + - Manage the state of a program monitored via I(Monit). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of the I(monit) program/process to manage. + required: true + type: str + state: + description: + - The state of service. + required: true + choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] + type: str + timeout: + description: + - If there are pending actions for the service monitored by monit, then Ansible will check + for up to this many seconds to verify the requested action has been performed. + Ansible will sleep for five seconds between each check. 
+ default: 300 + type: int +author: + - Darryl Stoflet (@dstoflet) + - Simon Kelly (@snopoke) +''' + +EXAMPLES = ''' +- name: Manage the state of program httpd to be in started state + community.general.monit: + name: httpd + state: started +''' + +import time +import re + +from collections import namedtuple + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import python_2_unicode_compatible + + +STATE_COMMAND_MAP = { + 'stopped': 'stop', + 'started': 'start', + 'monitored': 'monitor', + 'unmonitored': 'unmonitor', + 'restarted': 'restart' +} + +MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program', + 'Network'] + + +@python_2_unicode_compatible +class StatusValue(namedtuple("Status", "value, is_pending")): + MISSING = 'missing' + OK = 'ok' + NOT_MONITORED = 'not_monitored' + INITIALIZING = 'initializing' + DOES_NOT_EXIST = 'does_not_exist' + EXECUTION_FAILED = 'execution_failed' + ALL_STATUS = [ + MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED + ] + + def __new__(cls, value, is_pending=False): + return super(StatusValue, cls).__new__(cls, value, is_pending) + + def pending(self): + return StatusValue(self.value, True) + + def __getattr__(self, item): + if item in ('is_%s' % status for status in self.ALL_STATUS): + return self.value == getattr(self, item[3:].upper()) + raise AttributeError(item) + + def __str__(self): + return "%s%s" % (self.value, " (pending)" if self.is_pending else "") + + +class Status(object): + MISSING = StatusValue(StatusValue.MISSING) + OK = StatusValue(StatusValue.OK) + RUNNING = StatusValue(StatusValue.OK) + NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED) + INITIALIZING = StatusValue(StatusValue.INITIALIZING) + DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST) + EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED) + + +class Monit(object): + def __init__(self, module, monit_bin_path, service_name, timeout): + self.module = module + self.monit_bin_path = monit_bin_path + self.process_name = service_name + self.timeout = timeout + + self._monit_version = None + self._raw_version = None + self._status_change_retry_count = 6 + + def monit_version(self): + if self._monit_version is None: + self._raw_version, version = self._get_monit_version() + # Use only major and minor even if there are more these should be enough + self._monit_version = version[0], version[1] + return self._monit_version + + def _get_monit_version(self): + rc, out, err = self.module.run_command([self.monit_bin_path, '-V'], check_rc=True) + version_line = out.split('\n')[0] + raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group() + return raw_version, tuple(map(int, raw_version.split('.'))) + + def exit_fail(self, msg, status=None, **kwargs): + kwargs.update({ + 'msg': msg, + 'monit_version': self._raw_version, + 'process_status': str(status) if status else None, + }) + self.module.fail_json(**kwargs) + + def exit_success(self, state): + self.module.exit_json(changed=True, name=self.process_name, state=state) + + @property + def command_args(self): + return ["-B"] if self.monit_version() > (5, 18) else [] + + def get_status(self, validate=False): + """Return the status of the process in monit. 
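+
+        Runs 'monit status' (or 'monit validate' when validate is true) and
+        parses the reported status of the configured process.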
+ + :@param validate: Force monit to re-check the status of the process + """ + monit_command = "validate" if validate else "status" + check_rc = False if validate else True # 'validate' always has rc = 1 + command = [self.monit_bin_path, monit_command] + self.command_args + [self.process_name] + rc, out, err = self.module.run_command(command, check_rc=check_rc) + return self._parse_status(out, err) + + def _parse_status(self, output, err): + escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES]) + pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name)) + if not re.search(pattern, output, re.IGNORECASE): + return Status.MISSING + + status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE) + if not status_val: + self.exit_fail("Unable to find process status", stdout=output, stderr=err) + + status_val = status_val[0].strip().upper() + if ' | ' in status_val: + status_val = status_val.split(' | ')[0] + if ' - ' not in status_val: + status_val = status_val.replace(' ', '_') + return getattr(Status, status_val) + else: + status_val, substatus = status_val.split(' - ') + action, state = substatus.split() + if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']: + status = Status.OK + else: + status = Status.NOT_MONITORED + + if state == 'pending': + status = status.pending() + return status + + def is_process_present(self): + command = [self.monit_bin_path, 'summary'] + self.command_args + rc, out, err = self.module.run_command(command, check_rc=True) + return bool(re.findall(r'\b%s\b' % self.process_name, out)) + + def is_process_running(self): + return self.get_status().is_ok + + def run_command(self, command): + """Runs a monit command, and returns the new status.""" + return self.module.run_command([self.monit_bin_path, command, self.process_name], check_rc=True) + + def wait_for_status_change(self, current_status): + running_status = self.get_status() + if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED: + return running_status + + loop_count = 0 + while running_status.value == current_status.value: + if loop_count >= self._status_change_retry_count: + self.exit_fail('waited too long for monit to change state', running_status) + + loop_count += 1 + time.sleep(0.5) + validate = loop_count % 2 == 0 # force recheck of status every second try + running_status = self.get_status(validate) + return running_status + + def wait_for_monit_to_stop_pending(self, current_status=None): + """Fails this run if there is no status or it's pending/initializing for timeout""" + timeout_time = time.time() + self.timeout + + if not current_status: + current_status = self.get_status() + waiting_status = [ + StatusValue.MISSING, + StatusValue.INITIALIZING, + StatusValue.DOES_NOT_EXIST, + ] + while current_status.is_pending or (current_status.value in waiting_status): + if time.time() >= timeout_time: + self.exit_fail('waited too long for "pending", or "initiating" status to go away', current_status) + + time.sleep(5) + current_status = self.get_status(validate=True) + return current_status + + def reload(self): + rc, out, err = self.module.run_command([self.monit_bin_path, 'reload']) + if rc != 0: + self.exit_fail('monit reload failed', stdout=out, stderr=err) + self.exit_success(state='reloaded') + + def present(self): + self.run_command('reload') + + timeout_time = time.time() + self.timeout + while not self.is_process_present(): + if time.time() >= timeout_time: + self.exit_fail('waited too long 
for process to become "present"') + + time.sleep(5) + + self.exit_success(state='present') + + def change_state(self, state, expected_status, invert_expected=None): + current_status = self.get_status() + self.run_command(STATE_COMMAND_MAP[state]) + status = self.wait_for_status_change(current_status) + status = self.wait_for_monit_to_stop_pending(status) + status_match = status.value == expected_status.value + if invert_expected: + status_match = not status_match + if status_match: + self.exit_success(state=state) + self.exit_fail('%s process not %s' % (self.process_name, state), status) + + def stop(self): + self.change_state('stopped', Status.NOT_MONITORED) + + def unmonitor(self): + self.change_state('unmonitored', Status.NOT_MONITORED) + + def restart(self): + self.change_state('restarted', Status.OK) + + def start(self): + self.change_state('started', Status.OK) + + def monitor(self): + self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True) + + +def main(): + arg_spec = dict( + name=dict(required=True), + timeout=dict(default=300, type='int'), + state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) + ) + + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params['name'] + state = module.params['state'] + timeout = module.params['timeout'] + + monit = Monit(module, module.get_bin_path('monit', True), name, timeout) + + def exit_if_check_mode(): + if module.check_mode: + module.exit_json(changed=True) + + if state == 'reloaded': + exit_if_check_mode() + monit.reload() + + present = monit.is_process_present() + + if not present and not state == 'present': + module.fail_json(msg='%s process not presently configured with monit' % name, name=name) + + if state == 'present': + if present: + module.exit_json(changed=False, name=name, state=state) + exit_if_check_mode() + monit.present() + + monit.wait_for_monit_to_stop_pending() + running = monit.is_process_running() + + if running and state in ['started', 'monitored']: + module.exit_json(changed=False, name=name, state=state) + + if running and state == 'stopped': + exit_if_check_mode() + monit.stop() + + if running and state == 'unmonitored': + exit_if_check_mode() + monit.unmonitor() + + elif state == 'restarted': + exit_if_check_mode() + monit.restart() + + elif not running and state == 'started': + exit_if_check_mode() + monit.start() + + elif not running and state == 'monitored': + exit_if_check_mode() + monit.monitor() + + module.exit_json(changed=False, name=name, state=state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/mqtt.py b/ansible_collections/community/general/plugins/modules/mqtt.py new file mode 100644 index 000000000..389382649 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/mqtt.py @@ -0,0 +1,257 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, 2014, Jan-Piet Mens +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: mqtt +short_description: Publish a message on an MQTT topic for the IoT +description: + - Publish a message on an MQTT topic. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + server: + type: str + description: + - MQTT broker address/name + default: localhost + port: + type: int + description: + - MQTT broker port number + default: 1883 + username: + type: str + description: + - Username to authenticate against the broker. + password: + type: str + description: + - Password for C(username) to authenticate against the broker. + client_id: + type: str + description: + - MQTT client identifier + - If not specified, a value C(hostname + pid) will be used. + topic: + type: str + description: + - MQTT topic name + required: true + payload: + type: str + description: + - Payload. The special string C("None") may be used to send a NULL + (i.e. empty) payload which is useful to simply notify with the I(topic) + or to clear previously retained messages. + required: true + qos: + type: str + description: + - QoS (Quality of Service) + default: "0" + choices: [ "0", "1", "2" ] + retain: + description: + - Setting this flag causes the broker to retain (i.e. keep) the message so that + applications that subsequently subscribe to the topic can received the last + retained message immediately. + type: bool + default: false + ca_cert: + type: path + description: + - The path to the Certificate Authority certificate files that are to be + treated as trusted by this client. If this is the only option given + then the client will operate in a similar manner to a web browser. That + is to say it will require the broker to have a certificate signed by the + Certificate Authorities in ca_certs and will communicate using TLS v1, + but will not attempt any form of authentication. This provides basic + network encryption but may not be sufficient depending on how the broker + is configured. + aliases: [ ca_certs ] + client_cert: + type: path + description: + - The path pointing to the PEM encoded client certificate. If this is not + None it will be used as client information for TLS based + authentication. Support for this feature is broker dependent. + aliases: [ certfile ] + client_key: + type: path + description: + - The path pointing to the PEM encoded client private key. If this is not + None it will be used as client information for TLS based + authentication. Support for this feature is broker dependent. + aliases: [ keyfile ] + tls_version: + description: + - Specifies the version of the SSL/TLS protocol to be used. + - By default (if the python version supports it) the highest TLS version is + detected. If unavailable, TLS v1 is used. + type: str + choices: + - tlsv1.1 + - tlsv1.2 +requirements: [ mosquitto ] +notes: + - This module requires a connection to an MQTT broker such as Mosquitto + U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)). +author: "Jan-Piet Mens (@jpmens)" +''' + +EXAMPLES = ''' +- name: Publish a message on an MQTT topic + community.general.mqtt: + topic: 'service/ansible/{{ ansible_hostname }}' + payload: 'Hello at {{ ansible_date_time.iso8601 }}' + qos: 0 + retain: false + client_id: ans001 + delegate_to: localhost +''' + +# =========================================== +# MQTT module support methods. 
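+# A rough standalone sketch of the paho-mqtt call this module wraps
+# (hypothetical topic and host values; requires the paho-mqtt package):
+#
+#   import paho.mqtt.publish as mqtt
+#   mqtt.single('service/ansible/web1', 'hello', qos=0, retain=False,
+#               hostname='localhost', port=1883)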
+# + +import os +import ssl +import traceback +import platform + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +HAS_PAHOMQTT = True +PAHOMQTT_IMP_ERR = None +try: + import socket + import paho.mqtt.publish as mqtt +except ImportError: + PAHOMQTT_IMP_ERR = traceback.format_exc() + HAS_PAHOMQTT = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +# =========================================== +# Main +# + +def main(): + tls_map = {} + + try: + tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2 + except AttributeError: + pass + + try: + tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1 + except AttributeError: + pass + + module = AnsibleModule( + argument_spec=dict( + server=dict(default='localhost'), + port=dict(default=1883, type='int'), + topic=dict(required=True), + payload=dict(required=True), + client_id=dict(default=None), + qos=dict(default="0", choices=["0", "1", "2"]), + retain=dict(default=False, type='bool'), + username=dict(default=None), + password=dict(default=None, no_log=True), + ca_cert=dict(default=None, type='path', aliases=['ca_certs']), + client_cert=dict(default=None, type='path', aliases=['certfile']), + client_key=dict(default=None, type='path', aliases=['keyfile']), + tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2']) + ), + supports_check_mode=True + ) + + if not HAS_PAHOMQTT: + module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR) + + server = module.params.get("server", 'localhost') + port = module.params.get("port", 1883) + topic = module.params.get("topic") + payload = module.params.get("payload") + client_id = module.params.get("client_id", '') + qos = int(module.params.get("qos", 0)) + retain = module.params.get("retain") + username = module.params.get("username", None) + password = module.params.get("password", None) + ca_certs = module.params.get("ca_cert", None) + certfile = module.params.get("client_cert", None) + keyfile = module.params.get("client_key", None) + tls_version = module.params.get("tls_version", None) + + if client_id is None: + client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) + + if payload and payload == 'None': + payload = None + + auth = None + if username is not None: + auth = {'username': username, 'password': password} + + tls = None + if ca_certs is not None: + if tls_version: + tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23) + else: + if LooseVersion(platform.python_version()) <= LooseVersion("3.5.2"): + # Specifying `None` on later versions of python seems sufficient to + # instruct python to autonegotiate the SSL/TLS connection. On versions + # 3.5.2 and lower though we need to specify the version. + # + # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was + # not available until 3.5.3. 
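+                # (Despite its name, ssl.PROTOCOL_SSLv23 negotiates the
+                # highest TLS version supported by both endpoints.)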
+ tls_version = ssl.PROTOCOL_SSLv23 + + tls = { + 'ca_certs': ca_certs, + 'certfile': certfile, + 'keyfile': keyfile, + 'tls_version': tls_version, + } + + try: + mqtt.single( + topic, + payload, + qos=qos, + retain=retain, + client_id=client_id, + hostname=server, + port=port, + auth=auth, + tls=tls + ) + except Exception as e: + module.fail_json( + msg="unable to publish to MQTT broker %s" % to_native(e), + exception=traceback.format_exc() + ) + + module.exit_json(changed=False, topic=topic) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/mssql_db.py b/ansible_collections/community/general/plugins/modules/mssql_db.py new file mode 100644 index 000000000..4006033cf --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/mssql_db.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Vedit Firat Arig +# Outline and parts are reused from Mark Theunissen's mysql_db module +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: mssql_db +short_description: Add or remove MSSQL databases from a remote host +description: + - Add or remove MSSQL databases from a remote host. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - name of the database to add or remove + required: true + aliases: [ db ] + type: str + login_user: + description: + - The username used to authenticate with + type: str + default: '' + login_password: + description: + - The password used to authenticate with + type: str + default: '' + login_host: + description: + - Host running the database + type: str + required: true + login_port: + description: + - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used + default: '1433' + type: str + state: + description: + - The database state + default: present + choices: [ "present", "absent", "import" ] + type: str + target: + description: + - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL + files (C(.sql)) files are supported. + type: str + autocommit: + description: + - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed + within a transaction. + type: bool + default: false +notes: + - Requires the pymssql Python package on the remote host. For Ubuntu, this + is as easy as pip install pymssql (See M(ansible.builtin.pip).) 
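+# (Internally the module connects via pymssql.connect(user=..., password=...,
+# host=..., database='master') and issues CREATE/DROP DATABASE statements.)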
+requirements: + - python >= 2.7 + - pymssql +author: Vedit Firat Arig (@vedit) +''' + +EXAMPLES = ''' +- name: Create a new database with name 'jackdata' + community.general.mssql_db: + name: jackdata + state: present + +# Copy database dump file to remote host and restore it to database 'my_db' +- name: Copy database dump file to remote host + ansible.builtin.copy: + src: dump.sql + dest: /tmp + +- name: Restore the dump file to database 'my_db' + community.general.mssql_db: + name: my_db + state: import + target: /tmp/dump.sql +''' + +RETURN = ''' +# +''' + +import os +import traceback + +PYMSSQL_IMP_ERR = None +try: + import pymssql +except ImportError: + PYMSSQL_IMP_ERR = traceback.format_exc() + mssql_found = False +else: + mssql_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def db_exists(conn, cursor, db): + cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db) + conn.commit() + return bool(cursor.rowcount) + + +def db_create(conn, cursor, db): + cursor.execute("CREATE DATABASE [%s]" % db) + return db_exists(conn, cursor, db) + + +def db_delete(conn, cursor, db): + try: + cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db) + except Exception: + pass + cursor.execute("DROP DATABASE [%s]" % db) + return not db_exists(conn, cursor, db) + + +def db_import(conn, cursor, module, db, target): + if os.path.isfile(target): + with open(target, 'r') as backup: + sqlQuery = "USE [%s]\n" % db + for line in backup: + if line is None: + break + elif line.startswith('GO'): + cursor.execute(sqlQuery) + sqlQuery = "USE [%s]\n" % db + else: + sqlQuery += line + cursor.execute(sqlQuery) + conn.commit() + return 0, "import successful", "" + else: + return 1, "cannot find target file", "cannot find target file" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['db']), + login_user=dict(default=''), + login_password=dict(default='', no_log=True), + login_host=dict(required=True), + login_port=dict(default='1433'), + target=dict(default=None), + autocommit=dict(type='bool', default=False), + state=dict( + default='present', choices=['present', 'absent', 'import']) + ) + ) + + if not mssql_found: + module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR) + + db = module.params['name'] + state = module.params['state'] + autocommit = module.params['autocommit'] + target = module.params["target"] + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + + login_querystring = login_host + if login_port != "1433": + login_querystring = "%s:%s" % (login_host, login_port) + + if login_user != "" and login_password == "": + module.fail_json(msg="when supplying login_user arguments login_password must be provided") + + try: + conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master') + cursor = conn.cursor() + except Exception as e: + if "Unknown database" in str(e): + errno, errstr = e.args + module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) + else: + module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " + "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") + + conn.autocommit(True) + changed = False + + if db_exists(conn, cursor, db): + if state == "absent": + try: + changed = db_delete(conn, 
cursor, db) + except Exception as e: + module.fail_json(msg="error deleting database: " + str(e)) + elif state == "import": + conn.autocommit(autocommit) + rc, stdout, stderr = db_import(conn, cursor, module, db, target) + + if rc != 0: + module.fail_json(msg="%s" % stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + else: + if state == "present": + try: + changed = db_create(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error creating database: " + str(e)) + elif state == "import": + try: + changed = db_create(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error creating database: " + str(e)) + + conn.autocommit(autocommit) + rc, stdout, stderr = db_import(conn, cursor, module, db, target) + + if rc != 0: + module.fail_json(msg="%s" % stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + + module.exit_json(changed=changed, db=db) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/mssql_script.py b/ansible_collections/community/general/plugins/modules/mssql_script.py new file mode 100644 index 000000000..1696000db --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/mssql_script.py @@ -0,0 +1,313 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Kris Budde = 2.7 + - pymssql + +author: + - Kris Budde (@kbudde) +''' + +EXAMPLES = r''' +- name: Check DB connection + community.general.mssql_script: + login_user: "{{ mssql_login_user }}" + login_password: "{{ mssql_login_password }}" + login_host: "{{ mssql_host }}" + login_port: "{{ mssql_port }}" + db: master + script: "SELECT 1" + +- name: Query with parameter + community.general.mssql_script: + login_user: "{{ mssql_login_user }}" + login_password: "{{ mssql_login_password }}" + login_host: "{{ mssql_host }}" + login_port: "{{ mssql_port }}" + script: | + SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s + params: + dbname: msdb + register: result_params +- assert: + that: + - result_params.query_results[0][0][0][0] == 'msdb' + - result_params.query_results[0][0][0][1] == 'ONLINE' + +- name: two batches with default output + community.general.mssql_script: + login_user: "{{ mssql_login_user }}" + login_password: "{{ mssql_login_password }}" + login_host: "{{ mssql_host }}" + login_port: "{{ mssql_port }}" + script: | + SELECT 'Batch 0 - Select 0' + SELECT 'Batch 0 - Select 1' + GO + SELECT 'Batch 1 - Select 0' + register: result_batches +- assert: + that: + - result_batches.query_results | length == 2 # two batch results + - result_batches.query_results[0] | length == 2 # two selects in first batch + - result_batches.query_results[0][0] | length == 1 # one row in first select + - result_batches.query_results[0][0][0] | length == 1 # one column in first row + - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values. 
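+
+# Note: batches are split on a line containing only the GO keyword; each batch
+# contributes one entry to query_results, and each SELECT within a batch adds
+# one result set inside that entry.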
+ +- name: two batches with dict output + community.general.mssql_script: + login_user: "{{ mssql_login_user }}" + login_password: "{{ mssql_login_password }}" + login_host: "{{ mssql_host }}" + login_port: "{{ mssql_port }}" + output: dict + script: | + SELECT 'Batch 0 - Select 0' as b0s0 + SELECT 'Batch 0 - Select 1' as b0s1 + GO + SELECT 'Batch 1 - Select 0' as b1s0 + register: result_batches_dict +- assert: + that: + - result_batches_dict.query_results_dict | length == 2 # two batch results + - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch + - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select + - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row +''' + +RETURN = r''' +query_results: + description: List of batches (queries separated by C(GO) keyword). + type: list + elements: list + returned: success and I(output=default) + sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] + contains: + queries: + description: + - List of result sets of each query. + - If a query returns no results, the results of this and all the following queries will not be included in the output. + - Use the C(GO) keyword in I(script) to separate queries. + type: list + elements: list + contains: + rows: + description: List of rows returned by query. + type: list + elements: list + contains: + column_value: + description: + - List of column values. + - Any non-standard JSON type is converted to string. + type: list + example: ["Batch 0 - Select 0"] + returned: success, if output is default +query_results_dict: + description: List of batches (queries separated by C(GO) keyword). + type: list + elements: list + returned: success and I(output=dict) + sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] + contains: + queries: + description: + - List of result sets of each query. + - If a query returns no results, the results of this and all the following queries will not be included in the output. + Use 'GO' keyword to separate queries. + type: list + elements: list + contains: + rows: + description: List of rows returned by query. + type: list + elements: list + contains: + column_dict: + description: + - Dictionary of column names and values. + - Any non-standard JSON type is converted to string. 
+ type: dict + example: {"col_name": "Batch 0 - Select 0"} + returned: success, if output is dict +''' + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +import traceback +import json +PYMSSQL_IMP_ERR = None +try: + import pymssql +except ImportError: + PYMSSQL_IMP_ERR = traceback.format_exc() + MSSQL_FOUND = False +else: + MSSQL_FOUND = True + + +def clean_output(o): + return str(o) + + +def run_module(): + module_args = dict( + name=dict(required=False, aliases=['db'], default=''), + login_user=dict(), + login_password=dict(no_log=True), + login_host=dict(required=True), + login_port=dict(type='int', default=1433), + script=dict(required=True), + output=dict(default='default', choices=['dict', 'default']), + params=dict(type='dict'), + ) + + result = dict( + changed=False, + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + if not MSSQL_FOUND: + module.fail_json(msg=missing_required_lib( + 'pymssql'), exception=PYMSSQL_IMP_ERR) + + db = module.params['name'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + script = module.params['script'] + output = module.params['output'] + sql_params = module.params['params'] + + login_querystring = login_host + if login_port != 1433: + login_querystring = "%s:%s" % (login_host, login_port) + + if login_user is not None and login_password is None: + module.fail_json( + msg="when supplying login_user argument, login_password must also be provided") + + try: + conn = pymssql.connect( + user=login_user, password=login_password, host=login_querystring, database=db) + cursor = conn.cursor() + except Exception as e: + if "Unknown database" in str(e): + errno, errstr = e.args + module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) + else: + module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " + "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") + + conn.autocommit(True) + + query_results_key = 'query_results' + if output == 'dict': + cursor = conn.cursor(as_dict=True) + query_results_key = 'query_results_dict' + + queries = script.split('\nGO\n') + result['changed'] = True + if module.check_mode: + module.exit_json(**result) + + query_results = [] + try: + for query in queries: + cursor.execute(query, sql_params) + qry_result = [] + rows = cursor.fetchall() + while rows: + qry_result.append(rows) + rows = cursor.fetchall() + query_results.append(qry_result) + except Exception as e: + return module.fail_json(msg="query failed", query=query, error=str(e), **result) + + # ensure that the result is json serializable + qry_results = json.loads(json.dumps(query_results, default=clean_output)) + + result[query_results_key] = qry_results + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/nagios.py b/ansible_collections/community/general/plugins/modules/nagios.py new file mode 100644 index 000000000..1831d0496 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/nagios.py @@ -0,0 +1,1255 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is largely copied from the Nagios module included in the +# Func project. Original copyright follows: +# +# func-nagios - Schedule downtime and enables/disable notifications +# Copyright 2011, Red Hat, Inc. 
+# Tim Bielawa +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: nagios +short_description: Perform common tasks in Nagios related to downtime and notifications +description: + - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts." + - The C(nagios) module is not idempotent. + - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer + to the host the playbook is currently running on. + - You can specify multiple services at once by separating them with commas, .e.g. I(services=httpd,nfs,puppet). + - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself), + e.g., I(service=host). This keyword may not be given with other services at the same time. + I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.) + To schedule downtime for all services on particular host use keyword "all", e.g., I(service=all). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + action: + description: + - Action to take. + - servicegroup options were added in 2.0. + - delete_downtime options were added in 2.2. + - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0. + required: true + choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", + "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", + "servicegroup_host_downtime", "acknowledge", "forced_check" ] + type: str + host: + description: + - Host to operate on in Nagios. + type: str + cmdfile: + description: + - Path to the nagios I(command file) (FIFO pipe). + Only required if auto-detection fails. + type: str + author: + description: + - Author to leave downtime comments as. + Only used when I(action) is C(downtime) or C(acknowledge). + type: str + default: Ansible + comment: + description: + - Comment when I(action) is C(downtime) or C(acknowledge). + type: str + default: Scheduling downtime + start: + description: + - When downtime should start, in C(time_t) format (epoch seconds). + version_added: '0.2.0' + type: str + minutes: + description: + - Minutes to schedule downtime for. + - Only usable with the C(downtime) action. + type: int + default: 30 + services: + description: + - > + What to manage downtime/alerts for. Separate multiple services with commas. + I(service) is an alias for I(services). + B(Required) option when I(action) is one of: C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), C(disable_alerts). + aliases: [ "service" ] + type: str + servicegroup: + description: + - The Servicegroup we want to set downtimes/alerts for. + B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). + type: str + command: + description: + - The raw command to send to nagios, which + should not include the submitted time header or the line-feed + B(Required) option when using the C(command) action. 
+ type: str + +author: "Tim Bielawa (@tbielawa)" +''' + +EXAMPLES = ''' +- name: Set 30 minutes of apache downtime + community.general.nagios: + action: downtime + minutes: 30 + service: httpd + host: '{{ inventory_hostname }}' + +- name: Schedule an hour of HOST downtime + community.general.nagios: + action: downtime + minutes: 60 + service: host + host: '{{ inventory_hostname }}' + +- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00 + community.general.nagios: + action: downtime + start: 1555984800 + minutes: 60 + service: host + host: '{{ inventory_hostname }}' + +- name: Schedule an hour of HOST downtime, with a comment describing the reason + community.general.nagios: + action: downtime + minutes: 60 + service: host + host: '{{ inventory_hostname }}' + comment: Rebuilding machine + +- name: Schedule downtime for ALL services on HOST + community.general.nagios: + action: downtime + minutes: 45 + service: all + host: '{{ inventory_hostname }}' + +- name: Schedule downtime for a few services + community.general.nagios: + action: downtime + services: frob,foobar,qeuz + host: '{{ inventory_hostname }}' + +- name: Set 30 minutes downtime for all services in servicegroup foo + community.general.nagios: + action: servicegroup_service_downtime + minutes: 30 + servicegroup: foo + host: '{{ inventory_hostname }}' + +- name: Set 30 minutes downtime for all host in servicegroup foo + community.general.nagios: + action: servicegroup_host_downtime + minutes: 30 + servicegroup: foo + host: '{{ inventory_hostname }}' + +- name: Delete all downtime for a given host + community.general.nagios: + action: delete_downtime + host: '{{ inventory_hostname }}' + service: all + +- name: Delete all downtime for HOST with a particular comment + community.general.nagios: + action: delete_downtime + host: '{{ inventory_hostname }}' + service: host + comment: Planned maintenance + +- name: Acknowledge an HOST with a particular comment + community.general.nagios: + action: acknowledge + service: host + host: '{{ inventory_hostname }}' + comment: 'power outage - see casenr 12345' + +- name: Acknowledge an active service problem for the httpd service with a particular comment + community.general.nagios: + action: acknowledge + service: httpd + host: '{{ inventory_hostname }}' + comment: 'service crashed - see casenr 12345' + +- name: Reset a passive service check for snmp trap + community.general.nagios: + action: forced_check + service: snmp + host: '{{ inventory_hostname }}' + +- name: Force an active service check for the httpd service + community.general.nagios: + action: forced_check + service: httpd + host: '{{ inventory_hostname }}' + +- name: Force an active service check for all services of a particular host + community.general.nagios: + action: forced_check + service: all + host: '{{ inventory_hostname }}' + +- name: Force an active service check for a particular host + community.general.nagios: + action: forced_check + service: host + host: '{{ inventory_hostname }}' + +- name: Enable SMART disk alerts + community.general.nagios: + action: enable_alerts + service: smart + host: '{{ inventory_hostname }}' + +- name: Disable httpd and nfs alerts + community.general.nagios: + action: disable_alerts + service: httpd,nfs + host: '{{ inventory_hostname }}' + +- name: Disable HOST alerts + community.general.nagios: + action: disable_alerts + service: host + host: '{{ inventory_hostname }}' + +- name: Silence ALL alerts + community.general.nagios: + action: silence + host: '{{ 
inventory_hostname }}' + +- name: Unsilence all alerts + community.general.nagios: + action: unsilence + host: '{{ inventory_hostname }}' + +- name: Shut up nagios + community.general.nagios: + action: silence_nagios + +- name: Annoy me negios + community.general.nagios: + action: unsilence_nagios + +- name: Command something + community.general.nagios: + action: command + command: DISABLE_FAILURE_PREDICTION +''' + +import time +import os.path +import stat + +from ansible.module_utils.basic import AnsibleModule + + +def which_cmdfile(): + locations = [ + # rhel + '/etc/nagios/nagios.cfg', + # debian + '/etc/nagios3/nagios.cfg', + # older debian + '/etc/nagios2/nagios.cfg', + # bsd, solaris + '/usr/local/etc/nagios/nagios.cfg', + # groundwork it monitoring + '/usr/local/groundwork/nagios/etc/nagios.cfg', + # open monitoring distribution + '/omd/sites/oppy/tmp/nagios/nagios.cfg', + # ??? + '/usr/local/nagios/etc/nagios.cfg', + '/usr/local/nagios/nagios.cfg', + '/opt/nagios/etc/nagios.cfg', + '/opt/nagios/nagios.cfg', + # icinga on debian/ubuntu + '/etc/icinga/icinga.cfg', + # icinga installed from source (default location) + '/usr/local/icinga/etc/icinga.cfg', + ] + + for path in locations: + if os.path.exists(path): + for line in open(path): + if line.startswith('command_file'): + return line.split('=')[1].strip() + + return None + + +def main(): + ACTION_CHOICES = [ + 'downtime', + 'delete_downtime', + 'silence', + 'unsilence', + 'enable_alerts', + 'disable_alerts', + 'silence_nagios', + 'unsilence_nagios', + 'command', + 'servicegroup_host_downtime', + 'servicegroup_service_downtime', + 'acknowledge', + 'forced_check', + ] + + module = AnsibleModule( + argument_spec=dict( + action=dict(type='str', required=True, choices=ACTION_CHOICES), + author=dict(type='str', default='Ansible'), + comment=dict(type='str', default='Scheduling downtime'), + host=dict(type='str'), + servicegroup=dict(type='str'), + start=dict(type='str'), + minutes=dict(type='int', default=30), + cmdfile=dict(type='str', default=which_cmdfile()), + services=dict(type='str', aliases=['service']), + command=dict(type='str'), + ), + required_if=[ + ('action', 'downtime', ['host', 'services']), + ('action', 'delete_downtime', ['host', 'services']), + ('action', 'silence', ['host']), + ('action', 'unsilence', ['host']), + ('action', 'enable_alerts', ['host', 'services']), + ('action', 'disable_alerts', ['host', 'services']), + ('action', 'command', ['command']), + ('action', 'servicegroup_host_downtime', ['host', 'servicegroup']), + ('action', 'servicegroup_service_downtime', ['host', 'servicegroup']), + ('action', 'acknowledge', ['host', 'services']), + ('action', 'forced_check', ['host', 'services']), + ], + ) + + if not module.params['cmdfile']: + module.fail_json(msg='unable to locate nagios.cfg') + + ansible_nagios = Nagios(module, **module.params) + if module.check_mode: + module.exit_json(changed=True) + else: + ansible_nagios.act() + + +class Nagios(object): + """ + Perform common tasks in Nagios related to downtime and + notifications. + + The complete set of external commands Nagios handles is documented + on their website: + + http://old.nagios.org/developerinfo/externalcommands/commandlist.php + + Note that in the case of `schedule_svc_downtime`, + `enable_svc_notifications`, and `disable_svc_notifications`, the + service argument should be passed as a list. 
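+
+    Each command is written as a single line to the Nagios command file
+    (a FIFO), prefixed with the submission time in square brackets.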
+ """ + + def __init__(self, module, **kwargs): + self.module = module + self.action = kwargs['action'] + self.author = kwargs['author'] + self.comment = kwargs['comment'] + self.host = kwargs['host'] + self.servicegroup = kwargs['servicegroup'] + if kwargs['start'] is not None: + self.start = int(kwargs['start']) + else: + self.start = None + self.minutes = kwargs['minutes'] + self.cmdfile = kwargs['cmdfile'] + self.command = kwargs['command'] + + if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'): + self.services = kwargs['services'] + else: + self.services = kwargs['services'].split(',') + + self.command_results = [] + + def _now(self): + """ + The time in seconds since 12:00:00AM Jan 1, 1970 + """ + + return int(time.time()) + + def _write_command(self, cmd): + """ + Write the given command to the Nagios command file + """ + + if not os.path.exists(self.cmdfile): + self.module.fail_json(msg='nagios command file does not exist', + cmdfile=self.cmdfile) + if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode): + self.module.fail_json(msg='nagios command file is not a fifo file', + cmdfile=self.cmdfile) + try: + with open(self.cmdfile, 'w') as fp: + fp.write(cmd) + fp.flush() + self.command_results.append(cmd.strip()) + except IOError: + self.module.fail_json(msg='unable to write to nagios command file', + cmdfile=self.cmdfile) + + def _fmt_dt_str(self, cmd, host, duration, author=None, + comment=None, start=None, + svc=None, fixed=1, trigger=0): + """ + Format an external-command downtime string. + + cmd - Nagios command ID + host - Host schedule downtime on + duration - Minutes to schedule downtime for + author - Name to file the downtime as + comment - Reason for running this command (upgrade, reboot, etc) + start - Start of downtime in seconds since 12:00AM Jan 1 1970 + Default is to use the entry time (now) + svc - Service to schedule downtime for, omit when for host downtime + fixed - Start now if 1, start when a problem is detected if 0 + trigger - Optional ID of event to start downtime from. Leave as 0 for + fixed downtime. + + Syntax: [submitted] COMMAND;;[] + ;;;;;; + + """ + + entry_time = self._now() + if start is None: + start = entry_time + + hdr = "[%s] %s;%s;" % (entry_time, cmd, host) + duration_s = (duration * 60) + end = start + duration_s + + if not author: + author = self.author + + if not comment: + comment = self.comment + + if svc is not None: + dt_args = [svc, str(start), str(end), str(fixed), str(trigger), + str(duration_s), author, comment] + else: + # Downtime for a host if no svc specified + dt_args = [str(start), str(end), str(fixed), str(trigger), + str(duration_s), author, comment] + + dt_arg_str = ";".join(dt_args) + dt_str = hdr + dt_arg_str + "\n" + + return dt_str + + def _fmt_ack_str(self, cmd, host, author=None, + comment=None, svc=None, sticky=0, notify=1, persistent=0): + """ + Format an external-command acknowledge string. 
+
+        cmd - Nagios command ID
+        host - Host the problem is on
+        author - Name to file the acknowledgement as
+        comment - Reason for running this command (upgrade, reboot, etc)
+        svc - Service to acknowledge the problem for, omit for a host problem
+        sticky - the acknowledgement will remain until the host returns to an UP state if set to 1
+        notify - a notification will be sent out to contacts
+        persistent - survive across restarts of the Nagios process
+
+        Syntax: [submitted] COMMAND;<host_name>;[<service_description>];
+        <sticky>;<notify>;<persistent>;<author>;<comment>
+        """
+
+        entry_time = self._now()
+        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+        if not author:
+            author = self.author
+
+        if not comment:
+            comment = self.comment
+
+        if svc is not None:
+            ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment]
+        else:
+            # Acknowledge a host problem if no svc specified
+            ack_args = [str(sticky), str(notify), str(persistent), author, comment]
+
+        ack_arg_str = ";".join(ack_args)
+        ack_str = hdr + ack_arg_str + "\n"
+
+        return ack_str
+
+    def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+        """
+        Format an external-command downtime deletion string.
+
+        cmd - Nagios command ID
+        host - Host to remove scheduled downtime from
+        comment - Reason downtime was added (upgrade, reboot, etc)
+        start - Start of downtime in seconds since 12:00AM Jan 1 1970
+        svc - Service to remove downtime for, omit to remove all downtime for the host
+
+        Syntax: [submitted] COMMAND;<host_name>;
+        [<service_description>];[<start_time>];[<comment>]
+        """
+
+        entry_time = self._now()
+        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+        if comment is None:
+            comment = self.comment
+
+        dt_del_args = []
+        if svc is not None:
+            dt_del_args.append(svc)
+        else:
+            dt_del_args.append('')
+
+        if start is not None:
+            dt_del_args.append(str(start))
+        else:
+            dt_del_args.append('')
+
+        if comment is not None:
+            dt_del_args.append(comment)
+        else:
+            dt_del_args.append('')
+
+        dt_del_arg_str = ";".join(dt_del_args)
+        dt_del_str = hdr + dt_del_arg_str + "\n"
+
+        return dt_del_str
+
+    def _fmt_chk_str(self, cmd, host, svc=None, start=None):
+        """
+        Format an external-command forced host or service check string.
+
+        cmd - Nagios command ID
+        host - Host to check service from
+        svc - Service to check
+        start - check time
+
+        Syntax: [submitted] COMMAND;<host_name>;[<service_description>];<check_time>
+        """
+
+        entry_time = self._now()
+        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+        if start is None:
+            start = entry_time + 3
+
+        if svc is None:
+            chk_args = [str(start)]
+        else:
+            chk_args = [svc, str(start)]
+
+        chk_arg_str = ";".join(chk_args)
+        chk_str = hdr + chk_arg_str + "\n"
+
+        return chk_str
+
+    def _fmt_notif_str(self, cmd, host=None, svc=None):
+        """
+        Format an external-command notification string.
+
+        cmd - Nagios command ID.
+        host - Host to en/disable notifications on. A value is not required
+               for global notification commands.
+        svc - Service to en/disable notifications for. A value is not required
+              for host notification commands.
+
+        Syntax: [submitted] COMMAND;[<host_name>;<service_description>]
+        """
+
+        entry_time = self._now()
+        notif_str = "[%s] %s" % (entry_time, cmd)
+        if host is not None:
+            notif_str += ";%s" % host
+
+        if svc is not None:
+            notif_str += ";%s" % svc
+
+        notif_str += "\n"
+
+        return notif_str
+
+    def schedule_svc_downtime(self, host, services=None, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for a particular
+        service.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the service.
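+
+        For example (illustrative values), a 30-minute downtime for the
+        http service on web01 submitted at epoch 1672531200 is written
+        as:
+
+            [1672531200] SCHEDULE_SVC_DOWNTIME;web01;http;1672531200;1672533000;1;0;1800;Ansible;Scheduling downtime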
+
+        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>;
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_SVC_DOWNTIME"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
+            self._write_command(dt_cmd_str)
+
+    def schedule_host_downtime(self, host, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for a particular
+        host.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the host.
+
+        Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+        <fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def acknowledge_svc_problem(self, host, services=None):
+        """
+        This command is used to acknowledge a particular
+        service problem.
+
+        By acknowledging the current problem, future notifications
+        for the same service state are disabled.
+
+        Syntax: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
+        <sticky>;<notify>;<persistent>;<author>;<comment>
+        """
+
+        cmd = "ACKNOWLEDGE_SVC_PROBLEM"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service)
+            self._write_command(ack_cmd_str)
+
+    def acknowledge_host_problem(self, host):
+        """
+        This command is used to acknowledge a particular
+        host problem.
+
+        By acknowledging the current problem, future notifications
+        for the same host state are disabled.
+
+        Syntax: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;
+        <persistent>;<author>;<comment>
+        """
+
+        cmd = "ACKNOWLEDGE_HOST_PROBLEM"
+        ack_cmd_str = self._fmt_ack_str(cmd, host)
+        self._write_command(ack_cmd_str)
+
+    def schedule_forced_host_check(self, host):
+        """
+        This command schedules a forced active check for a particular host.
+
+        Syntax: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
+        """
+
+        cmd = "SCHEDULE_FORCED_HOST_CHECK"
+
+        chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+        self._write_command(chk_cmd_str)
+
+    def schedule_forced_host_svc_check(self, host):
+        """
+        This command schedules a forced active check for all services
+        associated with a particular host.
+
+        Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
+        """
+
+        cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS"
+
+        chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+        self._write_command(chk_cmd_str)
+
+    def schedule_forced_svc_check(self, host, services=None):
+        """
+        This command schedules a forced active check for a particular
+        service.
+
+        Syntax: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
+        """
+
+        cmd = "SCHEDULE_FORCED_SVC_CHECK"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service)
+            self._write_command(chk_cmd_str)
+
+    def schedule_host_svc_downtime(self, host, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for
+        all services associated with a particular host.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the services on the host.
+
+        SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+        <fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def delete_host_downtime(self, host, services=None, comment=None):
+        """
+        This command is used to remove scheduled downtime for a particular
+        host.
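+
+        For example (illustrative values), removing all downtime entries
+        for host web01 with the default comment is written as:
+
+            [1672531200] DEL_DOWNTIME_BY_HOST_NAME;web01;;;Scheduling downtime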
+
+        Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+        [<service_description>];[<start_time>];[<comment>]
+        """
+
+        cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+        if services is None:
+            dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+            self._write_command(dt_del_cmd_str)
+        else:
+            for service in services:
+                dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+                self._write_command(dt_del_cmd_str)
+
+    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all hosts in a
+        particular hostgroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the hosts.
+
+        Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all services in
+        a particular hostgroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the services.
+
+        Note that scheduling downtime for services does not
+        automatically schedule downtime for the hosts those services
+        are associated with.
+
+        Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all hosts in a
+        particular servicegroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the hosts.
+
+        Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
+        """
+        This command is used to schedule downtime for all services in
+        a particular servicegroup.
+
+        During the specified downtime, Nagios will not send
+        notifications out about the services.
+
+        Note that scheduling downtime for services does not
+        automatically schedule downtime for the hosts those services
+        are associated with.
+
+        Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+        """
+
+        cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+        self._write_command(dt_cmd_str)
+
+    def disable_host_svc_notifications(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for all services on the specified host.
+
+        Note that this command does not disable notifications from
+        being sent out about the host.
+
+        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
+
+    def disable_host_notifications(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for the specified host.
+
+        Note that this command does not disable notifications for
+        services associated with this host.
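+
+        For example (illustrative values):
+
+            [1672531200] DISABLE_HOST_NOTIFICATIONS;web01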
+
+        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "DISABLE_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
+
+    def disable_svc_notifications(self, host, services=None):
+        """
+        This command is used to prevent notifications from being sent
+        out for the specified service.
+
+        Note that this command does not disable notifications from
+        being sent out about the host.
+
+        Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+        """
+
+        cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+        if services is None:
+            services = []
+
+        for service in services:
+            notif_str = self._fmt_notif_str(cmd, host, svc=service)
+            self._write_command(notif_str)
+
+    def disable_servicegroup_host_notifications(self, servicegroup):
+        """
+        This command is used to prevent notifications from being sent
+        out for all hosts in the specified servicegroup.
+
+        Note that this command does not disable notifications for
+        services associated with hosts in this service group.
+
+        Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        self._write_command(notif_str)
+
+    def disable_servicegroup_svc_notifications(self, servicegroup):
+        """
+        This command is used to prevent notifications from being sent
+        out for all services in the specified servicegroup.
+
+        Note that this does not prevent notifications from being sent
+        out about the hosts in this servicegroup.
+
+        Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        self._write_command(notif_str)
+
+    def disable_hostgroup_host_notifications(self, hostgroup):
+        """
+        Disables notifications for all hosts in a particular
+        hostgroup.
+
+        Note that this does not disable notifications for the services
+        associated with the hosts in the hostgroup - see the
+        DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+        Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        self._write_command(notif_str)
+
+    def disable_hostgroup_svc_notifications(self, hostgroup):
+        """
+        Disables notifications for all services associated with hosts
+        in a particular hostgroup.
+
+        Note that this does not disable notifications for the hosts in
+        the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+        command for that.
+
+        Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        self._write_command(notif_str)
+
+    def enable_host_notifications(self, host):
+        """
+        Enables notifications for a particular host.
+
+        Note that this command does not enable notifications for
+        services associated with this host.
+
+        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "ENABLE_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        self._write_command(notif_str)
+
+    def enable_host_svc_notifications(self, host):
+        """
+        Enables notifications for all services on the specified host.
+
+        Note that this does not enable notifications for the host.
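+
+        For example (illustrative values):
+
+            [1672531200] ENABLE_HOST_SVC_NOTIFICATIONS;web01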
+
+        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, host)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_svc_notifications(self, host, services=None):
+        """
+        Enables notifications for a particular service.
+
+        Note that this does not enable notifications for the host.
+
+        Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+        """
+
+        cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+        if services is None:
+            services = []
+
+        nagios_return = True
+        return_str_list = []
+        for service in services:
+            notif_str = self._fmt_notif_str(cmd, host, svc=service)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_hostgroup_host_notifications(self, hostgroup):
+        """
+        Enables notifications for all hosts in a particular hostgroup.
+
+        Note that this command does not enable notifications for
+        services associated with the hosts in this hostgroup.
+
+        Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_hostgroup_svc_notifications(self, hostgroup):
+        """
+        Enables notifications for all services that are associated
+        with hosts in a particular hostgroup.
+
+        Note that this does not enable notifications for the hosts in
+        this hostgroup.
+
+        Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+        """
+
+        cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, hostgroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_servicegroup_host_notifications(self, servicegroup):
+        """
+        Enables notifications for all hosts that have services that
+        are members of a particular servicegroup.
+
+        Note that this command does not enable notifications for
+        services associated with the hosts in this servicegroup.
+
+        Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def enable_servicegroup_svc_notifications(self, servicegroup):
+        """
+        Enables notifications for all services that are members of a
+        particular servicegroup.
+
+        Note that this does not enable notifications for the hosts in
+        this servicegroup.
+
+        Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+        """
+
+        cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+        notif_str = self._fmt_notif_str(cmd, servicegroup)
+        nagios_return = self._write_command(notif_str)
+
+        if nagios_return:
+            return notif_str
+        else:
+            return "Fail: could not write to the command file"
+
+    def silence_host(self, host):
+        """
+        This command is used to prevent notifications from being sent
+        out for the host and all services on the specified host.
+
+        This is equivalent to calling disable_host_svc_notifications
+        and disable_host_notifications.
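+
+        For example (illustrative values), silencing web01 writes two
+        commands to the command file:
+
+            [1672531200] DISABLE_HOST_SVC_NOTIFICATIONS;web01
+            [1672531200] DISABLE_HOST_NOTIFICATIONS;web01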
+
+        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = [
+            "DISABLE_HOST_SVC_NOTIFICATIONS",
+            "DISABLE_HOST_NOTIFICATIONS"
+        ]
+        nagios_return = True
+        return_str_list = []
+        for c in cmd:
+            notif_str = self._fmt_notif_str(c, host)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def unsilence_host(self, host):
+        """
+        This command is used to enable notifications for the host and
+        all services on the specified host.
+
+        This is equivalent to calling enable_host_svc_notifications
+        and enable_host_notifications.
+
+        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+        """
+
+        cmd = [
+            "ENABLE_HOST_SVC_NOTIFICATIONS",
+            "ENABLE_HOST_NOTIFICATIONS"
+        ]
+        nagios_return = True
+        return_str_list = []
+        for c in cmd:
+            notif_str = self._fmt_notif_str(c, host)
+            nagios_return = self._write_command(notif_str) and nagios_return
+            return_str_list.append(notif_str)
+
+        if nagios_return:
+            return return_str_list
+        else:
+            return "Fail: could not write to the command file"
+
+    def silence_nagios(self):
+        """
+        This command is used to disable notifications for all hosts and services
+        in nagios.
+
+        This is a 'SHUT UP, NAGIOS' command
+        """
+        cmd = 'DISABLE_NOTIFICATIONS'
+        self._write_command(self._fmt_notif_str(cmd))
+
+    def unsilence_nagios(self):
+        """
+        This command is used to enable notifications for all hosts and services
+        in nagios.
+
+        This is an 'OK, NAGIOS, GO' command
+        """
+        cmd = 'ENABLE_NOTIFICATIONS'
+        self._write_command(self._fmt_notif_str(cmd))
+
+    def nagios_cmd(self, cmd):
+        """
+        This sends an arbitrary command to nagios.
+
+        It prepends the submitted time and appends a newline.
+
+        You just have to provide the properly formatted command.
+        """
+
+        pre = '[%s]' % int(time.time())
+
+        post = '\n'
+        cmdstr = '%s %s%s' % (pre, cmd, post)
+        self._write_command(cmdstr)
+
+    def act(self):
+        """
+        Figure out which action was requested from Ansible and dispatch
+        to the matching method.
+        """
+        # host or service downtime?
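+        # The 'services' parameter acts as a selector here: the literal
+        # string 'host' targets the host itself, 'all' targets every
+        # service on the host, and anything else was already split in
+        # __init__ into a list of individual service names.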
+ if self.action == 'downtime': + if self.services == 'host': + self.schedule_host_downtime(self.host, minutes=self.minutes, + start=self.start) + elif self.services == 'all': + self.schedule_host_svc_downtime(self.host, minutes=self.minutes, + start=self.start) + else: + self.schedule_svc_downtime(self.host, + services=self.services, + minutes=self.minutes, + start=self.start) + + elif self.action == 'acknowledge': + if self.services == 'host': + self.acknowledge_host_problem(self.host) + else: + self.acknowledge_svc_problem(self.host, services=self.services) + + elif self.action == 'delete_downtime': + if self.services == 'host': + self.delete_host_downtime(self.host) + elif self.services == 'all': + self.delete_host_downtime(self.host, comment='') + else: + self.delete_host_downtime(self.host, services=self.services) + + elif self.action == 'forced_check': + if self.services == 'host': + self.schedule_forced_host_check(self.host) + elif self.services == 'all': + self.schedule_forced_host_svc_check(self.host) + else: + self.schedule_forced_svc_check(self.host, services=self.services) + + elif self.action == "servicegroup_host_downtime": + if self.servicegroup: + self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) + elif self.action == "servicegroup_service_downtime": + if self.servicegroup: + self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start) + + # toggle the host AND service alerts + elif self.action == 'silence': + self.silence_host(self.host) + + elif self.action == 'unsilence': + self.unsilence_host(self.host) + + # toggle host/svc alerts + elif self.action == 'enable_alerts': + if self.services == 'host': + self.enable_host_notifications(self.host) + elif self.services == 'all': + self.enable_host_svc_notifications(self.host) + else: + self.enable_svc_notifications(self.host, + services=self.services) + + elif self.action == 'disable_alerts': + if self.services == 'host': + self.disable_host_notifications(self.host) + elif self.services == 'all': + self.disable_host_svc_notifications(self.host) + else: + self.disable_svc_notifications(self.host, + services=self.services) + elif self.action == 'silence_nagios': + self.silence_nagios() + + elif self.action == 'unsilence_nagios': + self.unsilence_nagios() + + elif self.action == 'command': + self.nagios_cmd(self.command) + + # wtf? + else: + self.module.fail_json(msg="unknown action specified: '%s'" % + self.action) + + self.module.exit_json(nagios_commands=self.command_results, + changed=True) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/netcup_dns.py b/ansible_collections/community/general/plugins/modules/netcup_dns.py new file mode 100644 index 000000000..77be50b2c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/netcup_dns.py @@ -0,0 +1,296 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018 Nicolai Buchwitz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: netcup_dns +notes: [] +short_description: Manage Netcup DNS records +description: + - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)." 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_key: + description: + - "API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))." + required: true + type: str + api_password: + description: + - "API password for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))." + required: true + type: str + customer_id: + description: + - Netcup customer id. + required: true + type: int + domain: + description: + - Domainname the records should be added / removed. + required: true + type: str + record: + description: + - Record to add or delete, supports wildcard (*). Default is C(@) (e.g. the zone name). + default: "@" + aliases: [ name ] + type: str + type: + description: + - Record type. + choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS'] + required: true + type: str + value: + description: + - Record value. + required: true + type: str + solo: + type: bool + default: false + description: + - Whether the record should be the only one for that record type and record name. Only use with I(state=present). + - This will delete all other records with the same record name and type. + priority: + description: + - Record priority. Required for I(type=MX). + required: false + type: int + state: + description: + - Whether the record should exist or not. + required: false + default: present + choices: [ 'present', 'absent' ] + type: str + timeout: + description: + - HTTP(S) connection timeout in seconds. + default: 5 + type: int + version_added: 5.7.0 +requirements: + - "nc-dnsapi >= 0.1.3" +author: "Nicolai Buchwitz (@nbuchwitz)" + +''' + +EXAMPLES = ''' +- name: Create a record of type A + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "mail" + type: "A" + value: "127.0.0.1" + +- name: Delete that record + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "mail" + type: "A" + value: "127.0.0.1" + state: absent + +- name: Create a wildcard record + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + name: "*" + type: "A" + value: "127.0.1.1" + +- name: Set the MX record for example.com + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + domain: "example.com" + type: "MX" + value: "mail.example.com" + +- name: Set a record and ensure that this is the only one + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." + name: "demo" + domain: "example.com" + type: "AAAA" + value: "::1" + solo: true + +- name: Increase the connection timeout to avoid problems with an unstable connection + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." 
+    domain: "example.com"
+    name: "mail"
+    type: "A"
+    value: "127.0.0.1"
+    timeout: 30
+
+'''
+
+RETURN = '''
+records:
+    description: list containing all records
+    returned: success
+    type: complex
+    contains:
+        name:
+            description: the record name
+            returned: success
+            type: str
+            sample: fancy-hostname
+        type:
+            description: the record type
+            returned: success
+            type: str
+            sample: A
+        value:
+            description: the record destination
+            returned: success
+            type: str
+            sample: 127.0.0.1
+        priority:
+            description: the record priority (only relevant if type=MX)
+            returned: success
+            type: int
+            sample: 0
+        id:
+            description: internal id of the record
+            returned: success
+            type: int
+            sample: 12345
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+NCDNSAPI_IMP_ERR = None
+try:
+    import nc_dnsapi
+    from nc_dnsapi import DNSRecord
+
+    HAS_NCDNSAPI = True
+except ImportError:
+    NCDNSAPI_IMP_ERR = traceback.format_exc()
+    HAS_NCDNSAPI = False
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            api_key=dict(required=True, no_log=True),
+            api_password=dict(required=True, no_log=True),
+            customer_id=dict(required=True, type='int'),
+
+            domain=dict(required=True),
+            record=dict(required=False, default='@', aliases=['name']),
+            type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
+            value=dict(required=True),
+            priority=dict(required=False, type='int'),
+            solo=dict(required=False, type='bool', default=False),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            timeout=dict(required=False, type='int', default=5),
+
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_NCDNSAPI:
+        module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR)
+
+    api_key = module.params.get('api_key')
+    api_password = module.params.get('api_password')
+    customer_id = module.params.get('customer_id')
+    domain = module.params.get('domain')
+    record_type = module.params.get('type')
+    record = module.params.get('record')
+    value = module.params.get('value')
+    priority = module.params.get('priority')
+    solo = module.params.get('solo')
+    state = module.params.get('state')
+    timeout = module.params.get('timeout')
+
+    if record_type == 'MX' and not priority:
+        module.fail_json(msg="record type MX requires the 'priority' argument")
+
+    has_changed = False
+    all_records = []
+    try:
+        with nc_dnsapi.Client(customer_id, api_key, api_password, timeout) as api:
+            all_records = api.dns_records(domain)
+            record = DNSRecord(record, record_type, value, priority=priority)
+
+            # try to get existing record
+            record_exists = False
+            for r in all_records:
+                if r == record:
+                    record_exists = True
+                    record = r
+
+                    break
+
+            if state == 'present':
+                if solo:
+                    obsolete_records = [r for r in all_records if
+                                        r.hostname == record.hostname
+                                        and r.type == record.type
+                                        and not r.destination == record.destination]
+
+                    if obsolete_records:
+                        if not module.check_mode:
+                            all_records = api.delete_dns_records(domain, obsolete_records)
+
+                        has_changed = True
+
+                if not record_exists:
+                    if not module.check_mode:
+                        all_records = api.add_dns_record(domain, record)
+
+                    has_changed = True
+            elif state == 'absent' and record_exists:
+                if not module.check_mode:
+                    all_records = api.delete_dns_record(domain, record)
+
+                has_changed = True
+
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+    module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
+
+
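+# Illustrative sketch (not part of the module logic above): the I(solo)
+# cleanup boils down to "same record name and type, different value".
+# Assuming records shaped like nc_dnsapi's DNSRecord, the filter behaves
+# like this simplified tuple version:
+#
+#   existing = [("mail", "A", "127.0.0.1"), ("mail", "A", "10.0.0.1")]
+#   desired = ("mail", "A", "127.0.0.1")
+#   obsolete = [r for r in existing
+#               if r[0] == desired[0] and r[1] == desired[1] and r[2] != desired[2]]
+#   # -> [("mail", "A", "10.0.0.1")] is deleted before ensuring the desired record
+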
+def record_data(r): + return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id} + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/newrelic_deployment.py b/ansible_collections/community/general/plugins/modules/newrelic_deployment.py new file mode 100644 index 000000000..ac9903b57 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/newrelic_deployment.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2013 Matt Coddington +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: newrelic_deployment +author: "Matt Coddington (@mcodd)" +short_description: Notify New Relic about app deployments +description: + - Notify New Relic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/record-monitor-deployments/) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - API token to place in the Api-Key header. + required: true + app_name: + type: str + description: + - The value of app_name in the newrelic.yml file used by the application. + - One of I(app_name) or I(application_id) is required. + required: false + application_id: + type: str + description: + - The application ID found in the metadata of the application in APM. + - One of I(app_name) or I(application_id) is required. + required: false + changelog: + type: str + description: + - A list of changes for this deployment + required: false + description: + type: str + description: + - Text annotation for the deployment - notes for you + required: false + revision: + type: str + description: + - A revision number (e.g., git commit SHA) + required: true + user: + type: str + description: + - The name of the user/process that triggered this deployment + required: false + appname: + type: str + description: + - Name of the application. + - This option has been deprecated and will be removed in community.general 7.0.0. Please do not use. + required: false + environment: + type: str + description: + - The environment for this deployment. + - This option has been deprecated and will be removed community.general 7.0.0. Please do not use. + required: false + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: true + type: bool +requirements: [] +''' + +EXAMPLES = ''' +- name: Notify New Relic about an app deployment + community.general.newrelic_deployment: + token: AAAAAA + app_name: myapp + user: ansible deployment + revision: '1.0' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import quote +import json + +# =========================================== +# Module execution. 
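+#
+# A deployment marker is POSTed as JSON to the v2 deployments endpoint,
+# roughly as follows (illustrative values; main() below assembles the
+# real payload from the module parameters):
+#
+#   POST https://api.newrelic.com/v2/applications/<app_id>/deployments.json
+#   Api-Key: <token>
+#
+#   {"deployment": {"revision": "1.0", "user": "ansible deployment"}}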
+# + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + app_name=dict(required=False), + application_id=dict(required=False), + changelog=dict(required=False), + description=dict(required=False), + revision=dict(required=True), + user=dict(required=False), + appname=dict(required=False, removed_in_version='7.0.0', removed_from_collection='community.general'), + environment=dict(required=False, removed_in_version='7.0.0', removed_from_collection='community.general'), + validate_certs=dict(default=True, type='bool'), + ), + required_one_of=[['app_name', 'application_id']], + supports_check_mode=True + ) + + # build list of params + params = {} + if module.params["app_name"] and module.params["application_id"]: + module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") + + app_id = None + if module.params["app_name"]: + app_id = get_application_id(module) + elif module.params["application_id"]: + app_id = module.params["application_id"] + else: + module.fail_json(msg="you must set one of 'app_name' or 'application_id'") + + if app_id is None: + module.fail_json(msg="No application with name %s is found in NewRelic" % module.params["app_name"]) + + for item in ["changelog", "description", "revision", "user"]: + if module.params[item]: + params[item] = module.params[item] + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + # Send the data to New Relic + url = "https://api.newrelic.com/v2/applications/%s/deployments.json" % quote(str(app_id), safe='') + data = { + 'deployment': params + } + headers = { + 'Api-Key': module.params["token"], + 'Content-Type': 'application/json', + } + response, info = fetch_url(module, url, data=module.jsonify(data), headers=headers, method="POST") + if info['status'] in (200, 201): + module.exit_json(changed=True) + else: + module.fail_json(msg="Unable to insert deployment marker: %s" % info['msg']) + + +def get_application_id(module): + url = "https://api.newrelic.com/v2/applications.json" + data = "filter[name]=%s" % module.params["app_name"] + headers = { + 'Api-Key': module.params["token"], + } + response, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] not in (200, 201): + module.fail_json(msg="Unable to get application: %s" % info['msg']) + + result = json.loads(response.read()) + if result is None or len(result.get("applications", "")) == 0: + module.fail_json(msg='No application found with name "%s"' % module.params["app_name"]) + + return result["applications"][0]["id"] + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/nexmo.py b/ansible_collections/community/general/plugins/modules/nexmo.py new file mode 100644 index 000000000..7461c1cb9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/nexmo.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Matt Martz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: nexmo +short_description: Send a SMS via nexmo +description: + - Send a SMS message via nexmo +author: "Matt Martz (@sivel)" +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + api_key: + 
+    type: str
+    description:
+      - Nexmo API key.
+    required: true
+  api_secret:
+    type: str
+    description:
+      - Nexmo API secret.
+    required: true
+  src:
+    type: int
+    description:
+      - Nexmo number to send from.
+    required: true
+  dest:
+    type: list
+    elements: int
+    description:
+      - Phone number(s) to send SMS message to.
+    required: true
+  msg:
+    type: str
+    description:
+      - Message text to send. Messages longer than 160 characters will be
+        split into multiple messages.
+    required: true
+  validate_certs:
+    description:
+      - If C(false), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    type: bool
+    default: true
+extends_documentation_fragment:
+  - ansible.builtin.url
+  - community.general.attributes
+'''
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+  community.general.nexmo:
+    api_key: 640c8a53
+    api_secret: 0ce239a6
+    src: 12345678901
+    dest:
+      - 10987654321
+      - 16789012345
+    msg: '{{ inventory_hostname }} completed'
+  delegate_to: localhost
+"""
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+    failed = list()
+    responses = dict()
+    msg = {
+        'api_key': module.params.get('api_key'),
+        'api_secret': module.params.get('api_secret'),
+        'from': module.params.get('src'),
+        'text': module.params.get('msg')
+    }
+    for number in module.params.get('dest'):
+        msg['to'] = number
+        url = "%s?%s" % (NEXMO_API, urlencode(msg))
+
+        headers = dict(Accept='application/json')
+        response, info = fetch_url(module, url, headers=headers)
+        if info['status'] != 200:
+            failed.append(number)
+            responses[number] = dict(failed=True)
+            # skip JSON parsing when the HTTP request itself failed
+            continue
+
+        try:
+            responses[number] = json.load(response)
+        except Exception:
+            failed.append(number)
+            responses[number] = dict(failed=True)
+        else:
+            for message in responses[number]['messages']:
+                if int(message['status']) != 0:
+                    failed.append(number)
+                    responses[number] = dict(failed=True, **responses[number])
+
+    if failed:
+        msg = 'One or more messages failed to send'
+    else:
+        msg = ''
+
+    module.exit_json(failed=bool(failed), msg=msg, changed=False,
+                     responses=responses)
+
+
+def main():
+    argument_spec = url_argument_spec()
+    argument_spec.update(
+        dict(
+            api_key=dict(required=True, no_log=True),
+            api_secret=dict(required=True, no_log=True),
+            src=dict(required=True, type='int'),
+            dest=dict(required=True, type='list', elements='int'),
+            msg=dict(required=True),
+        ),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec
+    )
+
+    send_msg(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/nginx_status_info.py b/ansible_collections/community/general/plugins/modules/nginx_status_info.py
new file mode 100644
index 000000000..6bbea078b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nginx_status_info.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, René Moser
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: nginx_status_info
+short_description: Retrieve information on nginx status
+description: + - Gathers information from nginx from an URL having C(stub_status) enabled. +author: "René Moser (@resmo)" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + url: + type: str + description: + - URL of the nginx status. + required: true + timeout: + type: int + description: + - HTTP connection timeout in seconds. + required: false + default: 10 + +notes: + - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information. +''' + +EXAMPLES = r''' +# Gather status info from nginx on localhost +- name: Get current http stats + community.general.nginx_status_info: + url: http://localhost/nginx_status + register: result + +# Gather status info from nginx on localhost with a custom timeout of 20 seconds +- name: Get current http stats + community.general.nginx_status_info: + url: http://localhost/nginx_status + timeout: 20 + register: result +''' + +RETURN = r''' +--- +active_connections: + description: Active connections. + returned: success + type: int + sample: 2340 +accepts: + description: The total number of accepted client connections. + returned: success + type: int + sample: 81769947 +handled: + description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached. + returned: success + type: int + sample: 81769947 +requests: + description: The total number of client requests. + returned: success + type: int + sample: 144332345 +reading: + description: The current number of connections where nginx is reading the request header. + returned: success + type: int + sample: 0 +writing: + description: The current number of connections where nginx is writing the response back to the client. + returned: success + type: int + sample: 241 +waiting: + description: The current number of idle client connections waiting for a request. + returned: success + type: int + sample: 2092 +data: + description: HTTP response as is. 
+ returned: success + type: str + sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n" +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_text + + +class NginxStatusInfo(object): + + def __init__(self): + self.url = module.params.get('url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'active_connections': None, + 'accepts': None, + 'handled': None, + 'requests': None, + 'reading': None, + 'writing': None, + 'waiting': None, + 'data': None, + } + (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout) + if not response: + module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout)) + + data = to_text(response.read(), errors='surrogate_or_strict') + if not data: + return result + + result['data'] = data + expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \ + r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)' + match = re.match(expr, data, re.S) + if match: + result['active_connections'] = int(match.group(1)) + result['accepts'] = int(match.group(2)) + result['handled'] = int(match.group(3)) + result['requests'] = int(match.group(4)) + result['reading'] = int(match.group(5)) + result['writing'] = int(match.group(6)) + result['waiting'] = int(match.group(7)) + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + url=dict(type='str', required=True), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + nginx_status_info = NginxStatusInfo().run() + module.exit_json(changed=False, **nginx_status_info) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/nictagadm.py b/ansible_collections/community/general/plugins/modules/nictagadm.py new file mode 100644 index 000000000..074e09b4a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/nictagadm.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Bruce Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: nictagadm +short_description: Manage nic tags on SmartOS systems +description: + - Create or delete nic tags on SmartOS systems. +author: + - Bruce Smith (@SmithX10) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the nic tag. + required: true + type: str + mac: + description: + - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub). + - Parameters I(mac) and I(etherstub) are mutually exclusive. + type: str + etherstub: + description: + - Specifies that the nic tag will be attached to a created I(etherstub). + - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac). + type: bool + default: false + mtu: + description: + - Specifies the size of the I(mtu) of the desired nic tag. + - Parameters I(mtu) and I(etherstub) are mutually exclusive. 
+ type: int + force: + description: + - When I(state) is absent set this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs. + type: bool + default: false + state: + description: + - Create or delete a SmartOS nic tag. + type: str + choices: [ absent, present ] + default: present +''' + +EXAMPLES = r''' +- name: Create 'storage0' on '00:1b:21:a3:f5:4d' + community.general.nictagadm: + name: storage0 + mac: 00:1b:21:a3:f5:4d + mtu: 9000 + state: present + +- name: Remove 'storage0' nic tag + community.general.nictagadm: + name: storage0 + state: absent +''' + +RETURN = r''' +name: + description: nic tag name + returned: always + type: str + sample: storage0 +mac: + description: MAC Address that the nic tag was attached to. + returned: always + type: str + sample: 00:1b:21:a3:f5:4d +etherstub: + description: specifies if the nic tag will create and attach to an etherstub. + returned: always + type: bool + sample: false +mtu: + description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive. + returned: always + type: int + sample: 1500 +force: + description: Shows if -f was used during the deletion of a nic tag + returned: always + type: bool + sample: false +state: + description: state of the target + returned: always + type: str + sample: present +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.network import is_mac + + +class NicTag(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.mac = module.params['mac'] + self.etherstub = module.params['etherstub'] + self.mtu = module.params['mtu'] + self.force = module.params['force'] + self.state = module.params['state'] + + self.nictagadm_bin = self.module.get_bin_path('nictagadm', True) + + def is_valid_mac(self): + return is_mac(self.mac.lower()) + + def nictag_exists(self): + cmd = [self.nictagadm_bin, 'exists', self.name] + (rc, dummy, dummy) = self.module.run_command(cmd) + + return rc == 0 + + def add_nictag(self): + cmd = [self.nictagadm_bin, '-v', 'add'] + + if self.etherstub: + cmd.append('-l') + + if self.mtu: + cmd.append('-p') + cmd.append('mtu=' + str(self.mtu)) + + if self.mac: + cmd.append('-p') + cmd.append('mac=' + str(self.mac)) + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_nictag(self): + cmd = [self.nictagadm_bin, '-v', 'delete'] + + if self.force: + cmd.append('-f') + + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + mac=dict(type='str'), + etherstub=dict(type='bool', default=False), + mtu=dict(type='int'), + force=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + mutually_exclusive=[ + ['etherstub', 'mac'], + ['etherstub', 'mtu'], + ], + required_if=[ + ['etherstub', False, ['name', 'mac']], + ['state', 'absent', ['name', 'force']], + ], + supports_check_mode=True + ) + + nictag = NicTag(module) + + rc = None + out = '' + err = '' + result = dict( + changed=False, + etherstub=nictag.etherstub, + force=nictag.force, + name=nictag.name, + mac=nictag.mac, + mtu=nictag.mtu, + state=nictag.state, + ) + + if not nictag.is_valid_mac(): + module.fail_json(msg='Invalid MAC Address Value', + name=nictag.name, + mac=nictag.mac, + etherstub=nictag.etherstub) + + if nictag.state == 'absent': + if nictag.nictag_exists(): + 
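+            # Report the pending change in check mode without invoking
+            # nictagadm; the actual delete only runs outside check mode.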
if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nictag.delete_nictag() + if rc != 0: + module.fail_json(name=nictag.name, msg=err, rc=rc) + elif nictag.state == 'present': + if not nictag.nictag_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nictag.add_nictag() + if rc is not None and rc != 0: + module.fail_json(name=nictag.name, msg=err, rc=rc) + + if rc is not None: + result['changed'] = True + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/nmcli.py b/ansible_collections/community/general/plugins/modules/nmcli.py new file mode 100644 index 000000000..08680bf6e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/nmcli.py @@ -0,0 +1,2504 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Chris Long +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: nmcli +author: + - Chris Long (@alcamie101) +short_description: Manage Networking +requirements: + - nmcli +extends_documentation_fragment: + - community.general.attributes +description: + - 'Manage the network devices. Create, modify and manage various connection and device type e.g., ethernet, teams, bonds, vlans etc.' + - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.' + - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.' + - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager' + - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.' +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + state: + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + type: str + required: true + choices: [ absent, present ] + autoconnect: + description: + - Whether the connection should start on boot. + - Whether the connection profile can be automatically activated + type: bool + default: true + conn_name: + description: + - The name used to call the connection. Pattern is [-][-]. + type: str + required: true + ifname: + description: + - The interface to bind the connection to. + - The connection will only be applicable to this interface name. + - A special value of C('*') can be used for interface-independent connections. + - The ifname argument is mandatory for all connection types except bond, team, bridge, vlan and vpn. + - This parameter defaults to C(conn_name) when left unset for all connection types except vpn that removes it. + type: str + type: + description: + - This is the type of device or network connection that you wish to create or modify. + - Type C(dummy) is added in community.general 3.5.0. + - Type C(generic) is added in Ansible 2.5. + - Type C(infiniband) is added in community.general 2.0.0. + - Type C(gsm) is added in community.general 3.7.0. + - Type C(macvlan) is added in community.general 6.6.0. 
+ - Type C(wireguard) is added in community.general 4.3.0. + - Type C(vpn) is added in community.general 5.1.0. + type: str + choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan, + wifi, gsm, wireguard, vpn ] + mode: + description: + - This is the type of device or network connection that you wish to create for a bond or bridge. + type: str + choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ] + default: balance-rr + transport_mode: + description: + - This option sets the connection type of Infiniband IPoIB devices. + type: str + choices: [ datagram, connected ] + version_added: 5.8.0 + master: + description: + - Master ] STP forwarding delay, in seconds. + type: int + default: 15 + hellotime: + description: + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds. + type: int + default: 2 + maxage: + description: + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds. + type: int + default: 20 + ageingtime: + description: + - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds. + type: int + default: 300 + mac: + description: + - MAC address of the connection. + - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel. + type: str + slavepriority: + description: + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave. + type: int + default: 32 + path_cost: + description: + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave. + type: int + default: 100 + hairpin: + description: + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the + frame was received on. + - The default value is C(true), but that is being deprecated + and it will be changed to C(false) in community.general 7.0.0. + type: bool + runner: + description: + - This is the type of device or network connection that you wish to create for a team. + type: str + choices: [ broadcast, roundrobin, activebackup, loadbalance, lacp ] + default: roundrobin + version_added: 3.4.0 + runner_hwaddr_policy: + description: + - This defines the policy of how hardware addresses of team device and port devices + should be set during the team lifetime. + type: str + choices: [ same_all, by_active, only_active ] + version_added: 3.4.0 + runner_fast_rate: + description: + - Option specifies the rate at which our link partner is asked to transmit LACPDU + packets. If this is C(true) then packets will be sent once per second. Otherwise they + will be sent every 30 seconds. + - Only allowed for C(lacp) runner. + type: bool + version_added: 6.5.0 + vlanid: + description: + - This is only used with VLAN - VLAN ID in range <0-4095>. + type: int + vlandev: + description: + - This is only used with VLAN - parent device this VLAN is on, can use ifname. + type: str + flags: + description: + - This is only used with VLAN - flags. + type: str + ingress: + description: + - This is only used with VLAN - VLAN ingress priority mapping. + type: str + egress: + description: + - This is only used with VLAN - VLAN egress priority mapping. + type: str + vxlan_id: + description: + - This is only used with VXLAN - VXLAN ID. + type: int + vxlan_remote: + description: + - This is only used with VXLAN - VXLAN destination IP address. 
+ type: str + vxlan_local: + description: + - This is only used with VXLAN - VXLAN local IP address. + type: str + ip_tunnel_dev: + description: + - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. + type: str + ip_tunnel_remote: + description: + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. + type: str + ip_tunnel_local: + description: + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. + type: str + ip_tunnel_input_key: + description: + - The key used for tunnel input packets. + - Only used when I(type=gre). + type: str + version_added: 3.6.0 + ip_tunnel_output_key: + description: + - The key used for tunnel output packets. + - Only used when I(type=gre). + type: str + version_added: 3.6.0 + zone: + description: + - The trust level of the connection. + - When updating this property on a currently activated connection, the change takes effect immediately. + type: str + version_added: 2.0.0 + wifi_sec: + description: + - The security configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - 'An up-to-date list of supported attributes can be found here: + U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' + - 'For instance to use common WPA-PSK auth with a password: + C({key-mgmt: wpa-psk, psk: my_password}).' + type: dict + suboptions: + auth-alg: + description: + - When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here. + - One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP. + - When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties + must be specified. + type: str + choices: [ open, shared, leap ] + fils: + description: + - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. + - One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3) + (enable FILS and fail if not supported). + - When set to C(0) and no global default is set, FILS will be optionally enabled. + type: int + choices: [ 0, 1, 2, 3 ] + default: 0 + group: + description: + - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in + the list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [ wep40, wep104, tkip, ccmp ] + key-mgmt: + description: + - Key management used for the connection. + - One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2 + + WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only). + - This property must be set for any Wi-Fi connection that uses security. + type: str + choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ] + leap-password-flags: + description: Flags indicating how to handle the I(leap-password) property. + type: list + elements: int + leap-password: + description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). 
+ type: str
+ leap-username:
+ description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)).
+ type: str
+ pairwise:
+ description:
+ - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the
+ list.
+ - For maximum compatibility leave this property empty.
+ type: list
+ elements: str
+ choices: [ tkip, ccmp ]
+ pmf:
+ description:
+ - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection.
+ - One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3)
+ (enable PMF and fail if not supported).
+ - When set to C(0) and no global default is set, PMF will be optionally enabled.
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ default: 0
+ proto:
+ description:
+ - List of strings specifying the allowed WPA protocol versions to use.
+ - Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN).
+ - If not specified, both WPA and RSN connections are allowed.
+ type: list
+ elements: str
+ choices: [ wpa, rsn ]
+ psk-flags:
+ description: Flags indicating how to handle the I(psk) property.
+ type: list
+ elements: int
+ psk:
+ description:
+ - Pre-Shared-Key for WPA networks.
+ - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the
+ actual key, or the key in the form of 64 hexadecimal characters.
+ - WPA3-Personal networks use a passphrase of any length for SAE authentication.
+ type: str
+ wep-key-flags:
+ description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties.
+ type: list
+ elements: int
+ wep-key-type:
+ description:
+ - Controls the interpretation of WEP keys.
+ - Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII
+ password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the
+ actual WEP key.
+ type: int
+ choices: [ 1, 2 ]
+ wep-key0:
+ description:
+ - Index 0 WEP key. This is the WEP key used in most networks.
+ - See the I(wep-key-type) property for a description of how this key is interpreted.
+ type: str
+ wep-key1:
+ description:
+ - Index 1 WEP key. This WEP index is not used by most networks.
+ - See the I(wep-key-type) property for a description of how this key is interpreted.
+ type: str
+ wep-key2:
+ description:
+ - Index 2 WEP key. This WEP index is not used by most networks.
+ - See the I(wep-key-type) property for a description of how this key is interpreted.
+ type: str
+ wep-key3:
+ description:
+ - Index 3 WEP key. This WEP index is not used by most networks.
+ - See the I(wep-key-type) property for a description of how this key is interpreted.
+ type: str
+ wep-tx-keyidx:
+ description:
+ - When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here.
+ - Valid values are C(0) (default key) through C(3).
+ - Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4).
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ default: 0
+ wps-method:
+ description:
+ - Flags indicating which mode of WPS is to be used if any.
+ - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS
+ enrollment from the Access Point capabilities.
+ - WPS can be disabled by setting this property to a value of C(1).
+ type: int
+ default: 0
+ version_added: 3.0.0
+ ssid:
+ description:
+ - Name of the wireless router or the access point.
+ type: str
+ version_added: 3.0.0
+ wifi:
+ description:
+ - The configuration of the WiFi connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).'
+ - 'For instance to create a hidden AP mode WiFi connection:
+ C({hidden: true, mode: ap}).'
+ type: dict
+ suboptions:
+ ap-isolation:
+ description:
+ - Configures AP isolation, which prevents communication between wireless devices connected to this AP.
+ - This property can be set to a value different from C(-1) only when the interface is configured in AP mode.
+ - If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks
+ from other clients in the network. At the same time, it prevents devices from accessing resources on the same wireless network, such as
+ file shares, printers, etc.
+ - If set to C(0), devices can talk to each other.
+ - When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0).
+ type: int
+ choices: [ -1, 0, 1 ]
+ default: -1
+ assigned-mac-address:
+ description:
+ - The new field for the cloned MAC address.
+ - It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or
+ C(stable).
+ - This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses.
+ - Note that this property only exists in the D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address).
+ type: str
+ band:
+ description:
+ - 802.11 frequency band of the network.
+ - One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11.
+ - This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not
+ associate with the same network in the 2.4GHz band even if the network's settings are compatible.
+ - This setting depends on specific driver capability and may not work with all drivers.
+ type: str
+ choices: [ a, bg ]
+ bssid:
+ description:
+ - If specified, directs the device to only associate with the given access point.
+ - This capability is highly driver dependent and not supported by all devices.
+ - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future.
+ type: str
+ channel:
+ description:
+ - Wireless channel to use for the Wi-Fi connection.
+ - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel.
+ - Because channel numbers overlap between bands, this property also requires the I(band) property to be set.
+ type: int
+ default: 0
+ cloned-mac-address:
+ description:
+ - This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like
+ C(random).
+ - For libnm and nmcli, this field is called I(cloned-mac-address).
+ type: str
+ generate-mac-address-mask:
+ description:
+ - With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a
+ locally-administered, unicast MAC address is created. This property allows specifying that certain bits are fixed.
+ - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address.
+ - If the property is C(null), it is eligible to be overwritten by a default connection setting.
+ - If the value is still C(null) or an empty string, the default is to create a locally-administered, unicast MAC address.
+ - If the value contains one MAC address, this address is used as a mask. The set bits of the mask are to be filled with the current MAC
+ address of the device, while the unset bits are subject to randomization.
+ - Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the
+ C(random) or C(stable) algorithm.
+ - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits
+ that shall not be randomized.
+ - For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are
+ randomized.
+ - A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address.
+ - If the value contains more than one additional MAC address, one of them is chosen randomly. For example,
+ C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally
+ administered.
+ type: str
+ hidden:
+ description:
+ - If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode.
+ - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID.
+ However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with
+ caution.
+ - In AP mode, the created network does not broadcast its SSID.
+ - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the
+ explicit probe-scans are distinctly recognizable on the air.
+ type: bool
+ default: false
+ mac-address-blacklist:
+ description:
+ - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply.
+ - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)).
+ type: list
+ elements: str
+ mac-address-randomization:
+ description:
+ - One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1)
+ (never randomize the MAC address), or C(2) (always randomize the MAC address).
+ - This property is deprecated for I(cloned-mac-address).
+ type: int
+ default: 0
+ choices: [ 0, 1, 2 ]
+ mac-address:
+ description:
+ - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches.
+ - This property does not change the MAC address of the device (for example for MAC spoofing).
+ type: str
+ mode:
+ description: Wi-Fi network mode. If blank, C(infrastructure) is assumed.
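+ # Taken together, the wifi suboptions above form a single dict; for example,
+ # a 2.4GHz access point pinned to channel 6 might look like this (values are
+ # illustrative, not upstream defaults):
+ #
+ # wifi:
+ #   mode: ap
+ #   band: bg
+ #   channel: 6
+ #   hidden: true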
+ type: str
+ choices: [ infrastructure, mesh, adhoc, ap ]
+ default: infrastructure
+ mtu:
+ description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
+ type: int
+ default: 0
+ powersave:
+ description:
+ - One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (don't touch the currently configured setting) or C(0)
+ (use the globally configured value).
+ - All other values are reserved.
+ type: int
+ default: 0
+ choices: [ 0, 1, 2, 3 ]
+ rate:
+ description:
+ - If non-zero, directs the device to only use the specified bitrate for communication with the access point.
+ - Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s.
+ - This property is highly driver dependent and not all devices support setting a static bitrate.
+ type: int
+ default: 0
+ tx-power:
+ description:
+ - If non-zero, directs the device to use the specified transmit power.
+ - Units are dBm.
+ - This property is highly driver dependent and not all devices support setting a static transmit power.
+ type: int
+ default: 0
+ wake-on-wlan:
+ description:
+ - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options.
+ - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values
+ C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager).
+ - Note the option values' sum must be specified in order to combine multiple options.
+ type: int
+ default: 1
+ version_added: 3.5.0
+ ignore_unsupported_suboptions:
+ description:
+ - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host.
+ - Only I(wifi) and I(wifi_sec) options are currently affected.
+ type: bool
+ default: false
+ version_added: 3.6.0
+ gsm:
+ description:
+ - The configuration of the GSM connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).'
+ - 'For instance to use apn, pin, username and password:
+ C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).'
+ type: dict
+ version_added: 3.7.0
+ suboptions:
+ apn:
+ description:
+ - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network.
+ - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or
+ just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan.
+ - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9.
+ type: str
+ auto-config:
+ description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network
+ the modem will register to in the Mobile Broadband Provider database.
+ type: bool
+ default: false
+ device-id:
+ description:
+ - The device unique identifier (as given by the C(WWAN) management service) which this connection applies to.
+ - If given, the connection will only apply to the specified device.
+ type: str
+ home-only:
+ description:
+ - When C(true), only connections to the home network will be allowed.
+ - Connections to roaming networks will not be made.
+ type: bool
+ default: false
+ mtu:
+ description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
+ type: int
+ default: 0
+ network-id:
+ description:
+ - The Network ID (GSM LAI format, i.e. MCC-MNC) to force specific network registration.
+ - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network.
+ - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible.
+ type: str
+ number:
+ description: Legacy setting that used to help establish PPP data sessions for GSM-based modems.
+ type: str
+ password:
+ description:
+ - The password used to authenticate with the network, if required.
+ - Many providers do not require a password, or accept any password.
+ - But if a password is required, it is specified here.
+ type: str
+ password-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the I(password) property.
+ - 'Following choices are allowed:
+ C(0) B(NONE): The system is responsible for providing and storing this secret (default),
+ C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
+ asked to retrieve it
+ C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed
+ C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
+ (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
+ type: int
+ choices: [ 0, 1, 2, 4 ]
+ default: 0
+ pin:
+ description:
+ - If the SIM is locked with a PIN it must be unlocked before any other operations are requested.
+ - Specify the PIN here to allow operation of the device.
+ type: str
+ pin-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the I(gsm.pin) property.
+ - See I(gsm.password-flags) for NMSettingSecretFlags choices.
+ type: int
+ choices: [ 0, 1, 2, 4 ]
+ default: 0
+ sim-id:
+ description:
+ - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to.
+ - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching
+ the given identifier.'
+ type: str
+ sim-operator-id:
+ description:
+ - A MCC/MNC string like C(310260) or C(21601) identifying the specific mobile network operator which this connection applies to.
+ - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card
+ provisioned by the given operator.'
+ type: str
+ username:
+ description:
+ - The username used to authenticate with the network, if required.
+ - Many providers do not require a username, or accept any username.
+ - But if a username is required, it is specified here.
+ type: str
+ macvlan:
+ description:
+ - The configuration of the MAC VLAN connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-macvlan.html).'
+ type: dict
+ version_added: 6.6.0
+ suboptions:
+ mode:
+ description:
+ - The macvlan mode, which specifies the communication mechanism between multiple macvlans on the same lower device.
+ - 'Following choices are allowed: C(1) B(vepa), C(2) B(bridge), C(3) B(private), C(4) B(passthru)
+ and C(5) B(source).'
+ type: int
+ choices: [ 1, 2, 3, 4, 5 ]
+ required: true
+ parent:
+ description:
+ - If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should
+ be created. If this property is not specified, the connection must contain an "802-3-ethernet" setting with a
+ "mac-address" property.
+ type: str
+ required: true
+ promiscuous:
+ description:
+ - Whether the interface should be put in promiscuous mode.
+ type: bool
+ tap:
+ description:
+ - Whether the interface should be a MACVTAP.
+ type: bool
+ wireguard:
+ description:
+ - The configuration of the WireGuard connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).'
+ - 'For instance to configure a listen port:
+ C({listen-port: 12345}).'
+ type: dict
+ version_added: 4.3.0
+ suboptions:
+ fwmark:
+ description:
+ - The 32-bit fwmark for outgoing packets.
+ - The use of fwmark is optional and is by default off. Setting it to 0 disables it.
+ - Note that enabling I(wireguard.ip4-auto-default-route) or I(wireguard.ip6-auto-default-route) implies that a fwmark is chosen automatically.
+ type: int
+ ip4-auto-default-route:
+ description:
+ - Whether to enable special handling of the IPv4 default route.
+ - If enabled, the IPv4 default route from I(wireguard.peer-routes) will be placed in a dedicated routing-table and two policy
+ routing rules will be added.
+ - The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table is chosen
+ automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved Rule-based Routing".
+ type: bool
+ ip6-auto-default-route:
+ description:
+ - Like I(wireguard.ip4-auto-default-route), but for the IPv6 default route.
+ type: bool
+ listen-port:
+ description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the
+ interface comes up.
+ type: int
+ mtu:
+ description:
+ - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple fragments.
+ - If zero, a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the current routes
+ at the time of activation.
+ type: int
+ peer-routes:
+ description:
+ - Whether to automatically add routes for the AllowedIPs ranges of the peers.
+ - If C(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and
+ C(ipv6.route-table). Usually you want this automatism enabled.
+ - If C(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes)
+ and C(ipv6.routes), respectively.
+ - Note that if the peer's AllowedIPs is C(0.0.0.0/0) or C(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
+ setting is enabled, the peer route for this peer won't be added automatically.
+ type: bool
+ private-key:
+ description: The 256-bit private key in base64 encoding.
+ type: str
+ private-key-flags:
+ description: C(NMSettingSecretFlags) indicating how to handle the I(wireguard.private-key) property.
+ type: int
+ choices: [ 0, 1, 2 ]
+ vpn:
+ description:
+ - Configuration of a VPN connection (PPTP and L2TP).
+ - In order to use L2TP you need to be sure that C(network-manager-l2tp) - and C(network-manager-l2tp-gnome)
+ if the host has a UI - are installed on the host.
+ type: dict
+ version_added: 5.1.0
+ suboptions:
+ permissions:
+ description: User that will have permission to use the connection.
+ type: str
+ required: true
+ service-type:
+ description: This defines the service type of the connection.
+ type: str
+ required: true
+ gateway:
+ description: The gateway to the connection. It can be an IP address (for example C(192.0.2.1))
+ or an FQDN address (for example C(vpn.example.com)).
+ type: str
+ required: true
+ password-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the I(password) property.
+ - 'Following choices are allowed:
+ C(0) B(NONE): The system is responsible for providing and storing this secret (default);
+ C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
+ asked to retrieve it;
+ C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed;
+ C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
+ (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
+ type: int
+ choices: [ 0, 1, 2, 4 ]
+ default: 0
+ user:
+ description: Username provided by the VPN administrator.
+ type: str
+ required: true
+ ipsec-enabled:
+ description:
+ - Enable or disable IPSec tunnel to L2TP host.
+ - This option is needed when C(service-type) is C(org.freedesktop.NetworkManager.l2tp).
+ type: bool
+ ipsec-psk:
+ description:
+ - The pre-shared key in base64 encoding.
+ - >
+ You can encode using this Ansible Jinja2 expression: C("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}").
+ - This is only used when I(ipsec-enabled=true).
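+ # Putting the suboptions above together, an L2TP vpn dict might look like
+ # the following sketch (all values are illustrative):
+ #
+ # vpn:
+ #   permissions: "{{ ansible_user }}"
+ #   service-type: org.freedesktop.NetworkManager.l2tp
+ #   gateway: vpn.example.com
+ #   user: brittany
+ #   ipsec-enabled: true
+ #   ipsec-psk: "0s{{ 'example-psk' | ansible.builtin.b64encode }}"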
+ type: str +''' + +EXAMPLES = r''' +# These examples are using the following inventory: +# +# ## Directory layout: +# +# |_/inventory/cloud-hosts +# | /group_vars/openstack-stage.yml +# | /host_vars/controller-01.openstack.host.com +# | /host_vars/controller-02.openstack.host.com +# |_/playbook/library/nmcli.py +# | /playbook-add.yml +# | /playbook-del.yml +# ``` +# +# ## inventory examples +# ### groups_vars +# ```yml +# --- +# #devops_os_define_network +# storage_gw: "192.0.2.254" +# external_gw: "198.51.100.254" +# tenant_gw: "203.0.113.254" +# +# #Team vars +# nmcli_team: +# - conn_name: tenant +# ip4: '{{ tenant_ip }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: external +# ip4: '{{ external_ip }}' +# gw4: '{{ external_gw }}' +# - conn_name: storage +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# nmcli_team_slave: +# - conn_name: em1 +# ifname: em1 +# master: tenant +# - conn_name: em2 +# ifname: em2 +# master: tenant +# - conn_name: p2p1 +# ifname: p2p1 +# master: storage +# - conn_name: p2p2 +# ifname: p2p2 +# master: external +# +# #bond vars +# nmcli_bond: +# - conn_name: tenant +# ip4: '{{ tenant_ip }}' +# gw4: '' +# mode: balance-rr +# - conn_name: external +# ip4: '{{ external_ip }}' +# gw4: '' +# mode: balance-rr +# - conn_name: storage +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# mode: balance-rr +# nmcli_bond_slave: +# - conn_name: em1 +# ifname: em1 +# master: tenant +# - conn_name: em2 +# ifname: em2 +# master: tenant +# - conn_name: p2p1 +# ifname: p2p1 +# master: storage +# - conn_name: p2p2 +# ifname: p2p2 +# master: external +# +# #ethernet vars +# nmcli_ethernet: +# - conn_name: em1 +# ifname: em1 +# ip4: +# - '{{ tenant_ip }}' +# - '{{ second_tenant_ip }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: em2 +# ifname: em2 +# ip4: '{{ tenant_ip1 }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: p2p1 +# ifname: p2p1 +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# - conn_name: p2p2 +# ifname: p2p2 +# ip4: '{{ external_ip }}' +# gw4: '{{ external_gw }}' +# ``` +# +# ### host_vars +# ```yml +# --- +# storage_ip: "192.0.2.91/23" +# external_ip: "198.51.100.23/21" +# tenant_ip: "203.0.113.77/23" +# second_tenant_ip: "204.0.113.77/23" +# ``` + + + +## playbook-add.yml example + +--- +- hosts: openstack-stage + remote_user: root + tasks: + + - name: Install needed network manager libs + ansible.builtin.package: + name: + - NetworkManager-libnm + - nm-connection-editor + - libsemanage-python + - policycoreutils-python + state: present + +##### Working with all cloud nodes - Teaming + - name: Try nmcli add team - conn_name only & ip4 gw4 + community.general.nmcli: + type: team + conn_name: '{{ item.conn_name }}' + ip4: '{{ item.ip4 }}' + gw4: '{{ item.gw4 }}' + state: present + with_items: + - '{{ nmcli_team }}' + + - name: Try nmcli add teams-slave + community.general.nmcli: + type: team-slave + conn_name: '{{ item.conn_name }}' + ifname: '{{ item.ifname }}' + master: '{{ item.master }}' + state: present + with_items: + - '{{ nmcli_team_slave }}' + +###### Working with all cloud nodes - Bonding + - name: Try nmcli add bond - conn_name only & ip4 gw4 mode + community.general.nmcli: + type: bond + conn_name: '{{ item.conn_name }}' + ip4: '{{ item.ip4 }}' + gw4: '{{ item.gw4 }}' + mode: '{{ item.mode }}' + state: present + with_items: + - '{{ nmcli_bond }}' + + - name: Try nmcli add bond-slave + community.general.nmcli: + type: bond-slave + conn_name: '{{ item.conn_name }}' + ifname: '{{ item.ifname }}' + master: '{{ item.master }}' + state: present + 
with_items:
+ - '{{ nmcli_bond_slave }}'
+
+##### Working with all cloud nodes - Ethernet
+ - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: ethernet
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Try nmcli del team - multiple
+ community.general.nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
+ with_items:
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
+
+ - name: Add an Ethernet connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+
+ - name: Add a Team connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: true
+
+ - name: Optionally, at the same time specify IPv6 addresses for the device
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: 2001:db8::cafe
+ gw6: 2001:db8::1
+ state: present
+
+ - name: Add two IPv4 DNS server addresses
+ community.general.nmcli:
+ conn_name: my-eth1
+ type: ethernet
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
+
+ - name: Make a profile usable for all compatible Ethernet interfaces
+ community.general.nmcli:
+ type: ethernet
+ conn_name: my-eth1
+ ifname: '*'
+ state: present
+
+ - name: Change the property of a setting, e.g. MTU
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
+
+ - name: Add second ip4 address
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4:
+ - 192.0.2.100/24
+ - 192.0.3.100/24
+ state: present
+
+ - name: Add second ip6 address
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip6:
+ - 2001:db8::cafe
+ - 2002:db8::cafe
+ state: present
+
+ - name: Add VxLan
+ community.general.nmcli:
+ type: vxlan
+ conn_name: vxlan_test1
+ vxlan_id: 16
+ vxlan_local: 192.168.1.2
+ vxlan_remote: 192.168.1.5
+
+ - name: Add gre
+ community.general.nmcli:
+ type: gre
+ conn_name: gre_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add ipip
+ community.general.nmcli:
+ type: ipip
+ conn_name: ipip_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add sit
+ community.general.nmcli:
+ type: sit
+ conn_name: sit_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add zone
+ community.general.nmcli:
+ type: ethernet
+ conn_name: my-eth1
+ zone: external
+ state: present
+
+# nmcli exits with status 0 if it succeeds and exits with a status greater
+# than zero when there is a failure.
The following list of status codes may be +# returned: +# +# - 0 Success - indicates the operation succeeded +# - 1 Unknown or unspecified error +# - 2 Invalid user input, wrong nmcli invocation +# - 3 Timeout expired (see --wait option) +# - 4 Connection activation failed +# - 5 Connection deactivation failed +# - 6 Disconnecting device failed +# - 7 Connection deletion failed +# - 8 NetworkManager is not running +# - 9 nmcli and NetworkManager versions mismatch +# - 10 Connection, device, or access point does not exist. + +- name: Create the wifi connection + community.general.nmcli: + type: wifi + conn_name: Brittany + ifname: wlp4s0 + ssid: Brittany + wifi_sec: + key-mgmt: wpa-psk + psk: my_password + autoconnect: true + state: present + +- name: Create a hidden AP mode wifi connection + community.general.nmcli: + type: wifi + conn_name: ChocoMaster + ifname: wlo1 + ssid: ChocoMaster + wifi: + hidden: true + mode: ap + autoconnect: true + state: present + +- name: Create a gsm connection + community.general.nmcli: + type: gsm + conn_name: my-gsm-provider + ifname: cdc-wdm0 + gsm: + apn: my.provider.apn + username: my-provider-username + password: my-provider-password + pin: my-sim-pin + autoconnect: true + state: present + +- name: Create a macvlan connection + community.general.nmcli: + type: macvlan + conn_name: my-macvlan-connection + ifname: mymacvlan0 + macvlan: + mode: 2 + parent: eth1 + autoconnect: true + state: present + +- name: Create a wireguard connection + community.general.nmcli: + type: wireguard + conn_name: my-wg-provider + ifname: mywg0 + wireguard: + listen-port: 51820 + private-key: my-private-key + autoconnect: true + state: present + +- name: >- + Create a VPN L2TP connection for ansible_user to connect on vpn.example.com + authenticating with user 'brittany' and pre-shared key as 'Brittany123' + community.general.nmcli: + type: vpn + conn_name: my-vpn-connection + vpn: + permissions: "{{ ansible_user }}" + service-type: org.freedesktop.NetworkManager.l2tp + gateway: vpn.example.com + password-flags: 2 + user: brittany + ipsec-enabled: true + ipsec-psk: "0s{{ 'Brittany123' | ansible.builtin.b64encode }}" + autoconnect: false + state: present + +''' + +RETURN = r"""# +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text +import re + + +class NmcliModuleError(Exception): + pass + + +class Nmcli(object): + """ + This is the generic nmcli manipulation class that is subclassed based on platform. + A subclass may wish to override the following action methods:- + - create_connection() + - delete_connection() + - edit_connection() + - modify_connection() + - show_connection() + - up_connection() + - down_connection() + All subclasses MUST define platform and distribution (which may be None). 
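+
+ A rough sketch of how main() drives this class for state=present
+ (simplified; error handling and check mode omitted):
+
+ nmcli = Nmcli(module)
+ if nmcli.connection_exists():
+ (changed, diff) = nmcli.is_connection_changed()
+ if changed:
+ nmcli.modify_connection()
+ else:
+ nmcli.create_connection()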
+ """ + + platform = 'Generic' + distribution = None + + SECRET_OPTIONS = ( + '802-11-wireless-security.leap-password', + '802-11-wireless-security.psk', + '802-11-wireless-security.wep-key0', + '802-11-wireless-security.wep-key1', + '802-11-wireless-security.wep-key2', + '802-11-wireless-security.wep-key3' + ) + + def __init__(self, module): + self.module = module + self.state = module.params['state'] + self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] + self.autoconnect = module.params['autoconnect'] + self.conn_name = module.params['conn_name'] + self.master = module.params['master'] + self.ifname = module.params['ifname'] + self.type = module.params['type'] + self.ip4 = module.params['ip4'] + self.gw4 = module.params['gw4'] + self.gw4_ignore_auto = module.params['gw4_ignore_auto'] + self.routes4 = module.params['routes4'] + self.routes4_extended = module.params['routes4_extended'] + self.route_metric4 = module.params['route_metric4'] + self.routing_rules4 = module.params['routing_rules4'] + self.never_default4 = module.params['never_default4'] + self.dns4 = module.params['dns4'] + self.dns4_search = module.params['dns4_search'] + self.dns4_ignore_auto = module.params['dns4_ignore_auto'] + self.method4 = module.params['method4'] + self.may_fail4 = module.params['may_fail4'] + self.ip6 = module.params['ip6'] + self.gw6 = module.params['gw6'] + self.gw6_ignore_auto = module.params['gw6_ignore_auto'] + self.routes6 = module.params['routes6'] + self.routes6_extended = module.params['routes6_extended'] + self.route_metric6 = module.params['route_metric6'] + self.dns6 = module.params['dns6'] + self.dns6_search = module.params['dns6_search'] + self.dns6_ignore_auto = module.params['dns6_ignore_auto'] + self.method6 = module.params['method6'] + self.ip_privacy6 = module.params['ip_privacy6'] + self.addr_gen_mode6 = module.params['addr_gen_mode6'] + self.mtu = module.params['mtu'] + self.stp = module.params['stp'] + self.priority = module.params['priority'] + self.mode = module.params['mode'] + self.miimon = module.params['miimon'] + self.primary = module.params['primary'] + self.downdelay = module.params['downdelay'] + self.updelay = module.params['updelay'] + self.xmit_hash_policy = module.params['xmit_hash_policy'] + self.arp_interval = module.params['arp_interval'] + self.arp_ip_target = module.params['arp_ip_target'] + self.slavepriority = module.params['slavepriority'] + self.forwarddelay = module.params['forwarddelay'] + self.hellotime = module.params['hellotime'] + self.maxage = module.params['maxage'] + self.ageingtime = module.params['ageingtime'] + # hairpin should be back to normal in 7.0.0 + self._hairpin = module.params['hairpin'] + self.path_cost = module.params['path_cost'] + self.mac = module.params['mac'] + self.runner = module.params['runner'] + self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] + self.runner_fast_rate = module.params['runner_fast_rate'] + self.vlanid = module.params['vlanid'] + self.vlandev = module.params['vlandev'] + self.flags = module.params['flags'] + self.ingress = module.params['ingress'] + self.egress = module.params['egress'] + self.vxlan_id = module.params['vxlan_id'] + self.vxlan_local = module.params['vxlan_local'] + self.vxlan_remote = module.params['vxlan_remote'] + self.ip_tunnel_dev = module.params['ip_tunnel_dev'] + self.ip_tunnel_local = module.params['ip_tunnel_local'] + self.ip_tunnel_remote = module.params['ip_tunnel_remote'] + self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] 
+ self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] + self.nmcli_bin = self.module.get_bin_path('nmcli', True) + self.dhcp_client_id = module.params['dhcp_client_id'] + self.zone = module.params['zone'] + self.ssid = module.params['ssid'] + self.wifi = module.params['wifi'] + self.wifi_sec = module.params['wifi_sec'] + self.gsm = module.params['gsm'] + self.macvlan = module.params['macvlan'] + self.wireguard = module.params['wireguard'] + self.vpn = module.params['vpn'] + self.transport_mode = module.params['transport_mode'] + + if self.method4: + self.ipv4_method = self.method4 + elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip4: + self.ipv4_method = 'disabled' + elif self.ip4: + self.ipv4_method = 'manual' + else: + self.ipv4_method = None + + if self.method6: + self.ipv6_method = self.method6 + elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip6: + self.ipv6_method = 'disabled' + elif self.ip6: + self.ipv6_method = 'manual' + else: + self.ipv6_method = None + + self.edit_commands = [] + + @property + def hairpin(self): + if self._hairpin is None: + self.module.deprecate( + "Parameter 'hairpin' default value will change from true to false in community.general 7.0.0. " + "Set the value explicitly to suppress this warning.", + version='7.0.0', collection_name='community.general', + ) + # Should be False in 7.0.0 but then that should be in argument_specs + self._hairpin = True + return self._hairpin + + def execute_command(self, cmd, use_unsafe_shell=False, data=None): + if isinstance(cmd, list): + cmd = [to_text(item) for item in cmd] + else: + cmd = to_text(cmd) + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + + def execute_edit_commands(self, commands, arguments): + arguments = arguments or [] + cmd = [self.nmcli_bin, 'con', 'edit'] + arguments + data = "\n".join(commands) + return self.execute_command(cmd, data=data) + + def connection_options(self, detect_change=False): + # Options common to multiple connection types. + options = { + 'connection.autoconnect': self.autoconnect, + 'connection.zone': self.zone, + } + + # IP address options. 
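+ # These translate the module's ip4/gw4/dns4 style parameters into nmcli's
+ # ipv4.*/ipv6.* setting names. For example (illustrative values),
+ # ip4=['192.0.2.100/24'] with gw4='192.0.2.1' becomes
+ # 'ipv4.addresses 192.0.2.100/24 ipv4.gateway 192.0.2.1' on the command
+ # line assembled in connection_update().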
+ if self.ip_conn_type and not self.master:
+ options.update({
+ 'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4),
+ 'ipv4.dhcp-client-id': self.dhcp_client_id,
+ 'ipv4.dns': self.dns4,
+ 'ipv4.dns-search': self.dns4_search,
+ 'ipv4.ignore-auto-dns': self.dns4_ignore_auto,
+ 'ipv4.gateway': self.gw4,
+ 'ipv4.ignore-auto-routes': self.gw4_ignore_auto,
+ 'ipv4.routes': self.enforce_routes_format(self.routes4, self.routes4_extended),
+ 'ipv4.route-metric': self.route_metric4,
+ 'ipv4.routing-rules': self.routing_rules4,
+ 'ipv4.never-default': self.never_default4,
+ 'ipv4.method': self.ipv4_method,
+ 'ipv4.may-fail': self.may_fail4,
+ 'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6),
+ 'ipv6.dns': self.dns6,
+ 'ipv6.dns-search': self.dns6_search,
+ 'ipv6.ignore-auto-dns': self.dns6_ignore_auto,
+ 'ipv6.gateway': self.gw6,
+ 'ipv6.ignore-auto-routes': self.gw6_ignore_auto,
+ 'ipv6.routes': self.enforce_routes_format(self.routes6, self.routes6_extended),
+ 'ipv6.route-metric': self.route_metric6,
+ 'ipv6.method': self.ipv6_method,
+ 'ipv6.ip6-privacy': self.ip_privacy6,
+ 'ipv6.addr-gen-mode': self.addr_gen_mode6
+ })
+ # when 'method' is disabled, 'may-fail' makes no sense but is accepted by nmcli, which keeps it 'yes'
+ # force ignoring to save idempotency
+ if self.ipv4_method and self.ipv4_method != 'disabled':
+ options.update({'ipv4.may-fail': self.may_fail4})
+
+ # Layer 2 options.
+ if self.mac:
+ options.update({self.mac_setting: self.mac})
+
+ if self.mtu_conn_type:
+ options.update({self.mtu_setting: self.mtu})
+
+ # Connections that can have a master.
+ if self.slave_conn_type:
+ options.update({
+ 'connection.master': self.master,
+ })
+
+ # Options specific to a connection type.
+ if self.type == 'bond':
+ options.update({
+ 'arp-interval': self.arp_interval,
+ 'arp-ip-target': self.arp_ip_target,
+ 'downdelay': self.downdelay,
+ 'miimon': self.miimon,
+ 'mode': self.mode,
+ 'primary': self.primary,
+ 'updelay': self.updelay,
+ 'xmit_hash_policy': self.xmit_hash_policy,
+ })
+ elif self.type == 'bond-slave':
+ options.update({
+ 'connection.slave-type': 'bond',
+ })
+ elif self.type == 'bridge':
+ options.update({
+ 'bridge.ageing-time': self.ageingtime,
+ 'bridge.forward-delay': self.forwarddelay,
+ 'bridge.hello-time': self.hellotime,
+ 'bridge.max-age': self.maxage,
+ 'bridge.priority': self.priority,
+ 'bridge.stp': self.stp,
+ })
+ # priority only makes sense when stp is enabled, otherwise nmcli keeps bridge-priority at 32768 regardless of input.
+ # force ignoring to save idempotency + if self.stp: + options.update({'bridge.priority': self.priority}) + elif self.type == 'team': + options.update({ + 'team.runner': self.runner, + 'team.runner-hwaddr-policy': self.runner_hwaddr_policy, + }) + if self.runner_fast_rate is not None: + options.update({ + 'team.runner-fast-rate': self.runner_fast_rate, + }) + elif self.type == 'bridge-slave': + options.update({ + 'connection.slave-type': 'bridge', + 'bridge-port.path-cost': self.path_cost, + 'bridge-port.hairpin-mode': self.hairpin, + 'bridge-port.priority': self.slavepriority, + }) + elif self.type == 'team-slave': + options.update({ + 'connection.slave-type': 'team', + }) + elif self.tunnel_conn_type: + options.update({ + 'ip-tunnel.local': self.ip_tunnel_local, + 'ip-tunnel.mode': self.type, + 'ip-tunnel.parent': self.ip_tunnel_dev, + 'ip-tunnel.remote': self.ip_tunnel_remote, + }) + if self.type == 'gre': + options.update({ + 'ip-tunnel.input-key': self.ip_tunnel_input_key, + 'ip-tunnel.output-key': self.ip_tunnel_output_key + }) + elif self.type == 'vlan': + options.update({ + 'vlan.id': self.vlanid, + 'vlan.parent': self.vlandev, + 'vlan.flags': self.flags, + 'vlan.ingress': self.ingress, + 'vlan.egress': self.egress, + }) + elif self.type == 'vxlan': + options.update({ + 'vxlan.id': self.vxlan_id, + 'vxlan.local': self.vxlan_local, + 'vxlan.remote': self.vxlan_remote, + }) + elif self.type == 'wifi': + options.update({ + '802-11-wireless.ssid': self.ssid, + 'connection.slave-type': 'bond' if self.master else None, + }) + if self.wifi: + for name, value in self.wifi.items(): + options.update({ + '802-11-wireless.%s' % name: value + }) + if self.wifi_sec: + for name, value in self.wifi_sec.items(): + options.update({ + '802-11-wireless-security.%s' % name: value + }) + elif self.type == 'gsm': + if self.gsm: + for name, value in self.gsm.items(): + options.update({ + 'gsm.%s' % name: value, + }) + elif self.type == 'macvlan': + if self.macvlan: + for name, value in self.macvlan.items(): + options.update({ + 'macvlan.%s' % name: value, + }) + elif self.state == 'present': + raise NmcliModuleError('type is macvlan but all of the following are missing: macvlan') + elif self.type == 'wireguard': + if self.wireguard: + for name, value in self.wireguard.items(): + options.update({ + 'wireguard.%s' % name: value, + }) + elif self.type == 'vpn': + if self.vpn: + vpn_data_values = '' + for name, value in self.vpn.items(): + if name == 'service-type': + options.update({ + 'vpn.service-type': value, + }) + elif name == 'permissions': + options.update({ + 'connection.permissions': value, + }) + else: + if vpn_data_values != '': + vpn_data_values += ', ' + + if isinstance(value, bool): + value = self.bool_to_string(value) + + vpn_data_values += '%s=%s' % (name, value) + options.update({ + 'vpn.data': vpn_data_values, + }) + elif self.type == 'infiniband': + options.update({ + 'infiniband.transport-mode': self.transport_mode, + }) + + # Convert settings values based on the situation. + for setting, value in options.items(): + setting_type = self.settings_type(setting) + convert_func = None + if setting_type is bool: + # Convert all bool options to yes/no. + convert_func = self.bool_to_string + if detect_change: + if setting in ('vlan.id', 'vxlan.id'): + # Convert VLAN/VXLAN IDs to text when detecting changes. + convert_func = to_text + elif setting == self.mtu_setting: + # MTU is 'auto' by default when detecting changes. 
+ convert_func = self.mtu_to_string + elif setting == 'ipv6.ip6-privacy': + convert_func = self.ip6_privacy_to_num + elif setting_type is list: + # Convert lists to strings for nmcli create/modify commands. + convert_func = self.list_to_string + + if callable(convert_func): + options[setting] = convert_func(options[setting]) + + return options + + @property + def ip_conn_type(self): + return self.type in ( + 'bond', + 'bridge', + 'dummy', + 'ethernet', + '802-3-ethernet', + 'generic', + 'gre', + 'infiniband', + 'ipip', + 'sit', + 'team', + 'vlan', + 'wifi', + '802-11-wireless', + 'gsm', + 'macvlan', + 'wireguard', + 'vpn', + ) + + @property + def mac_setting(self): + if self.type == 'bridge': + return 'bridge.mac-address' + else: + return '802-3-ethernet.cloned-mac-address' + + @property + def mtu_conn_type(self): + return self.type in ( + 'dummy', + 'ethernet', + 'team-slave', + 'vlan', + ) + + @property + def mtu_setting(self): + return '802-3-ethernet.mtu' + + @staticmethod + def mtu_to_string(mtu): + if not mtu: + return 'auto' + else: + return to_text(mtu) + + @staticmethod + def ip6_privacy_to_num(privacy): + ip6_privacy_values = { + 'disabled': '0', + 'prefer-public-addr': '1 (enabled, prefer public IP)', + 'prefer-temp-addr': '2 (enabled, prefer temporary IP)', + 'unknown': '-1', + } + + if privacy is None: + return None + + if privacy not in ip6_privacy_values: + raise AssertionError('{privacy} is invalid ip_privacy6 option'.format(privacy=privacy)) + + return ip6_privacy_values[privacy] + + @property + def slave_conn_type(self): + return self.type in ( + 'bond-slave', + 'bridge-slave', + 'team-slave', + 'wifi', + ) + + @property + def tunnel_conn_type(self): + return self.type in ( + 'gre', + 'ipip', + 'sit', + ) + + @staticmethod + def enforce_ipv4_cidr_notation(ip4_addresses): + if ip4_addresses is None: + return None + return [address if '/' in address else address + '/32' for address in ip4_addresses] + + @staticmethod + def enforce_ipv6_cidr_notation(ip6_addresses): + if ip6_addresses is None: + return None + return [address if '/' in address else address + '/128' for address in ip6_addresses] + + def enforce_routes_format(self, routes, routes_extended): + if routes is not None: + return routes + elif routes_extended is not None: + return [self.route_to_string(route) for route in routes_extended] + else: + return None + + @staticmethod + def route_to_string(route): + result_str = '' + result_str += route['ip'] + if route.get('next_hop') is not None: + result_str += ' ' + route['next_hop'] + if route.get('metric') is not None: + result_str += ' ' + str(route['metric']) + + for attribute, value in sorted(route.items()): + if attribute not in ('ip', 'next_hop', 'metric') and value is not None: + result_str += ' {0}={1}'.format(attribute, str(value).lower()) + + return result_str + + @staticmethod + def bool_to_string(boolean): + if boolean: + return "yes" + else: + return "no" + + @staticmethod + def list_to_string(lst): + if lst is None: + return None + else: + return ",".join(lst) + + @staticmethod + def settings_type(setting): + if setting in ('bridge.stp', + 'bridge-port.hairpin-mode', + 'connection.autoconnect', + 'ipv4.never-default', + 'ipv4.ignore-auto-dns', + 'ipv4.ignore-auto-routes', + 'ipv4.may-fail', + 'ipv6.ignore-auto-dns', + 'ipv6.ignore-auto-routes', + '802-11-wireless.hidden', + 'team.runner-fast-rate'): + return bool + elif setting in ('ipv4.addresses', + 'ipv6.addresses', + 'ipv4.dns', + 'ipv4.dns-search', + 'ipv4.routes', + 'ipv4.routing-rules', + 
'ipv6.dns',
+ 'ipv6.dns-search',
+ 'ipv6.routes',
+ '802-11-wireless-security.group',
+ '802-11-wireless-security.leap-password-flags',
+ '802-11-wireless-security.pairwise',
+ '802-11-wireless-security.proto',
+ '802-11-wireless-security.psk-flags',
+ '802-11-wireless-security.wep-key-flags',
+ '802-11-wireless.mac-address-blacklist'):
+ return list
+ return str
+
+ def get_route_params(self, raw_values):
+ routes_params = []
+ for raw_value in raw_values:
+ route_params = {}
+ for parameter, value in re.findall(r'([\w-]*)\s?=\s?([^\s,}]*)', raw_value):
+ if parameter == 'nh':
+ route_params['next_hop'] = value
+ elif parameter == 'mt':
+ route_params['metric'] = value
+ else:
+ route_params[parameter] = value
+ routes_params.append(route_params)
+ return [self.route_to_string(route_params) for route_params in routes_params]
+
+ def list_connection_info(self):
+ cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ raise NmcliModuleError(err)
+ return out.splitlines()
+
+ def connection_exists(self):
+ return self.conn_name in self.list_connection_info()
+
+ def down_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
+ return self.execute_command(cmd)
+
+ def up_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
+ return self.execute_command(cmd)
+
+ def connection_update(self, nmcli_command):
+ if nmcli_command == 'create':
+ cmd = [self.nmcli_bin, 'con', 'add', 'type']
+ if self.tunnel_conn_type:
+ cmd.append('ip-tunnel')
+ else:
+ cmd.append(self.type)
+ cmd.append('con-name')
+ elif nmcli_command == 'modify':
+ cmd = [self.nmcli_bin, 'con', 'modify']
+ else:
+ self.module.fail_json(msg="Invalid nmcli command.")
+ cmd.append(self.conn_name)
+
+ # Use connection name as default for interface name on creation.
+ if nmcli_command == 'create' and self.ifname is None:
+ ifname = self.conn_name
+ else:
+ ifname = self.ifname
+
+ options = {
+ 'connection.interface-name': ifname,
+ }
+
+ # VPN doesn't need an interface, but if one is sent it must be a valid interface.
+ if self.type == 'vpn' and self.ifname is None:
+ del options['connection.interface-name']
+
+ options.update(self.connection_options())
+
+ # Constructing the command.
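+ # Every option with a non-None value is appended as a '<setting> <value>'
+ # pair, so a typical create ends up as, for example (illustrative values):
+ # nmcli con add type ethernet con-name my-eth1 connection.interface-name eth1 ipv4.addresses 192.0.2.100/24
+ # Options listed in SECRET_OPTIONS are diverted to self.edit_commands and
+ # applied afterwards through 'nmcli con edit' (see edit_connection()), and
+ # xmit_hash_policy is passed via '+bond.options' instead of as a plain pair.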
+ for key, value in options.items(): + if value is not None: + if key in self.SECRET_OPTIONS: + self.edit_commands += ['set %s %s' % (key, value)] + continue + if key == 'xmit_hash_policy': + cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value]) + continue + cmd.extend([key, value]) + + return self.execute_command(cmd) + + def create_connection(self): + status = self.connection_update('create') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() + if self.create_connection_up: + status = self.up_connection() + return status + + @property + def create_connection_up(self): + if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + return True + elif self.type == 'team': + if (self.dns4 is not None) or (self.dns6 is not None): + return True + return False + + def remove_connection(self): + # self.down_connection() + cmd = [self.nmcli_bin, 'con', 'del', self.conn_name] + return self.execute_command(cmd) + + def modify_connection(self): + status = self.connection_update('modify') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() + return status + + def edit_connection(self): + commands = self.edit_commands + ['save', 'quit'] + return self.execute_edit_commands(commands, arguments=[self.conn_name]) + + def show_connection(self): + cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] + + (rc, out, err) = self.execute_command(cmd) + + if rc != 0: + raise NmcliModuleError(err) + + p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$') + + conn_info = dict() + for line in out.splitlines(): + pair = line.split(':', 1) + key = pair[0].strip() + key_type = self.settings_type(key) + if key and len(pair) > 1: + raw_value = pair[1].lstrip() + if raw_value == '--': + conn_info[key] = None + elif key == 'bond.options': + # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax. + opts = raw_value.split(',') + for opt in opts: + alias_pair = opt.split('=', 1) + if len(alias_pair) > 1: + alias_key = alias_pair[0] + alias_value = alias_pair[1] + conn_info[alias_key] = alias_value + elif key in ('ipv4.routes', 'ipv6.routes'): + conn_info[key] = [s.strip() for s in raw_value.split(';')] + elif key_type == list: + conn_info[key] = [s.strip() for s in raw_value.split(',')] + else: + m_enum = p_enum_value.match(raw_value) + if m_enum is not None: + value = m_enum.group(1) + else: + value = raw_value + conn_info[key] = value + + return conn_info + + def get_supported_properties(self, setting): + properties = [] + + if setting == '802-11-wireless-security': + set_property = 'psk' + set_value = 'FAKEVALUE' + commands = ['set %s.%s %s' % (setting, set_property, set_value)] + else: + commands = [] + + commands += ['print %s' % setting, 'quit', 'yes'] + + (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) + + if rc != 0: + raise NmcliModuleError(err) + + for line in out.splitlines(): + prefix = '%s.' 
% setting
+ if line.startswith(prefix):
+ pair = line.split(':', 1)
+ property = pair[0].strip().replace(prefix, '')
+ properties.append(property)
+
+ return properties
+
+ def check_for_unsupported_properties(self, setting):
+ if setting == '802-11-wireless':
+ setting_key = 'wifi'
+ elif setting == '802-11-wireless-security':
+ setting_key = 'wifi_sec'
+ else:
+ setting_key = setting
+
+ supported_properties = self.get_supported_properties(setting)
+ unsupported_properties = []
+
+ for property, value in getattr(self, setting_key).items():
+ if property not in supported_properties:
+ unsupported_properties.append(property)
+
+ if unsupported_properties:
+ msg_options = []
+ for property in unsupported_properties:
+ msg_options.append('%s.%s' % (setting_key, property))
+
+ msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options)
+ if self.ignore_unsupported_suboptions:
+ self.module.warn(msg)
+ else:
+ self.module.fail_json(msg=msg)
+
+ return unsupported_properties
+
+ def _compare_conn_params(self, conn_info, options):
+ changed = False
+ diff_before = dict()
+ diff_after = dict()
+
+ for key, value in options.items():
+ # We can't just do `if not value` because then if there's a value
+ # of 0 specified as an integer it'll be interpreted as empty when
+ # it actually isn't.
+ if value != 0 and not value:
+ continue
+
+ if key in conn_info:
+ current_value = conn_info[key]
+ if key == '802-11-wireless.wake-on-wlan' and current_value is not None:
+ match = re.match('0x([0-9A-Fa-f]+)', current_value)
+ if match:
+ current_value = str(int(match.group(1), 16))
+ if key in ('ipv4.routes', 'ipv6.routes') and current_value is not None:
+ current_value = self.get_route_params(current_value)
+ if key == self.mac_setting:
+ # MAC addresses are case insensitive, nmcli always reports them in uppercase
+ value = value.upper()
+ # ensure current_value is also converted to uppercase in case nmcli changes behaviour
+ if current_value:
+ current_value = current_value.upper()
+ if key == 'gsm.apn':
+ # Depending on version, nmcli adds double-quotes to gsm.apn
+ # Need to strip them in order to compare both
+ if current_value:
+ current_value = current_value.strip('"')
+ if key == self.mtu_setting and self.mtu is None:
+ self.mtu = 0
+ if key == 'vpn.data':
+ if current_value:
+ current_value = sorted(re.sub(r'\s*=\s*', '=', part.strip(), count=1) for part in current_value.split(','))
+ value = sorted(part.strip() for part in value.split(','))
+ else:
+ # parameter does not exist
+ current_value = None
+
+ if isinstance(current_value, list) and isinstance(value, list):
+ # compare values between two lists
+ if key in ('ipv4.addresses', 'ipv6.addresses'):
+ # The order of IP addresses matters because the first one
+ # is the default source address for outbound connections.
+ changed |= current_value != value
+ else:
+ changed |= sorted(current_value) != sorted(value)
+ elif all([key == self.mtu_setting, self.type == 'dummy', current_value is None, value == 'auto', self.mtu is None]):
+ value = None
+ else:
+ value = to_text(value)
+ if current_value != value:
+ changed = True
+
+ diff_before[key] = current_value
+ diff_after[key] = value
+
+ diff = {
+ 'before': diff_before,
+ 'after': diff_after,
+ }
+ return (changed, diff)
+
+ def is_connection_changed(self):
+ options = {
+ 'connection.interface-name': self.ifname,
+ }
+
+ # VPN doesn't need an interface, but if one is sent it must be a valid interface.
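+ # The same rule is applied in connection_update() when the nmcli command
+ # line is built, which keeps change detection consistent with create/modify.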
+ if self.type == 'vpn' and self.ifname is None: + del options['connection.interface-name'] + + if not self.type: + current_con_type = self.show_connection().get('connection.type') + if current_con_type: + if current_con_type == '802-11-wireless': + current_con_type = 'wifi' + self.type = current_con_type + + options.update(self.connection_options(detect_change=True)) + return self._compare_conn_params(self.show_connection(), options) + + +def main(): + # Parsing argument file + module = AnsibleModule( + argument_spec=dict( + ignore_unsupported_suboptions=dict(type='bool', default=False), + autoconnect=dict(type='bool', default=True), + state=dict(type='str', required=True, choices=['absent', 'present']), + conn_name=dict(type='str', required=True), + master=dict(type='str'), + ifname=dict(type='str'), + type=dict(type='str', + choices=[ + 'bond', + 'bond-slave', + 'bridge', + 'bridge-slave', + 'dummy', + 'ethernet', + 'generic', + 'gre', + 'infiniband', + 'ipip', + 'sit', + 'team', + 'team-slave', + 'vlan', + 'vxlan', + 'wifi', + 'gsm', + 'macvlan', + 'wireguard', + 'vpn', + ]), + ip4=dict(type='list', elements='str'), + gw4=dict(type='str'), + gw4_ignore_auto=dict(type='bool', default=False), + routes4=dict(type='list', elements='str'), + routes4_extended=dict(type='list', + elements='dict', + options=dict( + ip=dict(type='str', required=True), + next_hop=dict(type='str'), + metric=dict(type='int'), + table=dict(type='int'), + tos=dict(type='int'), + cwnd=dict(type='int'), + mtu=dict(type='int'), + onlink=dict(type='bool') + )), + route_metric4=dict(type='int'), + routing_rules4=dict(type='list', elements='str'), + never_default4=dict(type='bool', default=False), + dns4=dict(type='list', elements='str'), + dns4_search=dict(type='list', elements='str'), + dns4_ignore_auto=dict(type='bool', default=False), + method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), + may_fail4=dict(type='bool', default=True), + dhcp_client_id=dict(type='str'), + ip6=dict(type='list', elements='str'), + gw6=dict(type='str'), + gw6_ignore_auto=dict(type='bool', default=False), + dns6=dict(type='list', elements='str'), + dns6_search=dict(type='list', elements='str'), + dns6_ignore_auto=dict(type='bool', default=False), + routes6=dict(type='list', elements='str'), + routes6_extended=dict(type='list', + elements='dict', + options=dict( + ip=dict(type='str', required=True), + next_hop=dict(type='str'), + metric=dict(type='int'), + table=dict(type='int'), + cwnd=dict(type='int'), + mtu=dict(type='int'), + onlink=dict(type='bool') + )), + route_metric6=dict(type='int'), + method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), + ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']), + addr_gen_mode6=dict(type='str', choices=['default', 'default-or-eui64', 'eui64', 'stable-privacy']), + # Bond Specific vars + mode=dict(type='str', default='balance-rr', + choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), + miimon=dict(type='int'), + downdelay=dict(type='int'), + updelay=dict(type='int'), + xmit_hash_policy=dict(type='str'), + arp_interval=dict(type='int'), + arp_ip_target=dict(type='str'), + primary=dict(type='str'), + # general usage + mtu=dict(type='int'), + mac=dict(type='str'), + zone=dict(type='str'), + # bridge specific vars + stp=dict(type='bool', default=True), + priority=dict(type='int', default=128), + 
slavepriority=dict(type='int', default=32), + forwarddelay=dict(type='int', default=15), + hellotime=dict(type='int', default=2), + maxage=dict(type='int', default=20), + ageingtime=dict(type='int', default=300), + hairpin=dict(type='bool'), + path_cost=dict(type='int', default=100), + # team specific vars + runner=dict(type='str', default='roundrobin', + choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), + # team active-backup runner specific options + runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), + # team lacp runner specific options + runner_fast_rate=dict(type='bool'), + # vlan specific vars + vlanid=dict(type='int'), + vlandev=dict(type='str'), + flags=dict(type='str'), + ingress=dict(type='str'), + egress=dict(type='str'), + # vxlan specific vars + vxlan_id=dict(type='int'), + vxlan_local=dict(type='str'), + vxlan_remote=dict(type='str'), + # ip-tunnel specific vars + ip_tunnel_dev=dict(type='str'), + ip_tunnel_local=dict(type='str'), + ip_tunnel_remote=dict(type='str'), + # ip-tunnel type gre specific vars + ip_tunnel_input_key=dict(type='str', no_log=True), + ip_tunnel_output_key=dict(type='str', no_log=True), + # 802-11-wireless* specific vars + ssid=dict(type='str'), + wifi=dict(type='dict'), + wifi_sec=dict(type='dict', no_log=True), + gsm=dict(type='dict'), + macvlan=dict(type='dict', options=dict( + mode=dict(type='int', choices=[1, 2, 3, 4, 5], required=True), + parent=dict(type='str', required=True), + promiscuous=dict(type='bool'), + tap=dict(type='bool'))), + wireguard=dict(type='dict'), + vpn=dict(type='dict'), + transport_mode=dict(type='str', choices=['datagram', 'connected']), + ), + mutually_exclusive=[['never_default4', 'gw4'], + ['routes4_extended', 'routes4'], + ['routes6_extended', 'routes6']], + required_if=[("type", "wifi", [("ssid")])], + supports_check_mode=True, + ) + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + nmcli = Nmcli(module) + + (rc, out, err) = (None, '', '') + result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} + + # check for issues + if nmcli.conn_name is None: + nmcli.module.fail_json(msg="Please specify a name for the connection") + # team checks + if nmcli.type == "team": + if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup": + nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") + if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp": + nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp") + # team-slave checks + if nmcli.type == 'team-slave': + if nmcli.master is None: + nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type) + if nmcli.ifname is None: + nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type) + if nmcli.type == 'wifi': + unsupported_properties = {} + if nmcli.wifi: + if 'ssid' in nmcli.wifi: + module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") + del nmcli.wifi['ssid'] + unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') + if nmcli.wifi_sec: + unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') + if nmcli.ignore_unsupported_suboptions and unsupported_properties: + for setting_key, properties in unsupported_properties.items(): + for property in properties: + del getattr(nmcli, 
setting_key)[property]
+
+    try:
+        if nmcli.state == 'absent':
+            if nmcli.connection_exists():
+                if module.check_mode:
+                    module.exit_json(changed=True)
+                (rc, out, err) = nmcli.down_connection()
+                (rc, out, err) = nmcli.remove_connection()
+                if rc != 0:
+                    module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+
+        elif nmcli.state == 'present':
+            if nmcli.connection_exists():
+                changed, diff = nmcli.is_connection_changed()
+                if module._diff:
+                    result['diff'] = diff
+
+                if changed:
+                    # modify connection (note: this function is check mode aware)
+                    # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
+                    result['Exists'] = 'Connections do exist so we are modifying them'
+                    if module.check_mode:
+                        module.exit_json(changed=True, **result)
+                    (rc, out, err) = nmcli.modify_connection()
+                else:
+                    result['Exists'] = 'Connections already exist and no changes made'
+                    if module.check_mode:
+                        module.exit_json(changed=False, **result)
+            if not nmcli.connection_exists():
+                result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+                if module.check_mode:
+                    module.exit_json(changed=True, **result)
+                (rc, out, err) = nmcli.create_connection()
+            if rc is not None and rc != 0:
+                module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
+    except NmcliModuleError as e:
+        module.fail_json(name=nmcli.conn_name, msg=str(e))
+
+    if rc is None:
+        result['changed'] = False
+    else:
+        result['changed'] = True
+    if out:
+        result['stdout'] = out
+    if err:
+        result['stderr'] = err
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/nomad_job.py b/ansible_collections/community/general/plugins/modules/nomad_job.py
new file mode 100644
index 000000000..ca76536b4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nomad_job.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, FERREIRA Christophe
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Launch a Nomad Job
+description:
+  - Launch a Nomad job.
+  - Stop a Nomad job.
+  - Force start a Nomad job.
+requirements:
+  - python-nomad
+extends_documentation_fragment:
+  - community.general.nomad
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name of the job, used to delete, stop, or start a job without providing its source.
+      - Either this or I(content) must be specified.
+    type: str
+  state:
+    description:
+      - Deploy or remove job.
+    choices: ["present", "absent"]
+    required: true
+    type: str
+  force_start:
+    description:
+      - Force the job to start.
+    type: bool
+    default: false
+  content:
+    description:
+      - Content of Nomad job.
+      - Either this or I(name) must be specified.
+    type: str
+  content_format:
+    description:
+      - Type of content of Nomad job.
+    choices: ["hcl", "json"]
+    default: hcl
+    type: str
+seealso:
+  - name: Nomad jobs documentation
+    description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/ +''' + +EXAMPLES = ''' +- name: Create job + community.general.nomad_job: + host: localhost + state: present + content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}" + timeout: 120 + +- name: Stop job + community.general.nomad_job: + host: localhost + state: absent + name: api + +- name: Force job to start + community.general.nomad_job: + host: localhost + state: present + name: api + timeout: 120 + force_start: true +''' + +import json + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +import_nomad = None +try: + import nomad + import_nomad = True +except ImportError: + import_nomad = False + + +def run(): + module = AnsibleModule( + argument_spec=dict( + host=dict(required=True, type='str'), + state=dict(required=True, choices=['present', 'absent']), + use_ssl=dict(type='bool', default=True), + timeout=dict(type='int', default=5), + validate_certs=dict(type='bool', default=True), + client_cert=dict(type='path'), + client_key=dict(type='path'), + namespace=dict(type='str'), + name=dict(type='str'), + content_format=dict(choices=['hcl', 'json'], default='hcl'), + content=dict(type='str'), + force_start=dict(type='bool', default=False), + token=dict(type='str', no_log=True) + ), + supports_check_mode=True, + mutually_exclusive=[ + ["name", "content"] + ], + required_one_of=[ + ['name', 'content'] + ] + ) + + if not import_nomad: + module.fail_json(msg=missing_required_lib("python-nomad")) + + certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) + + nomad_client = nomad.Nomad( + host=module.params.get('host'), + secure=module.params.get('use_ssl'), + timeout=module.params.get('timeout'), + verify=module.params.get('validate_certs'), + cert=certificate_ssl, + namespace=module.params.get('namespace'), + token=module.params.get('token') + ) + + if module.params.get('state') == "present": + + if module.params.get('name') and not module.params.get('force_start'): + module.fail_json(msg='For start job with name, force_start is needed') + + changed = False + if module.params.get('content'): + + if module.params.get('content_format') == 'json': + + job_json = module.params.get('content') + try: + job_json = json.loads(job_json) + except ValueError as e: + module.fail_json(msg=to_native(e)) + job = dict() + job['job'] = job_json + try: + job_id = job_json.get('ID') + if job_id is None: + module.fail_json(msg="Cannot retrieve job with ID None") + plan = nomad_client.job.plan_job(job_id, job, diff=True) + if not plan['Diff'].get('Type') == "None": + changed = True + if not module.check_mode: + result = nomad_client.jobs.register_job(job) + else: + result = plan + else: + result = plan + except Exception as e: + module.fail_json(msg=to_native(e)) + + if module.params.get('content_format') == 'hcl': + + try: + job_hcl = module.params.get('content') + job_json = nomad_client.jobs.parse(job_hcl) + job = dict() + job['job'] = job_json + except nomad.api.exceptions.BadRequestNomadException as err: + msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text) + module.fail_json(msg=to_native(msg)) + try: + job_id = job_json.get('ID') + plan = nomad_client.job.plan_job(job_id, job, diff=True) + if not plan['Diff'].get('Type') == "None": + changed = True + if not module.check_mode: + result = nomad_client.jobs.register_job(job) + else: + result = plan + else: + result = plan + except Exception as e: + 
module.fail_json(msg=to_native(e)) + + if module.params.get('force_start'): + + try: + job = dict() + if module.params.get('name'): + job_name = module.params.get('name') + else: + job_name = job_json['Name'] + job_json = nomad_client.job.get_job(job_name) + if job_json['Status'] == 'running': + result = job_json + else: + job_json['Status'] = 'running' + job_json['Stop'] = False + job['job'] = job_json + if not module.check_mode: + result = nomad_client.jobs.register_job(job) + else: + result = nomad_client.validate.validate_job(job) + if not result.status_code == 200: + module.fail_json(msg=to_native(result.text)) + result = json.loads(result.text) + changed = True + except Exception as e: + module.fail_json(msg=to_native(e)) + + if module.params.get('state') == "absent": + + try: + if not module.params.get('name') is None: + job_name = module.params.get('name') + else: + if module.params.get('content_format') == 'hcl': + job_json = nomad_client.jobs.parse(module.params.get('content')) + job_name = job_json['Name'] + if module.params.get('content_format') == 'json': + job_json = module.params.get('content') + job_name = job_json['Name'] + job = nomad_client.job.get_job(job_name) + if job['Status'] == 'dead': + changed = False + result = job + else: + if not module.check_mode: + result = nomad_client.job.deregister_job(job_name) + else: + result = job + changed = True + except Exception as e: + module.fail_json(msg=to_native(e)) + + module.exit_json(changed=changed, result=result) + + +def main(): + + run() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/nomad_job_info.py b/ansible_collections/community/general/plugins/modules/nomad_job_info.py new file mode 100644 index 000000000..5ee25a57a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/nomad_job_info.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, FERREIRA Christophe +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: nomad_job_info +author: FERREIRA Christophe (@chris93111) +version_added: "1.3.0" +short_description: Get Nomad Jobs info +description: + - Get info for one Nomad job. + - List Nomad jobs. +requirements: + - python-nomad +extends_documentation_fragment: + - community.general.nomad + - community.general.attributes + - community.general.attributes.info_module +options: + name: + description: + - Name of job for Get info. + - If not specified, lists all jobs. + type: str +notes: + - C(check_mode) is supported. +seealso: + - name: Nomad jobs documentation + description: Complete documentation for Nomad API jobs. 
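
# A minimal sketch (using the same python-nomad client as the nomad_job module
# above; the file name 'job.hcl' is illustrative) of its plan-based change
# detection: parse the HCL job, plan it with diff=True, and only register it
# when the planner reports a non-empty diff.
import nomad

client = nomad.Nomad(host='localhost')
job_spec = {'job': client.jobs.parse(open('job.hcl').read())}  # HCL -> JSON job
plan = client.job.plan_job(job_spec['job']['ID'], job_spec, diff=True)
if plan['Diff'].get('Type') != 'None':
    client.jobs.register_job(job_spec)  # change detected, apply it
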
+ link: https://www.nomadproject.io/api-docs/jobs/ +''' + +EXAMPLES = ''' +- name: Get info for job awx + community.general.nomad_job_info: + host: localhost + name: awx + register: result + +- name: List Nomad jobs + community.general.nomad_job_info: + host: localhost + register: result + +''' + +RETURN = ''' +result: + description: List with dictionary contains jobs info + returned: success + type: list + sample: [ + { + "Affinities": null, + "AllAtOnce": false, + "Constraints": null, + "ConsulToken": "", + "CreateIndex": 13, + "Datacenters": [ + "dc1" + ], + "Dispatched": false, + "ID": "example", + "JobModifyIndex": 13, + "Meta": null, + "ModifyIndex": 13, + "Multiregion": null, + "Name": "example", + "Namespace": "default", + "NomadTokenID": "", + "ParameterizedJob": null, + "ParentID": "", + "Payload": null, + "Periodic": null, + "Priority": 50, + "Region": "global", + "Spreads": null, + "Stable": false, + "Status": "pending", + "StatusDescription": "", + "Stop": false, + "SubmitTime": 1602244370615307000, + "TaskGroups": [ + { + "Affinities": null, + "Constraints": null, + "Count": 1, + "EphemeralDisk": { + "Migrate": false, + "SizeMB": 300, + "Sticky": false + }, + "Meta": null, + "Migrate": { + "HealthCheck": "checks", + "HealthyDeadline": 300000000000, + "MaxParallel": 1, + "MinHealthyTime": 10000000000 + }, + "Name": "cache", + "Networks": null, + "ReschedulePolicy": { + "Attempts": 0, + "Delay": 30000000000, + "DelayFunction": "exponential", + "Interval": 0, + "MaxDelay": 3600000000000, + "Unlimited": true + }, + "RestartPolicy": { + "Attempts": 3, + "Delay": 15000000000, + "Interval": 1800000000000, + "Mode": "fail" + }, + "Scaling": null, + "Services": null, + "ShutdownDelay": null, + "Spreads": null, + "StopAfterClientDisconnect": null, + "Tasks": [ + { + "Affinities": null, + "Artifacts": null, + "CSIPluginConfig": null, + "Config": { + "image": "redis:3.2", + "port_map": [ + { + "db": 6379.0 + } + ] + }, + "Constraints": null, + "DispatchPayload": null, + "Driver": "docker", + "Env": null, + "KillSignal": "", + "KillTimeout": 5000000000, + "Kind": "", + "Leader": false, + "Lifecycle": null, + "LogConfig": { + "MaxFileSizeMB": 10, + "MaxFiles": 10 + }, + "Meta": null, + "Name": "redis", + "Resources": { + "CPU": 500, + "Devices": null, + "DiskMB": 0, + "IOPS": 0, + "MemoryMB": 256, + "Networks": [ + { + "CIDR": "", + "DNS": null, + "Device": "", + "DynamicPorts": [ + { + "HostNetwork": "default", + "Label": "db", + "To": 0, + "Value": 0 + } + ], + "IP": "", + "MBits": 10, + "Mode": "", + "ReservedPorts": null + } + ] + }, + "RestartPolicy": { + "Attempts": 3, + "Delay": 15000000000, + "Interval": 1800000000000, + "Mode": "fail" + }, + "Services": [ + { + "AddressMode": "auto", + "CanaryMeta": null, + "CanaryTags": null, + "Checks": [ + { + "AddressMode": "", + "Args": null, + "CheckRestart": null, + "Command": "", + "Expose": false, + "FailuresBeforeCritical": 0, + "GRPCService": "", + "GRPCUseTLS": false, + "Header": null, + "InitialStatus": "", + "Interval": 10000000000, + "Method": "", + "Name": "alive", + "Path": "", + "PortLabel": "", + "Protocol": "", + "SuccessBeforePassing": 0, + "TLSSkipVerify": false, + "TaskName": "", + "Timeout": 2000000000, + "Type": "tcp" + } + ], + "Connect": null, + "EnableTagOverride": false, + "Meta": null, + "Name": "redis-cache", + "PortLabel": "db", + "Tags": [ + "global", + "cache" + ], + "TaskName": "" + } + ], + "ShutdownDelay": 0, + "Templates": null, + "User": "", + "Vault": null, + "VolumeMounts": null + } + ], + "Update": { + 
"AutoPromote": false, + "AutoRevert": false, + "Canary": 0, + "HealthCheck": "checks", + "HealthyDeadline": 180000000000, + "MaxParallel": 1, + "MinHealthyTime": 10000000000, + "ProgressDeadline": 600000000000, + "Stagger": 30000000000 + }, + "Volumes": null + } + ], + "Type": "service", + "Update": { + "AutoPromote": false, + "AutoRevert": false, + "Canary": 0, + "HealthCheck": "", + "HealthyDeadline": 0, + "MaxParallel": 1, + "MinHealthyTime": 0, + "ProgressDeadline": 0, + "Stagger": 30000000000 + }, + "VaultNamespace": "", + "VaultToken": "", + "Version": 0 + } + ] + +''' + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +import_nomad = None +try: + import nomad + import_nomad = True +except ImportError: + import_nomad = False + + +def run(): + module = AnsibleModule( + argument_spec=dict( + host=dict(required=True, type='str'), + use_ssl=dict(type='bool', default=True), + timeout=dict(type='int', default=5), + validate_certs=dict(type='bool', default=True), + client_cert=dict(type='path'), + client_key=dict(type='path'), + namespace=dict(type='str'), + name=dict(type='str'), + token=dict(type='str', no_log=True) + ), + supports_check_mode=True + ) + + if not import_nomad: + module.fail_json(msg=missing_required_lib("python-nomad")) + + certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) + + nomad_client = nomad.Nomad( + host=module.params.get('host'), + secure=module.params.get('use_ssl'), + timeout=module.params.get('timeout'), + verify=module.params.get('validate_certs'), + cert=certificate_ssl, + namespace=module.params.get('namespace'), + token=module.params.get('token') + ) + + changed = False + result = list() + try: + job_list = nomad_client.jobs.get_jobs() + for job in job_list: + result.append(nomad_client.job.get_job(job.get('ID'))) + except Exception as e: + module.fail_json(msg=to_native(e)) + + if module.params.get('name'): + filter = list() + try: + for job in result: + if job.get('ID') == module.params.get('name'): + filter.append(job) + result = filter + if not filter: + module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name'))) + except Exception as e: + module.fail_json(msg=to_native(e)) + + module.exit_json(changed=changed, result=result) + + +def main(): + + run() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/nosh.py b/ansible_collections/community/general/plugins/modules/nosh.py new file mode 100644 index 000000000..2dfb8d590 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/nosh.py @@ -0,0 +1,559 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Thomas Caravia +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: nosh +author: + - "Thomas Caravia (@tacatac)" +short_description: Manage services with nosh +description: + - Control running and enabled state for system-wide or user services. + - BSD and Linux systems are supported. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + required: true + description: + - Name of the service to manage. 
+ state: + type: str + required: false + choices: [ started, stopped, reset, restarted, reloaded ] + description: + - C(started)/C(stopped) are idempotent actions that will not run + commands unless necessary. + C(restarted) will always bounce the service. + C(reloaded) will send a SIGHUP or start the service. + C(reset) will start or stop the service according to whether it is + enabled or not. + enabled: + required: false + type: bool + description: + - Enable or disable the service, independently of C(*.preset) file + preference or running state. Mutually exclusive with I(preset). Will take + effect prior to I(state=reset). + preset: + required: false + type: bool + description: + - Enable or disable the service according to local preferences in C(*.preset) files. + Mutually exclusive with I(enabled). Only has an effect if set to true. Will take + effect prior to I(state=reset). + user: + required: false + default: false + type: bool + description: + - Run system-control talking to the calling user's service manager, rather than + the system-wide service manager. +requirements: + - A system with an active nosh service manager, see Notes for further information. +notes: + - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/). +''' + +EXAMPLES = ''' +- name: Start dnscache if not running + community.general.nosh: + name: dnscache + state: started + +- name: Stop mpd, if running + community.general.nosh: + name: mpd + state: stopped + +- name: Restart unbound or start it if not already running + community.general.nosh: + name: unbound + state: restarted + +- name: Reload fail2ban or start it if not already running + community.general.nosh: + name: fail2ban + state: reloaded + +- name: Disable nsd + community.general.nosh: + name: nsd + enabled: false + +- name: For package installers, set nginx running state according to local enable settings, preset and reset + community.general.nosh: + name: nginx + preset: true + state: reset + +- name: Reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is + community.general.nosh: + name: reboot + state: started + +- name: Using conditionals with the module facts + tasks: + - name: Obtain information on tinydns service + community.general.nosh: + name: tinydns + register: result + + - name: Fail if service not loaded + ansible.builtin.fail: + msg: "The {{ result.name }} service is not loaded" + when: not result.status + + - name: Fail if service is running + ansible.builtin.fail: + msg: "The {{ result.name }} service is running" + when: result.status and result.status['DaemontoolsEncoreState'] == "running" +''' + +RETURN = ''' +name: + description: name used to find the service + returned: success + type: str + sample: "sshd" +service_path: + description: resolved path for the service + returned: success + type: str + sample: "/var/sv/sshd" +enabled: + description: whether the service is enabled at system bootstrap + returned: success + type: bool + sample: true +preset: + description: whether the enabled status reflects the one set in the relevant C(*.preset) file + returned: success + type: bool + sample: 'False' +state: + description: service process run state, C(None) if the service is not loaded and will not be started + returned: if state option is used + type: str + sample: "reloaded" +status: + description: A dictionary with the key=value pairs returned by C(system-control show-json) or C(None) if the service is not loaded + returned: success + type: 
complex + contains: + After: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"] + Before: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/shutdown"] + Conflicts: + description: [] # FIXME + returned: success + type: list + sample: [] + DaemontoolsEncoreState: + description: [] # FIXME + returned: success + type: str + sample: "running" + DaemontoolsState: + description: [] # FIXME + returned: success + type: str + sample: "up" + Enabled: + description: [] # FIXME + returned: success + type: bool + sample: true + LogService: + description: [] # FIXME + returned: success + type: str + sample: "../cyclog@sshd" + MainPID: + description: [] # FIXME + returned: success + type: int + sample: 661 + Paused: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + ReadyAfterRun: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + RemainAfterExit: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + Required-By: + description: [] # FIXME + returned: success + type: list + sample: [] + RestartExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + RestartExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + RestartTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + RestartUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + RunExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + RunExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + RunTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + RunUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + StartExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: 1 + StartExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + StartTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + StartUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + StopExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + StopExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + StopTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + StopUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + Stopped-By: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/shutdown"] + Timestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + UTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + Want: + description: [] # FIXME + returned: success + type: str + sample: "nothing" + Wanted-By: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"] + Wants: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"] +user: + description: whether the user-level service manager is called 
+  returned: success
+  type: bool
+  sample: false
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils.common.text.converters import to_native
+
+
+def run_sys_ctl(module, args):
+    sys_ctl = [module.get_bin_path('system-control', required=True)]
+    if module.params['user']:
+        sys_ctl = sys_ctl + ['--user']
+    return module.run_command(sys_ctl + args)
+
+
+def get_service_path(module, service):
+    (rc, out, err) = run_sys_ctl(module, ['find', service])
+    # fail if service not found
+    if rc != 0:
+        fail_if_missing(module, False, service, msg='host')
+    else:
+        return to_native(out).strip()
+
+
+def service_is_enabled(module, service_path):
+    (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path])
+    return rc == 0
+
+
+def service_is_preset_enabled(module, service_path):
+    (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path])
+    return to_native(out).strip().startswith("enable")
+
+
+def service_is_loaded(module, service_path):
+    (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path])
+    return rc == 0
+
+
+def get_service_status(module, service_path):
+    (rc, out, err) = run_sys_ctl(module, ['show-json', service_path])
+    # will fail if the service is not loaded
+    if err is not None and err:
+        module.fail_json(msg=err)
+    else:
+        json_out = json.loads(to_native(out).strip())
+        status = json_out[service_path]  # descend past service path header
+        return status
+
+
+def service_is_running(service_status):
+    return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running'])
+
+
+def handle_enabled(module, result, service_path):
+    """Enable or disable a service as needed.
+
+    - 'preset' will set the enabled state according to available preset file settings.
+    - 'enabled' will set the enabled state explicitly, independently of preset settings.
+
+    These options are set to "mutually exclusive" but the explicit 'enabled' option will
+    have priority if the check is bypassed.
+    """
+
+    # computed prior in control flow
+    preset = result['preset']
+    enabled = result['enabled']
+
+    # preset, effect only if option set to true (no reverse preset)
+    if module.params['preset']:
+        action = 'preset'
+
+        # run preset if needed
+        if preset != module.params['preset']:
+            result['changed'] = True
+            if not module.check_mode:
+                (rc, out, err) = run_sys_ctl(module, [action, service_path])
+                if rc != 0:
+                    module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+            result['preset'] = not preset
+            result['enabled'] = not enabled
+
+    # enabled/disabled state
+    if module.params['enabled'] is not None:
+        if module.params['enabled']:
+            action = 'enable'
+        else:
+            action = 'disable'
+
+        # change enable/disable if needed
+        if enabled != module.params['enabled']:
+            result['changed'] = True
+            if not module.check_mode:
+                (rc, out, err) = run_sys_ctl(module, [action, service_path])
+                if rc != 0:
+                    module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+            result['enabled'] = not enabled
+            result['preset'] = not preset
+
+
+def handle_state(module, result, service_path):
+    """Set service running state as needed.
+
+    Takes into account the fact that a service may not be loaded (no supervise directory) in
+    which case it is 'stopped' as far as the service manager is concerned. No status information
+    can be obtained and the service can only be 'started'.
+ """ + # default to desired state, no action + result['state'] = module.params['state'] + state = module.params['state'] + action = None + + # computed prior in control flow, possibly modified by handle_enabled() + enabled = result['enabled'] + + # service not loaded -> not started by manager, no status information + if not service_is_loaded(module, service_path): + if state in ['started', 'restarted', 'reloaded']: + action = 'start' + result['state'] = 'started' + elif state == 'reset': + if enabled: + action = 'start' + result['state'] = 'started' + else: + result['state'] = None + else: + result['state'] = None + + # service is loaded + else: + # get status information + result['status'] = get_service_status(module, service_path) + running = service_is_running(result['status']) + + if state == 'started': + if not running: + action = 'start' + elif state == 'stopped': + if running: + action = 'stop' + # reset = start/stop according to enabled status + elif state == 'reset': + if enabled is not running: + if running: + action = 'stop' + result['state'] = 'stopped' + else: + action = 'start' + result['state'] = 'started' + # start if not running, 'service' module constraint + elif state == 'restarted': + if not running: + action = 'start' + result['state'] = 'started' + else: + action = 'condrestart' + # start if not running, 'service' module constraint + elif state == 'reloaded': + if not running: + action = 'start' + result['state'] = 'started' + else: + action = 'hangup' + + # change state as needed + if action: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = run_sys_ctl(module, [action, service_path]) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err)) + +# =========================================== +# Main control flow + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['started', 'stopped', 'reset', 'restarted', 'reloaded']), + enabled=dict(type='bool'), + preset=dict(type='bool'), + user=dict(type='bool', default=False), + ), + supports_check_mode=True, + mutually_exclusive=[['enabled', 'preset']], + ) + + service = module.params['name'] + rc = 0 + out = err = '' + result = { + 'name': service, + 'changed': False, + 'status': None, + } + + # check service can be found (or fail) and get path + service_path = get_service_path(module, service) + + # get preliminary service facts + result['service_path'] = service_path + result['user'] = module.params['user'] + result['enabled'] = service_is_enabled(module, service_path) + result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path) + + # set enabled state, service need not be loaded + if module.params['enabled'] is not None or module.params['preset']: + handle_enabled(module, result, service_path) + + # set service running state + if module.params['state'] is not None: + handle_state(module, result, service_path) + + # get final service status if possible + if service_is_loaded(module, service_path): + result['status'] = get_service_status(module, service_path) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/npm.py b/ansible_collections/community/general/plugins/modules/npm.py new file mode 100644 index 000000000..013fd6e57 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/npm.py @@ -0,0 +1,342 @@ +#!/usr/bin/python +# -*- coding: utf-8 
-*- +# Copyright (c) 2017 Chris Hoffman +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: npm +short_description: Manage node.js packages with npm +description: + - Manage node.js packages with Node Package Manager (npm). +author: "Chris Hoffman (@chrishoffman)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of a node.js library to install. + type: str + required: false + path: + description: + - The base path where to install the node.js libraries. + type: path + required: false + version: + description: + - The version to be installed. + type: str + required: false + global: + description: + - Install the node.js library globally. + required: false + default: false + type: bool + executable: + description: + - The executable location for npm. + - This is useful if you are using a version manager, such as nvm. + type: path + required: false + ignore_scripts: + description: + - Use the C(--ignore-scripts) flag when installing. + required: false + type: bool + default: false + unsafe_perm: + description: + - Use the C(--unsafe-perm) flag when installing. + type: bool + default: false + ci: + description: + - Install packages based on package-lock file, same as running C(npm ci). + type: bool + default: false + production: + description: + - Install dependencies in production mode, excluding devDependencies. + required: false + type: bool + default: false + registry: + description: + - The registry to install modules from. + required: false + type: str + state: + description: + - The state of the node.js library. + required: false + type: str + default: present + choices: [ "present", "absent", "latest" ] + no_optional: + description: + - Use the C(--no-optional) flag when installing. + type: bool + default: false + version_added: 2.0.0 + no_bin_links: + description: + - Use the C(--no-bin-links) flag when installing. + type: bool + default: false + version_added: 2.5.0 +requirements: + - npm installed in bin path (recommended /usr/local/bin) +''' + +EXAMPLES = r''' +- name: Install "coffee-script" node.js package. + community.general.npm: + name: coffee-script + path: /app/location + +- name: Install "coffee-script" node.js package on version 1.6.1. + community.general.npm: + name: coffee-script + version: '1.6.1' + path: /app/location + +- name: Install "coffee-script" node.js package globally. + community.general.npm: + name: coffee-script + global: true + +- name: Remove the globally package "coffee-script". + community.general.npm: + name: coffee-script + global: true + state: absent + +- name: Install "coffee-script" node.js package from custom registry. + community.general.npm: + name: coffee-script + registry: 'http://registry.mysite.com' + +- name: Install packages based on package.json. + community.general.npm: + path: /app/location + +- name: Update packages based on package.json to their latest version. + community.general.npm: + path: /app/location + state: latest + +- name: Install packages based on package.json using the npm installed with nvm v0.10.1. 
+ community.general.npm: + path: /app/location + executable: /opt/nvm/v0.10.1/bin/npm + state: present +''' + +import json +import os +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class Npm(object): + def __init__(self, module, **kwargs): + self.module = module + self.glbl = kwargs['glbl'] + self.name = kwargs['name'] + self.version = kwargs['version'] + self.path = kwargs['path'] + self.registry = kwargs['registry'] + self.production = kwargs['production'] + self.ignore_scripts = kwargs['ignore_scripts'] + self.unsafe_perm = kwargs['unsafe_perm'] + self.state = kwargs['state'] + self.no_optional = kwargs['no_optional'] + self.no_bin_links = kwargs['no_bin_links'] + + if kwargs['executable']: + self.executable = kwargs['executable'].split(' ') + else: + self.executable = [module.get_bin_path('npm', True)] + + if kwargs['version'] and self.state != 'absent': + self.name_version = self.name + '@' + str(self.version) + else: + self.name_version = self.name + + def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = self.executable + args + + if self.glbl: + cmd.append('--global') + if self.production and ('install' in cmd or 'update' in cmd or 'ci' in cmd): + cmd.append('--production') + if self.ignore_scripts: + cmd.append('--ignore-scripts') + if self.unsafe_perm: + cmd.append('--unsafe-perm') + if self.name_version and add_package_name: + cmd.append(self.name_version) + if self.registry: + cmd.append('--registry') + cmd.append(self.registry) + if self.no_optional: + cmd.append('--no-optional') + if self.no_bin_links: + cmd.append('--no-bin-links') + + # If path is specified, cd into that path and run the command. + cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="path %s is not a directory" % self.path) + cwd = self.path + + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out + return '' + + def list(self): + cmd = ['list', '--json', '--long'] + + installed = list() + missing = list() + data = {} + try: + data = json.loads(self._exec(cmd, True, False, False) or '{}') + except (getattr(json, 'JSONDecodeError', ValueError)) as e: + self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e)) + if 'dependencies' in data: + for dep, props in data['dependencies'].items(): + + if 'missing' in props and props['missing']: + missing.append(dep) + elif 'invalid' in props and props['invalid']: + missing.append(dep) + else: + installed.append(dep) + if 'version' in props and props['version']: + dep_version = dep + '@' + str(props['version']) + installed.append(dep_version) + if self.name_version and self.name_version not in installed: + missing.append(self.name) + # Named dependency not installed + else: + missing.append(self.name) + + return installed, missing + + def install(self): + return self._exec(['install']) + + def ci_install(self): + return self._exec(['ci']) + + def update(self): + return self._exec(['update']) + + def uninstall(self): + return self._exec(['uninstall']) + + def list_outdated(self): + outdated = list() + data = self._exec(['outdated'], True, False) + for dep in data.splitlines(): + if dep: + # node.js v0.10.22 changed the `npm outdated` module separator + # from "@" to " ". 
Split on both for backwards compatibility. + pkg, other = re.split(r'\s|@', dep, 1) + outdated.append(pkg) + + return outdated + + +def main(): + arg_spec = dict( + name=dict(default=None, type='str'), + path=dict(default=None, type='path'), + version=dict(default=None, type='str'), + production=dict(default=False, type='bool'), + executable=dict(default=None, type='path'), + registry=dict(default=None, type='str'), + state=dict(default='present', choices=['present', 'absent', 'latest']), + ignore_scripts=dict(default=False, type='bool'), + unsafe_perm=dict(default=False, type='bool'), + ci=dict(default=False, type='bool'), + no_optional=dict(default=False, type='bool'), + no_bin_links=dict(default=False, type='bool'), + ) + arg_spec['global'] = dict(default=False, type='bool') + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + path = module.params['path'] + version = module.params['version'] + glbl = module.params['global'] + production = module.params['production'] + executable = module.params['executable'] + registry = module.params['registry'] + state = module.params['state'] + ignore_scripts = module.params['ignore_scripts'] + unsafe_perm = module.params['unsafe_perm'] + ci = module.params['ci'] + no_optional = module.params['no_optional'] + no_bin_links = module.params['no_bin_links'] + + if not path and not glbl: + module.fail_json(msg='path must be specified when not using global') + if state == 'absent' and not name: + module.fail_json(msg='uninstalling a package is only available for named packages') + + npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, + executable=executable, registry=registry, ignore_scripts=ignore_scripts, + unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links) + + changed = False + if ci: + npm.ci_install() + changed = True + elif state == 'present': + installed, missing = npm.list() + if missing: + changed = True + npm.install() + elif state == 'latest': + installed, missing = npm.list() + outdated = npm.list_outdated() + if missing: + changed = True + npm.install() + if outdated: + changed = True + npm.update() + else: # absent + installed, missing = npm.list() + if name in installed: + changed = True + npm.uninstall() + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/nsupdate.py b/ansible_collections/community/general/plugins/modules/nsupdate.py new file mode 100644 index 000000000..b2a84f76b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/nsupdate.py @@ -0,0 +1,527 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Marcin Skarbek +# Copyright (c) 2016, Andreas Olsson +# Copyright (c) 2017, Loic Blot +# +# This module was ported from https://github.com/mskarbek/ansible-nsupdate +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: nsupdate + +short_description: Manage DNS records +description: + - Create, update and remove DNS records using DDNS updates +requirements: + - dnspython +author: "Loic Blot (@nerzhul)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + 
support: none +options: + state: + description: + - Manage DNS record. + choices: ['present', 'absent'] + default: 'present' + type: str + server: + description: + - Apply DNS modification on this server, specified by IPv4 or IPv6 address. + required: true + type: str + port: + description: + - Use this TCP port when connecting to C(server). + default: 53 + type: int + key_name: + description: + - Use TSIG key name to authenticate against DNS C(server) + type: str + key_secret: + description: + - Use TSIG key secret, associated with C(key_name), to authenticate against C(server) + type: str + key_algorithm: + description: + - Specify key algorithm used by C(key_secret). + choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', + 'hmac-sha512'] + default: 'hmac-md5' + type: str + zone: + description: + - DNS record will be modified on this C(zone). + - When omitted DNS will be queried to attempt finding the correct zone. + - Starting with Ansible 2.7 this parameter is optional. + type: str + record: + description: + - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot). + required: true + type: str + type: + description: + - Sets the record type. + default: 'A' + type: str + ttl: + description: + - Sets the record TTL. + default: 3600 + type: int + value: + description: + - Sets the record value. + type: list + elements: str + protocol: + description: + - Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option. + default: 'tcp' + choices: ['tcp', 'udp'] + type: str +''' + +EXAMPLES = ''' +- name: Add or modify ansible.example.org A to 192.168.1.1" + community.general.nsupdate: + key_name: "nsupdate" + key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" + server: "10.1.1.1" + zone: "example.org" + record: "ansible" + value: "192.168.1.1" + +- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3" + community.general.nsupdate: + key_name: "nsupdate" + key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" + server: "10.1.1.1" + zone: "example.org" + record: "ansible" + value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"] + +- name: Remove puppet.example.org CNAME + community.general.nsupdate: + key_name: "nsupdate" + key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" + server: "10.1.1.1" + zone: "example.org" + record: "puppet" + type: "CNAME" + state: absent + +- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org + community.general.nsupdate: + key_name: "nsupdate" + key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" + server: "10.1.1.1" + record: "1.1.168.192.in-addr.arpa." + type: "PTR" + value: "ansible.example.org." + state: present + +- name: Remove 1.1.168.192.in-addr.arpa. PTR + community.general.nsupdate: + key_name: "nsupdate" + key_secret: "+bFQtBCta7j2vWkjPkAFtgA==" + server: "10.1.1.1" + record: "1.1.168.192.in-addr.arpa." + type: "PTR" + state: absent +''' + +RETURN = ''' +changed: + description: If module has modified record + returned: success + type: str +record: + description: DNS record + returned: success + type: str + sample: 'ansible' +ttl: + description: DNS record TTL + returned: success + type: int + sample: 86400 +type: + description: DNS record type + returned: success + type: str + sample: 'CNAME' +value: + description: DNS record value(s) + returned: success + type: list + sample: '192.168.1.1' +zone: + description: DNS record zone + returned: success + type: str + sample: 'example.org.' 
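
# A minimal sketch (dnspython, which this module requires; server and key
# values are taken from the examples above) of the signed dynamic update
# performed by RecordManager below: build a TSIG keyring, add a record, send
# the update over TCP and inspect the response code.
import dns.query
import dns.rcode
import dns.tsigkeyring
import dns.update

keyring = dns.tsigkeyring.from_text({'nsupdate': '+bFQtBCta7j2vWkjPkAFtgA=='})
update = dns.update.Update('example.org.', keyring=keyring,
                           keyalgorithm='HMAC-MD5.SIG-ALG.REG.INT')
update.add('ansible', 3600, 'A', '192.168.1.1')
response = dns.query.tcp(update, '10.1.1.1', timeout=10, port=53)
print(dns.rcode.to_text(response.rcode()))  # e.g. 'NOERROR' or 'REFUSED'
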
+dns_rc: + description: dnspython return code + returned: always + type: int + sample: 4 +dns_rc_str: + description: dnspython return code (string representation) + returned: always + type: str + sample: 'REFUSED' +''' + +import traceback + +from binascii import Error as binascii_error +from socket import error as socket_error + +DNSPYTHON_IMP_ERR = None +try: + import dns.update + import dns.query + import dns.tsigkeyring + import dns.message + import dns.resolver + + HAVE_DNSPYTHON = True +except ImportError: + DNSPYTHON_IMP_ERR = traceback.format_exc() + HAVE_DNSPYTHON = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class RecordManager(object): + def __init__(self, module): + self.module = module + + if module.params['key_name']: + try: + self.keyring = dns.tsigkeyring.from_text({ + module.params['key_name']: module.params['key_secret'] + }) + except TypeError: + module.fail_json(msg='Missing key_secret') + except binascii_error as e: + module.fail_json(msg='TSIG key error: %s' % to_native(e)) + else: + self.keyring = None + + if module.params['key_algorithm'] == 'hmac-md5': + self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT' + else: + self.algorithm = module.params['key_algorithm'] + + if module.params['zone'] is None: + if module.params['record'][-1] != '.': + self.module.fail_json(msg='record must be absolute when omitting zone parameter') + self.zone = self.lookup_zone() + else: + self.zone = module.params['zone'] + + if self.zone[-1] != '.': + self.zone += '.' + + if module.params['record'][-1] != '.': + self.fqdn = module.params['record'] + '.' + self.zone + else: + self.fqdn = module.params['record'] + + if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None: + self.value = list(map(self.txt_helper, self.module.params['value'])) + else: + self.value = self.module.params['value'] + + self.dns_rc = 0 + + def txt_helper(self, entry): + if entry[0] == '"' and entry[-1] == '"': + return entry + return '"{text}"'.format(text=entry) + + def lookup_zone(self): + name = dns.name.from_text(self.module.params['record']) + while True: + query = dns.message.make_query(name, dns.rdatatype.SOA) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]: + self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % ( + self.module.params['server'], self.module.params['record'])) + # If the response contains an Answer SOA RR whose name matches the queried name, + # this is the name of the zone in which the record needs to be inserted. 
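
# A standalone sketch (dnspython; simplified, without TSIG or error handling)
# of the zone discovery used by lookup_zone() above: query SOA for the record
# name and walk up one label at a time until the zone apex answers.
import dns.message
import dns.name
import dns.query
import dns.rdatatype

def find_zone(record, server):
    name = dns.name.from_text(record)  # record must be absolute
    while True:
        query = dns.message.make_query(name, dns.rdatatype.SOA)
        response = dns.query.udp(query, server, timeout=10)
        for rr in response.answer:
            if rr.rdtype == dns.rdatatype.SOA and rr.name == name:
                return rr.name.to_text()
        name = name.parent()  # raises dns.name.NoParent at the root
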
+ for rr in lookup.answer: + if rr.rdtype == dns.rdatatype.SOA and rr.name == name: + return rr.name.to_text() + # If the response contains an Authority SOA RR whose name is a subdomain of the queried name, + # this SOA name is the zone in which the record needs to be inserted. + for rr in lookup.authority: + if rr.rdtype == dns.rdatatype.SOA and name.fullcompare(rr.name)[0] == dns.name.NAMERELN_SUBDOMAIN: + return rr.name.to_text() + try: + name = name.parent() + except dns.name.NoParent: + self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record'])) + + def __do_update(self, update): + response = None + try: + if self.module.params['protocol'] == 'tcp': + response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + return response + + def create_or_update_record(self): + result = {'changed': False, 'failed': False} + + exists = self.record_exists() + if exists in [0, 2]: + if self.module.check_mode: + self.module.exit_json(changed=True) + + if exists == 0: + self.dns_rc = self.create_record() + if self.dns_rc != 0: + result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc + + elif exists == 2: + self.dns_rc = self.modify_record() + if self.dns_rc != 0: + result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc + + if self.dns_rc != 0: + result['failed'] = True + else: + result['changed'] = True + + else: + result['changed'] = False + + return result + + def create_record(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + for entry in self.value: + try: + update.add(self.module.params['record'], + self.module.params['ttl'], + self.module.params['type'], + entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + + response = self.__do_update(update) + return dns.message.Message.rcode(response) + + def modify_record(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + + if self.module.params['type'].upper() == 'NS': + # When modifying a NS record, Bind9 silently refuses to delete all the NS entries for a zone: + # > 09-May-2022 18:00:50.352 client @0x7fe7dd1f9568 192.168.1.3#45458/key rndc_ddns_ansible: + # > updating zone 'lab/IN': attempt to delete all SOA or NS records ignored + # https://gitlab.isc.org/isc-projects/bind9/-/blob/v9_18/lib/ns/update.c#L3304 + # Let's perform dns inserts and updates first, deletes after. 
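
# A small illustration (hypothetical name-server sets) of the NS ordering rule
# described in the comment above: compute the leftovers first, add the desired
# NS records, and only then delete the leftovers within the same update.
desired = ['ns1.example.org.', 'ns2.example.org.']
current = ['ns1.example.org.', 'ns3.example.org.']

entries_to_remove = [name for name in current if name not in desired]
assert entries_to_remove == ['ns3.example.org.']
# update.add(...) every entry in desired first; update.delete(...) each entry
# in entries_to_remove afterwards, all in a single dns.update.Update message.
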
+ query = dns.message.make_query(self.module.params['record'], self.module.params['type']) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + + entries_to_remove = [n.to_text() for n in lookup.answer[0].items if n.to_text() not in self.value] + else: + update.delete(self.module.params['record'], self.module.params['type']) + + for entry in self.value: + try: + update.add(self.module.params['record'], + self.module.params['ttl'], + self.module.params['type'], + entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + + if self.module.params['type'].upper() == 'NS': + for entry in entries_to_remove: + update.delete(self.module.params['record'], self.module.params['type'], entry) + + response = self.__do_update(update) + + return dns.message.Message.rcode(response) + + def remove_record(self): + result = {'changed': False, 'failed': False} + + if self.record_exists() == 0: + return result + + # The record exists and we are in check mode: report the change without applying it. + if self.module.check_mode: + self.module.exit_json(changed=True) + + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + update.delete(self.module.params['record'], self.module.params['type']) + + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + + if self.dns_rc != 0: + result['failed'] = True + result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc + else: + result['changed'] = True + + return result + + def record_exists(self): + update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) + try: + update.present(self.module.params['record'], self.module.params['type']) + except dns.rdatatype.UnknownRdatatype as e: + self.module.fail_json(msg='Record error: {0}'.format(to_native(e))) + + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + if self.dns_rc == 0: + if self.module.params['state'] == 'absent': + return 1 + for entry in self.value: + try: + update.present(self.module.params['record'], self.module.params['type'], entry) + except AttributeError: + self.module.fail_json(msg='value needed when state=present') + except dns.exception.SyntaxError: + self.module.fail_json(msg='Invalid/malformed value') + response = self.__do_update(update) + self.dns_rc = dns.message.Message.rcode(response) + if self.dns_rc == 0: + if self.ttl_changed(): + return 2 + else: + return 1 + else: + return 2 + else: + return 0 + + def ttl_changed(self): + query = dns.message.make_query(self.fqdn, self.module.params['type']) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: +
lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + + if lookup.rcode() != dns.rcode.NOERROR: + self.module.fail_json(msg='Failed to lookup TTL of existing matching record.') + + if self.module.params['type'] == 'NS': + current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl + else: + current_ttl = lookup.answer[0].ttl + return current_ttl != self.module.params['ttl'] + + +def main(): + tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', + 'hmac-sha256', 'hmac-sha384', 'hmac-sha512'] + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), + server=dict(required=True, type='str'), + port=dict(required=False, default=53, type='int'), + key_name=dict(required=False, type='str'), + key_secret=dict(required=False, type='str', no_log=True), + key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'), + zone=dict(required=False, default=None, type='str'), + record=dict(required=True, type='str'), + type=dict(required=False, default='A', type='str'), + ttl=dict(required=False, default=3600, type='int'), + value=dict(required=False, default=None, type='list', elements='str'), + protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str') + ), + supports_check_mode=True + ) + + if not HAVE_DNSPYTHON: + module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR) + + if len(module.params["record"]) == 0: + module.fail_json(msg='record cannot be empty.') + + record = RecordManager(module) + result = {} + if module.params["state"] == 'absent': + result = record.remove_record() + elif module.params["state"] == 'present': + result = record.create_or_update_record() + + result['dns_rc'] = record.dns_rc + result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc) + if result['failed']: + module.fail_json(**result) + else: + result['record'] = dict(zone=record.zone, + record=module.params['record'], + type=module.params['type'], + ttl=module.params['ttl'], + value=record.value) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ocapi_command.py b/ansible_collections/community/general/plugins/modules/ocapi_command.py new file mode 100644 index 000000000..ed2366736 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ocapi_command.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ocapi_command +version_added: 6.3.0 +short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI) +description: + - Builds OCAPI URIs locally and sends them to remote OOB controllers to + perform an action. 
+ - Manages OOB controller such as Indicator LED, Reboot, Power Mode, Firmware Update. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - Command to execute on OOB controller. + type: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + proxy_slot_number: + description: For proxied inband requests, the slot number of the IOM. Only applies if I(baseuri) is a proxy server. + type: int + update_image_path: + required: false + description: + - For C(FWUpload), the path on the local filesystem of the firmware update image. + type: str + job_name: + required: false + description: + - For C(DeleteJob) command, the name of the job to delete. + type: str + username: + required: true + description: + - Username for authenticating to OOB controller. + type: str + password: + required: true + description: + - Password for authenticating to OOB controller. + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. + default: 10 + type: int + +author: "Mike Moerk (@mikemoerk)" +''' + +EXAMPLES = ''' + - name: Set the power state to low + community.general.ocapi_command: + category: Chassis + command: PowerModeLow + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set the power state to normal + community.general.ocapi_command: + category: Chassis + command: PowerModeNormal + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + - name: Set chassis indicator LED to on + community.general.ocapi_command: + category: Chassis + command: IndicatorLedOn + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" + - name: Set chassis indicator LED to off + community.general.ocapi_command: + category: Chassis + command: IndicatorLedOff + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" + - name: Reset Enclosure + community.general.ocapi_command: + category: Systems + command: PowerGracefulRestart + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" + - name: Firmware Upload + community.general.ocapi_command: + category: Update + command: FWUpload + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" + update_image_path: "/path/to/firmware.tar.gz" + - name: Firmware Update + community.general.ocapi_command: + category: Update + command: FWUpdate + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" + - name: Firmware Activate + community.general.ocapi_command: + category: Update + command: FWActivate + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" + - name: Delete Job + community.general.ocapi_command: + category: Jobs + command: DeleteJob + job_name: FirmwareUpdate + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +msg: + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" + +jobUri: + description: URI to use to monitor status of the operation. 
Returned for async commands such as Firmware Update, Firmware Activate. + returned: when supported + type: str + sample: "https://ioma.wdc.com/Storage/Devices/openflex-data24-usalp03020qb0003/Jobs/FirmwareUpdate/" + +operationStatusId: + description: OCAPI State ID (see OCAPI documentation for possible values). + returned: when supported + type: int + sample: 2 + +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.six.moves.urllib.parse import urljoin + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "PowerModeLow", "PowerModeNormal"], + "Systems": ["PowerGracefulRestart"], + "Update": ["FWUpload", "FWUpdate", "FWActivate"], + "Jobs": ["DeleteJob"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='str'), + job_name=dict(type='str'), + baseuri=dict(required=True, type='str'), + proxy_slot_number=dict(type='int'), + update_image_path=dict(type='str'), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + category = module.params['category'] + command = module.params['command'] + + # admin credentials used for authentication + creds = { + 'user': module.params['username'], + 'pswd': module.params['password'] + } + + # timeout + timeout = module.params['timeout'] + + base_uri = "https://" + module.params["baseuri"] + proxy_slot_number = module.params.get("proxy_slot_number") + ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that the command is valid + if command not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Chassis": + if command.startswith("IndicatorLed"): + result = ocapi_utils.manage_chassis_indicator_led(command) + elif command.startswith("PowerMode"): + result = ocapi_utils.manage_system_power(command) + elif category == "Systems": + if command.startswith("Power"): + result = ocapi_utils.manage_system_power(command) + elif category == "Update": + if command == "FWUpload": + update_image_path = module.params.get("update_image_path") + if update_image_path is None: + module.fail_json(msg=to_native("Missing update_image_path.")) + result = ocapi_utils.upload_firmware_image(update_image_path) + elif command == "FWUpdate": + result = ocapi_utils.update_firmware_image() + elif command == "FWActivate": + result = ocapi_utils.activate_firmware_image() + elif category == "Jobs": + if command == "DeleteJob": + job_name = module.params.get("job_name") + if job_name is None: + module.fail_json("Missing job_name") + job_uri = urljoin(base_uri, "Jobs/" + job_name) + result = ocapi_utils.delete_job(job_uri) + + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + else: + del result['ret'] + changed = result.get('changed', True) + session = result.get('session', dict()) + kwargs = { + "changed": changed, + "session": session, + "msg": "Action was successful." if not module.check_mode else result.get( + "msg", "No action performed in check mode." + ) + } + result_keys = [result_key for result_key in result if result_key not in kwargs] + for result_key in result_keys: + kwargs[result_key] = result[result_key] + module.exit_json(**kwargs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ocapi_info.py b/ansible_collections/community/general/plugins/modules/ocapi_info.py new file mode 100644 index 000000000..d7dfdccc7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ocapi_info.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: ocapi_info +version_added: 6.3.0 +short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI) +description: + - Builds OCAPI URIs locally and sends them to remote OOB controllers to + get information back. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - Command to execute on OOB controller. + type: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + proxy_slot_number: + description: For proxied inband requests, the slot number of the IOM. Only applies if I(baseuri) is a proxy server. + type: int + username: + required: true + description: + - Username for authenticating to OOB controller. + type: str + password: + required: true + description: + - Password for authenticating to OOB controller. + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. 
+ default: 10 + type: int + job_name: + description: + - Name of job for fetching status. + type: str + + +author: "Mike Moerk (@mikemoerk)" +''' + +EXAMPLES = ''' + - name: Get job status + community.general.ocapi_info: + category: Jobs + command: JobStatus + baseuri: "iom1.wdc.com" + job_name: FirmwareUpdate + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +msg: + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" + +percentComplete: + description: Percent complete of the relevant operation. Applies to C(JobStatus) command. + returned: when supported + type: int + sample: 99 + +operationStatus: + description: Status of the relevant operation. Applies to C(JobStatus) command. See OCAPI documentation for details. + returned: when supported + type: str + sample: "Activate needed" + +operationStatusId: + description: Integer value of status (corresponds to operationStatus). Applies to C(JobStatus) command. See OCAPI documentation for details. + returned: when supported + type: int + sample: 65540 + +operationHealth: + description: Health of the operation. Applies to C(JobStatus) command. See OCAPI documentation for details. + returned: when supported + type: str + sample: "OK" + +operationHealthId: + description: > + Integer value for health of the operation (corresponds to C(operationHealth)). Applies to C(JobStatus) command. + See OCAPI documentation for details. + returned: when supported + type: int + sample: 5 + +details: + description: Details of the relevant operation. Applies to C(JobStatus) command. + returned: when supported + type: list + elements: str + +status: + description: Dict containing status information. See OCAPI documentation for details. + returned: when supported + type: dict + sample: { + "Details": [ + "None" + ], + "Health": [ + { + "ID": 5, + "Name": "OK" + } + ], + "State": { + "ID": 16, + "Name": "In service" + } + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.six.moves.urllib.parse import urljoin + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Jobs": ["JobStatus"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='str'), + job_name=dict(type='str'), + baseuri=dict(required=True, type='str'), + proxy_slot_number=dict(type='int'), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + category = module.params['category'] + command = module.params['command'] + + # admin credentials used for authentication + creds = { + 'user': module.params['username'], + 'pswd': module.params['password'] + } + + # timeout + timeout = module.params['timeout'] + + base_uri = "https://" + module.params["baseuri"] + proxy_slot_number = module.params.get("proxy_slot_number") + ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'.
Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that the command is valid + if command not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Jobs": + if command == "JobStatus": + if module.params.get("job_name") is None: + module.fail_json(msg=to_native( + "job_name required for JobStatus command.")) + job_uri = urljoin(base_uri, 'Jobs/' + module.params["job_name"]) + result = ocapi_utils.get_job_status(job_uri) + + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + else: + del result['ret'] + changed = False + session = result.get('session', dict()) + kwargs = { + "changed": changed, + "session": session, + "msg": "Action was successful." if not module.check_mode else result.get( + "msg", "No action performed in check mode." + ) + } + result_keys = [result_key for result_key in result if result_key not in kwargs] + for result_key in result_keys: + kwargs[result_key] = result[result_key] + module.exit_json(**kwargs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oci_vcn.py b/ansible_collections/community/general/plugins/modules/oci_vcn.py new file mode 100644 index 000000000..4e6487b8f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oci_vcn.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, 2018, Oracle and/or its affiliates. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oci_vcn +short_description: Manage Virtual Cloud Networks(VCN) in OCI +description: + - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. + The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from + U(https://github.com/oracle/oci-ansible-modules/releases). +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + cidr_block: + description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present). + type: str + required: false + compartment_id: + description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present). + This option is mutually exclusive with I(vcn_id). + type: str + display_name: + description: A user-friendly name. Does not have to be unique, and it's changeable. + type: str + aliases: [ 'name' ] + dns_label: + description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to + form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example, + bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice + to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins + with a letter. The value cannot be changed. + type: str + state: + description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN. + type: str + default: present + choices: ['present', 'absent'] + vcn_id: + description: The OCID of the VCN. 
Required when deleting a VCN with I(state=absent) or updating a VCN + with I(state=present). This option is mutually exclusive with I(compartment_id). + type: str + aliases: [ 'id' ] +author: "Rohit Chaware (@rohitChaware)" +extends_documentation_fragment: + - community.general.oracle + - community.general.oracle_creatable_resource + - community.general.oracle_wait_options + - community.general.oracle_tags + - community.general.attributes + +''' + +EXAMPLES = """ +- name: Create a VCN + community.general.oci_vcn: + cidr_block: '10.0.0.0/16' + compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx' + display_name: my_vcn + dns_label: ansiblevcn + +- name: Update the specified VCN's display name + community.general.oci_vcn: + vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx + display_name: ansible_vcn + +- name: Delete the specified VCN + community.general.oci_vcn: + vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx + state: absent +""" + +RETURN = """ +vcn: + description: Information about the VCN. + returned: On successful create and update operation + type: dict + sample: { + "cidr_block": "10.0.0.0/16", + "compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx", + "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx", + "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx", + "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx", + "display_name": "ansible_vcn", + "dns_label": "ansiblevcn", + "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx", + "lifecycle_state": "AVAILABLE", + "time_created": "2017-11-13T20:22:40.626000+00:00", + "vcn_domain_name": "ansiblevcn.oraclevcn.com" + } +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils + +try: + from oci.core.virtual_network_client import VirtualNetworkClient + from oci.core.models import CreateVcnDetails + from oci.core.models import UpdateVcnDetails + + HAS_OCI_PY_SDK = True +except ImportError: + HAS_OCI_PY_SDK = False + + +def delete_vcn(virtual_network_client, module): + result = oci_utils.delete_and_wait( + resource_type="vcn", + client=virtual_network_client, + get_fn=virtual_network_client.get_vcn, + kwargs_get={"vcn_id": module.params["vcn_id"]}, + delete_fn=virtual_network_client.delete_vcn, + kwargs_delete={"vcn_id": module.params["vcn_id"]}, + module=module, + ) + return result + + +def update_vcn(virtual_network_client, module): + result = oci_utils.check_and_update_resource( + resource_type="vcn", + client=virtual_network_client, + get_fn=virtual_network_client.get_vcn, + kwargs_get={"vcn_id": module.params["vcn_id"]}, + update_fn=virtual_network_client.update_vcn, + primitive_params_update=["vcn_id"], + kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"}, + module=module, + update_attributes=list(UpdateVcnDetails().attribute_map.keys()), + ) + return result + + +def create_vcn(virtual_network_client, module): + create_vcn_details = CreateVcnDetails() + for attribute in create_vcn_details.attribute_map.keys(): + if attribute in module.params: + setattr(create_vcn_details, attribute, module.params[attribute]) + + result = oci_utils.create_and_wait( + resource_type="vcn", + create_fn=virtual_network_client.create_vcn, + kwargs_create={"create_vcn_details": create_vcn_details}, + client=virtual_network_client, + get_fn=virtual_network_client.get_vcn, + get_param="vcn_id", + module=module, + ) + return result + + +def main(): + module_args =
oci_utils.get_taggable_arg_spec( + supports_create=True, supports_wait=True + ) + module_args.update( + dict( + cidr_block=dict(type="str", required=False), + compartment_id=dict(type="str", required=False), + display_name=dict(type="str", required=False, aliases=["name"]), + dns_label=dict(type="str", required=False), + state=dict( + type="str", + required=False, + default="present", + choices=["absent", "present"], + ), + vcn_id=dict(type="str", required=False, aliases=["id"]), + ) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=False, + mutually_exclusive=[["compartment_id", "vcn_id"]], + ) + + if not HAS_OCI_PY_SDK: + module.fail_json(msg=missing_required_lib("oci")) + + virtual_network_client = oci_utils.create_service_client( + module, VirtualNetworkClient + ) + + exclude_attributes = {"display_name": True, "dns_label": True} + state = module.params["state"] + vcn_id = module.params["vcn_id"] + + if state == "absent": + if vcn_id is not None: + result = delete_vcn(virtual_network_client, module) + else: + module.fail_json( + msg="Specify vcn_id with state as 'absent' to delete a VCN." + ) + + else: + if vcn_id is not None: + result = update_vcn(virtual_network_client, module) + else: + result = oci_utils.check_and_create_resource( + resource_type="vcn", + create_fn=create_vcn, + kwargs_create={ + "virtual_network_client": virtual_network_client, + "module": module, + }, + list_fn=virtual_network_client.list_vcns, + kwargs_list={"compartment_id": module.params["compartment_id"]}, + module=module, + model=CreateVcnDetails(), + exclude_attributes=exclude_attributes, + ) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/odbc.py b/ansible_collections/community/general/plugins/modules/odbc.py new file mode 100644 index 000000000..fbc4b63ae --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/odbc.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, John Westcott +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: odbc +author: "John Westcott IV (@john-westcott-iv)" +version_added: "1.0.0" +short_description: Execute SQL via ODBC +description: + - Read/Write info via ODBC drivers. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + dsn: + description: + - The connection string passed into ODBC. + required: true + type: str + query: + description: + - The SQL query to perform. + required: true + type: str + params: + description: + - Parameters to pass to the SQL query. + type: list + elements: str + commit: + description: + - Perform a commit after the execution of the SQL query. + - Some databases allow a commit after a select whereas others raise an exception. + - Default is C(true) to support legacy module behavior. + type: bool + default: true + version_added: 1.3.0 +requirements: + - "python >= 2.6" + - "pyodbc" + +notes: + - "Like the command module, this module always returns changed = yes whether or not the query would change the database." + - "To alter this behavior you can use C(changed_when): [yes or no]." 
+ - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)." +''' + +EXAMPLES = ''' +- name: Set some values in the test db + community.general.odbc: + dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;" + query: "Select * from table_a where column1 = ?" + params: + - "value1" + commit: false + changed_when: false +''' + +RETURN = ''' +results: + description: List of lists of strings containing selected rows, likely empty for DDL statements. + returned: success + type: list + elements: list +description: + description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes." + returned: success + type: list + elements: dict +row_count: + description: "The number of rows selected or modified according to the cursor defaults to -1. See notes." + returned: success + type: str +''' + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +HAS_PYODBC = None +try: + import pyodbc + HAS_PYODBC = True +except ImportError as e: + HAS_PYODBC = False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dsn=dict(type='str', required=True, no_log=True), + query=dict(type='str', required=True), + params=dict(type='list', elements='str'), + commit=dict(type='bool', default=True), + ), + ) + + dsn = module.params.get('dsn') + query = module.params.get('query') + params = module.params.get('params') + commit = module.params.get('commit') + + if not HAS_PYODBC: + module.fail_json(msg=missing_required_lib('pyodbc')) + + # Try to make a connection with the DSN + connection = None + try: + connection = pyodbc.connect(dsn) + except Exception as e: + module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e))) + + result = dict( + changed=True, + description=[], + row_count=-1, + results=[], + ) + + try: + cursor = connection.cursor() + + if params: + cursor.execute(query, params) + else: + cursor.execute(query) + if commit: + cursor.commit() + try: + # Get the rows out into an 2d array + for row in cursor.fetchall(): + new_row = [] + for column in row: + new_row.append("{0}".format(column)) + result['results'].append(new_row) + + # Return additional information from the cursor + for row_description in cursor.description: + description = {} + description['name'] = row_description[0] + description['type'] = row_description[1].__name__ + description['display_size'] = row_description[2] + description['internal_size'] = row_description[3] + description['precision'] = row_description[4] + description['scale'] = row_description[5] + description['nullable'] = row_description[6] + result['description'].append(description) + + result['row_count'] = cursor.rowcount + except pyodbc.ProgrammingError as pe: + pass + except Exception as e: + module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e))) + + cursor.close() + except Exception as e: + module.fail_json(msg="Failed to execute query: {0}".format(to_native(e))) + finally: + connection.close() + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/office_365_connector_card.py b/ansible_collections/community/general/plugins/modules/office_365_connector_card.py new file mode 100644 index 000000000..ed8ebd188 --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/office_365_connector_card.py @@ -0,0 +1,310 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Marc Sensenich +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +module: office_365_connector_card +short_description: Use webhooks to create Connector Card messages within an Office 365 group +description: + - Creates Connector Card messages through + Office 365 Connectors + U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups). +author: "Marc Sensenich (@marc-sensenich)" +notes: + - This module is not idempotent; if the same task is run twice, + two Connector Cards will be created. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + webhook: + type: str + description: + - The webhook URL is given to you when you create a new Connector. + required: true + summary: + type: str + description: + - A string used for summarizing card content. + - This will be shown as the message subject. + - This is required if the text parameter isn't populated. + color: + type: str + description: + - Accent color used for branding or indicating status in the card. + title: + type: str + description: + - A title for the Connector message. Shown at the top of the message. + text: + type: str + description: + - The main text of the card. + - This will be rendered below the sender information and optional title, and above any sections or actions present. + actions: + type: list + elements: dict + description: + - This array of objects will power the action links found at the bottom of the card. + sections: + type: list + elements: dict + description: + - Contains a list of sections to display in the card. + - For more information see U(https://learn.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#section-fields). +''' + +EXAMPLES = """ +- name: Create a simple Connector Card + community.general.office_365_connector_card: + webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID + text: 'Hello, World!' + +- name: Create a Connector Card with the full format + community.general.office_365_connector_card: + webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID + summary: This is the summary property + title: This is the **card's title** property + text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur + adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. + color: E81123 + sections: + - title: This is the **section's title** property + activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg + activity_title: This is the section's **activityTitle** property + activity_subtitle: This is the section's **activitySubtitle** property + activity_text: This is the section's **activityText** property. + hero_image: + image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg + title: This is the image's alternate text + text: This is the section's text property.
Lorem ipsum dolor sit amet, consectetur + adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. + facts: + - name: This is a fact name + value: This is a fact value + - name: This is a fact name + value: This is a fact value + - name: This is a fact name + value: This is a fact value + images: + - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg + title: This is the image's alternate text + - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg + title: This is the image's alternate text + - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg + title: This is the image's alternate text + actions: + - "@type": ActionCard + name: Comment + inputs: + - "@type": TextInput + id: comment + is_multiline: true + title: Input's title property + actions: + - "@type": HttpPOST + name: Save + target: http://... + - "@type": ActionCard + name: Due Date + inputs: + - "@type": DateInput + id: dueDate + title: Input's title property + actions: + - "@type": HttpPOST + name: Save + target: http://... + - "@type": HttpPOST + name: Action's name prop. + target: http://... + - "@type": OpenUri + name: Action's name prop + targets: + - os: default + uri: http://... + - start_group: true + title: This is the title of a **second section** + text: This second section is visually separated from the first one by setting its + **startGroup** property to true. +""" + +RETURN = """ +""" + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions" +OFFICE_365_CARD_TYPE = "MessageCard" +OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required." +OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable." 
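+ +# A sketch of the MessageCard payload this module posts (illustrative placeholder values, +# assuming only summary and text are set; see build_payload_for_connector_card() below): +# { +# "@context": "http://schema.org/extensions", +# "@type": "MessageCard", +# "summary": "Deploy finished", +# "text": "Hello, World!" +# }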
+ + +def build_actions(actions): + action_items = [] + + for action in actions: + action_item = snake_dict_to_camel_dict(action) + action_items.append(action_item) + + return action_items + + +def build_sections(sections): + sections_created = [] + + for section in sections: + sections_created.append(build_section(section)) + + return sections_created + + +def build_section(section): + section_payload = dict() + + if 'title' in section: + section_payload['title'] = section['title'] + + if 'start_group' in section: + section_payload['startGroup'] = section['start_group'] + + if 'activity_image' in section: + section_payload['activityImage'] = section['activity_image'] + + if 'activity_title' in section: + section_payload['activityTitle'] = section['activity_title'] + + if 'activity_subtitle' in section: + section_payload['activitySubtitle'] = section['activity_subtitle'] + + if 'activity_text' in section: + section_payload['activityText'] = section['activity_text'] + + if 'hero_image' in section: + section_payload['heroImage'] = section['hero_image'] + + if 'text' in section: + section_payload['text'] = section['text'] + + if 'facts' in section: + section_payload['facts'] = section['facts'] + + if 'images' in section: + section_payload['images'] = section['images'] + + if 'actions' in section: + section_payload['potentialAction'] = build_actions(section['actions']) + + return section_payload + + +def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None): + payload = dict() + payload['@context'] = OFFICE_365_CARD_CONTEXT + payload['@type'] = OFFICE_365_CARD_TYPE + + if summary is not None: + payload['summary'] = summary + + if color is not None: + payload['themeColor'] = color + + if title is not None: + payload['title'] = title + + if text is not None: + payload['text'] = text + + if actions: + payload['potentialAction'] = build_actions(actions) + + if sections: + payload['sections'] = build_sections(sections) + + payload = module.jsonify(payload) + return payload + + +def do_notify_connector_card_webhook(module, webhook, payload): + headers = { + 'Content-Type': 'application/json' + } + + response, info = fetch_url( + module=module, + url=webhook, + headers=headers, + method='POST', + data=payload + ) + + if info['status'] == 200: + module.exit_json(changed=True) + elif info['status'] == 400 and module.check_mode: + if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG: + module.exit_json(changed=True) + else: + module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG) + else: + module.fail_json( + msg="failed to send %s as a connector card to Incoming Webhook: %s" + % (payload, info['msg']) + ) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + webhook=dict(required=True, no_log=True), + summary=dict(type='str'), + color=dict(type='str'), + title=dict(type='str'), + text=dict(type='str'), + actions=dict(type='list', elements='dict'), + sections=dict(type='list', elements='dict') + ), + supports_check_mode=True + ) + + webhook = module.params['webhook'] + summary = module.params['summary'] + color = module.params['color'] + title = module.params['title'] + text = module.params['text'] + actions = module.params['actions'] + sections = module.params['sections'] + + payload = build_payload_for_connector_card( + module, + summary, + color, + title, + text, + actions, + sections) + + if module.check_mode: + # In check mode, send an empty payload to validate connection + check_mode_payload = 
build_payload_for_connector_card(module) + do_notify_connector_card_webhook(module, webhook, check_mode_payload) + + do_notify_connector_card_webhook(module, webhook, payload) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ohai.py b/ansible_collections/community/general/plugins/modules/ohai.py new file mode 100644 index 000000000..7fdab3bb7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ohai.py @@ -0,0 +1,55 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2012, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: ohai +short_description: Returns inventory data from I(Ohai) +description: + - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program + (U(https://docs.chef.io/ohai.html)) on the remote host and + returns JSON inventory data. + I(Ohai) data is a bit more verbose and nested than I(facter). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: {} +notes: [] +requirements: [ "ohai" ] +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" +''' + +EXAMPLES = ''' +# Retrieve Ohai data from all Web servers and store it in one file per host +ansible webservers -m ohai --tree=/tmp/ohaidata +''' +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict() + ) + cmd = ["/usr/bin/env", "ohai"] + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(**json.loads(out)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/omapi_host.py b/ansible_collections/community/general/plugins/modules/omapi_host.py new file mode 100644 index 000000000..c93c57853 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/omapi_host.py @@ -0,0 +1,319 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: omapi_host +short_description: Set up OMAPI hosts +description: Manage OMAPI hosts on compatible DHCPd servers +requirements: + - pypureomapi +author: + - Loic Blot (@nerzhul) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Create or remove OMAPI host. + type: str + required: true + choices: [ absent, present ] + hostname: + description: + - Sets the host lease hostname (mandatory if state=present). + type: str + aliases: [ name ] + host: + description: + - Sets OMAPI server host to interact with. + type: str + default: localhost + port: + description: + - Sets the OMAPI server port to interact with.
+ type: int + default: 7911 + key_name: + description: + - Sets the TSIG key name for authenticating against OMAPI server. + type: str + required: true + key: + description: + - Sets the TSIG key content for authenticating against OMAPI server. + type: str + required: true + macaddr: + description: + - Sets the lease host MAC address. + type: str + required: true + ip: + description: + - Sets the lease host IP address. + type: str + statements: + description: + - Attach a list of OMAPI DHCP statements to the host lease (without ending semicolon). + type: list + elements: str + default: [] + ddns: + description: + - Enable dynamic DNS updates for this host. + type: bool + default: false + +''' +EXAMPLES = r''' +- name: Add a host using OMAPI + community.general.omapi_host: + key_name: defomapi + key: +bFQtBCta6j2vWkjPkNFtgA== + host: 10.98.4.55 + macaddr: 44:dd:ab:dd:11:44 + name: server01 + ip: 192.168.88.99 + ddns: true + statements: + - filename "pxelinux.0" + - next-server 1.1.1.1 + state: present + +- name: Remove a host using OMAPI + community.general.omapi_host: + key_name: defomapi + key: +bFQtBCta6j2vWkjPkNFtgA== + host: 10.1.1.1 + macaddr: 00:66:ab:dd:11:44 + state: absent +''' + +RETURN = r''' +lease: + description: Dictionary containing host information. + returned: success + type: complex + contains: + ip-address: + description: IP address, if one is assigned. + returned: success + type: str + sample: '192.168.1.5' + hardware-address: + description: MAC address. + returned: success + type: str + sample: '00:11:22:33:44:55' + hardware-type: + description: Hardware type, generally 1. + returned: success + type: int + sample: 1 + name: + description: Hostname. + returned: success + type: str + sample: 'mydesktop' +''' + +import binascii +import socket +import struct +import traceback + +PUREOMAPI_IMP_ERR = None +try: + from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound + from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac + from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE + pureomapi_found = True +except ImportError: + PUREOMAPI_IMP_ERR = traceback.format_exc() + pureomapi_found = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +class OmapiHostManager: + def __init__(self, module): + self.module = module + self.omapi = None + self.connect() + + def connect(self): + try: + self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']), + self.module.params['key']) + except binascii.Error: + self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.") + except OmapiError as e: + self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' " + "are valid.
Exception was: %s" % to_native(e)) + except socket.error as e: + self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e)) + + def get_host(self, macaddr): + msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict')) + msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr))) + msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1))) + response = self.omapi.query_server(msg) + if response.opcode != OMAPI_OP_UPDATE: + return None + return response + + @staticmethod + def unpack_facts(obj): + result = dict(obj) + if 'hardware-address' in result: + result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')])) + + if 'ip-address' in result: + result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')])) + + if 'hardware-type' in result: + result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')]) + + return result + + def setup_host(self): + if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0: + self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.") + + msg = None + host_response = self.get_host(self.module.params['macaddr']) + # If host was not found using macaddr, add create message + if host_response is None: + msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict')) + msg.message.append((to_bytes('create'), struct.pack('!I', 1))) + msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1))) + msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr']))) + msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1))) + msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname']))) + if self.module.params['ip'] is not None: + msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip']))) + + stmt_join = "" + if self.module.params['ddns']: + stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname']) + + try: + if len(self.module.params['statements']) > 0: + stmt_join += "; ".join(self.module.params['statements']) + stmt_join += "; " + except TypeError as e: + self.module.fail_json(msg="Invalid statements found: %s" % to_native(e)) + + if len(stmt_join) > 0: + msg.obj.append((to_bytes('statements'), to_bytes(stmt_join))) + + try: + response = self.omapi.query_server(msg) + if response.opcode != OMAPI_OP_UPDATE: + self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters " + "are valid.") + self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj)) + except OmapiError as e: + self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) + # Forge update message + else: + response_obj = self.unpack_facts(host_response.obj) + fields_to_update = {} + + if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \ + unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']: + fields_to_update['ip-address'] = pack_ip(self.module.params['ip']) + + # Name cannot be changed + if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']: + self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. " + "Please delete host and add new." 
% + (response_obj['name'], self.module.params['hostname'])) + + """ + # It seems statements are not returned by OMAPI, then we cannot modify them at this moment. + if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \ + response_obj['statements'] != self.module.params['statements']: + with open('/tmp/omapi', 'w') as fb: + for (k,v) in iteritems(response_obj): + fb.writelines('statements: %s %s\n' % (k, v)) + """ + if len(fields_to_update) == 0: + self.module.exit_json(changed=False, lease=response_obj) + else: + msg = OmapiMessage.update(host_response.handle) + msg.update_object(fields_to_update) + + try: + response = self.omapi.query_server(msg) + if response.opcode != OMAPI_OP_STATUS: + self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters " + "are valid.") + self.module.exit_json(changed=True) + except OmapiError as e: + self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) + + def remove_host(self): + try: + self.omapi.del_host(self.module.params['macaddr']) + self.module.exit_json(changed=True) + except OmapiErrorNotFound: + self.module.exit_json() + except OmapiError as e: + self.module.fail_json(msg="OMAPI error: %s" % to_native(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', required=True, choices=['absent', 'present']), + host=dict(type='str', default="localhost"), + port=dict(type='int', default=7911), + key_name=dict(type='str', required=True), + key=dict(type='str', required=True, no_log=True), + macaddr=dict(type='str', required=True), + hostname=dict(type='str', aliases=['name']), + ip=dict(type='str'), + ddns=dict(type='bool', default=False), + statements=dict(type='list', elements='str', default=[]), + ), + supports_check_mode=False, + ) + + if not pureomapi_found: + module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR) + + if module.params['key'] is None or len(module.params["key"]) == 0: + module.fail_json(msg="'key' parameter cannot be empty.") + + if module.params['key_name'] is None or len(module.params["key_name"]) == 0: + module.fail_json(msg="'key_name' parameter cannot be empty.") + + host_manager = OmapiHostManager(module) + try: + if module.params['state'] == 'present': + host_manager.setup_host() + elif module.params['state'] == 'absent': + host_manager.remove_host() + except ValueError as e: + module.fail_json(msg="OMAPI input value error: %s" % to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/one_host.py b/ansible_collections/community/general/plugins/modules/one_host.py new file mode 100644 index 000000000..c4578f950 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/one_host.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2018 www.privaz.io Valletech AB +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: one_host + +short_description: Manages OpenNebula Hosts + + +requirements: + - pyone + +description: + - "Manages OpenNebula Hosts" + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + description: + - Hostname of the machine to manage. 
+ required: true + type: str + state: + description: + - Takes the host to the desired lifecycle state. + - If C(absent) the host will be deleted from the cluster. + - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states). + - If C(enabled) the host is fully operational. + - C(disabled), e.g. to perform maintenance operations. + - C(offline), host is totally offline. + choices: + - absent + - present + - enabled + - disabled + - offline + default: present + type: str + im_mad_name: + description: + - The name of the information manager; this value is taken from the oned.conf with the tag name IM_MAD (name). + default: kvm + type: str + vmm_mad_name: + description: + - The name of the virtual machine manager mad; this value is taken from the oned.conf with the tag name VM_MAD (name). + default: kvm + type: str + cluster_id: + description: + - The cluster ID. + default: 0 + type: int + cluster_name: + description: + - The cluster specified by name. + type: str + labels: + description: + - The labels for this host. + type: list + elements: str + template: + description: + - The template or attribute changes to merge into the host template. + aliases: + - attributes + type: dict + +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes + +author: + - Rafael del Valle (@rvalle) +''' + +EXAMPLES = ''' +- name: Create a new host in OpenNebula + community.general.one_host: + name: host1 + cluster_id: 1 + api_url: http://127.0.0.1:2633/RPC2 + +- name: Create a host and adjust its template + community.general.one_host: + name: host2 + cluster_name: default + template: + LABELS: + - gold + - ssd + RESERVED_CPU: -100 +''' + +# TODO: pending setting guidelines on returned values +RETURN = ''' +''' + +# TODO: Documentation on valid state transitions is required to properly implement all valid cases +# TODO: To be coherent with CLI this module should also provide "flush" functionality + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + +try: + from pyone import HOST_STATES, HOST_STATUS +except ImportError: + pass # handled at module utils + + +# Pseudo definitions... + +HOST_ABSENT = -99 # the host is absent (special case defined by this module) + + +class HostModule(OpenNebulaModule): + + def __init__(self): + + argument_spec = dict( + name=dict(type='str', required=True), + state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'), + im_mad_name=dict(type='str', default="kvm"), + vmm_mad_name=dict(type='str', default="kvm"), + cluster_id=dict(type='int', default=0), + cluster_name=dict(type='str'), + labels=dict(type='list', elements='str'), + template=dict(type='dict', aliases=['attributes']), + ) + + mutually_exclusive = [ + ['cluster_id', 'cluster_name'] + ] + + OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive) + + def allocate_host(self): + """ + Creates a host entry in OpenNebula + Returns: True on success, fails otherwise. + + """ + if not self.one.host.allocate(self.get_parameter('name'), + self.get_parameter('vmm_mad_name'), + self.get_parameter('im_mad_name'), + self.get_parameter('cluster_id')): + self.fail(msg="could not allocate host") + else: + self.result['changed'] = True + return True + + def wait_for_host_state(self, host, target_states): + """ + Utility method that waits for a host state.
+ Args: + host: + target_states: + + """ + return self.wait_for_state('host', + lambda: self.one.host.info(host.ID).STATE, + lambda s: HOST_STATES(s).name, target_states, + invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]) + + def run(self, one, module, result): + + # Get the list of hosts + host_name = self.get_parameter("name") + host = self.get_host_by_name(host_name) + + # manage host state + desired_state = self.get_parameter('state') + if bool(host): + current_state = host.STATE + current_state_name = HOST_STATES(host.STATE).name + else: + current_state = HOST_ABSENT + current_state_name = "ABSENT" + + # apply properties + if desired_state == 'present': + if current_state == HOST_ABSENT: + self.allocate_host() + host = self.get_host_by_name(host_name) + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]: + self.fail(msg="invalid host state %s" % current_state_name) + + elif desired_state == 'enabled': + if current_state == HOST_ABSENT: + self.allocate_host() + host = self.get_host_by_name(host_name) + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]: + if one.host.status(host.ID, HOST_STATUS.ENABLED): + self.wait_for_host_state(host, [HOST_STATES.MONITORED]) + result['changed'] = True + else: + self.fail(msg="could not enable host") + elif current_state in [HOST_STATES.MONITORED]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name) + + elif desired_state == 'disabled': + if current_state == HOST_ABSENT: + self.fail(msg='absent host cannot be put in disabled state') + elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: + if one.host.status(host.ID, HOST_STATUS.DISABLED): + self.wait_for_host_state(host, [HOST_STATES.DISABLED]) + result['changed'] = True + else: + self.fail(msg="could not disable host") + elif current_state in [HOST_STATES.DISABLED]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name) + + elif desired_state == 'offline': + if current_state == HOST_ABSENT: + self.fail(msg='absent host cannot be placed in offline state') + elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: + if one.host.status(host.ID, HOST_STATUS.OFFLINE): + self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) + result['changed'] = True + else: + self.fail(msg="could not set host offline") + elif current_state in [HOST_STATES.OFFLINE]: + pass + else: + self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name) + + elif desired_state == 'absent': + if current_state != HOST_ABSENT: + if one.host.delete(host.ID): + result['changed'] = True + else: + self.fail(msg="could not delete host from cluster") + + # if we reach this point we can assume that the host was taken to the desired state + + if desired_state != "absent": + # manipulate or modify the template + desired_template_changes = self.get_parameter('template') + + if desired_template_changes is None: + desired_template_changes = dict() + + # complete the template with specific ansible parameters + if self.is_parameter('labels'): + desired_template_changes['LABELS'] = self.get_parameter('labels') + + if self.requires_template_update(host.TEMPLATE, desired_template_changes): + # setup the root element so that pyone will generate XML instead of attribute vector + 
desired_template_changes = {"TEMPLATE": desired_template_changes} + if one.host.update(host.ID, desired_template_changes, 1): # merge the template + result['changed'] = True + else: + self.fail(msg="failed to update the host template") + + # the cluster + if host.CLUSTER_ID != self.get_parameter('cluster_id'): + if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID): + result['changed'] = True + else: + self.fail(msg="failed to update the host cluster") + + # return + self.exit() + + +def main(): + HostModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/one_image.py b/ansible_collections/community/general/plugins/modules/one_image.py new file mode 100644 index 000000000..a50b33e93 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/one_image.py @@ -0,0 +1,414 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Milan Ilic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: one_image +short_description: Manages OpenNebula images +description: + - Manages OpenNebula images +requirements: + - pyone +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_url: + description: + - URL of the OpenNebula RPC server. + - It is recommended to use HTTPS so that the username/password are not + - transferred over the network unencrypted. + - If not set then the value of the C(ONE_URL) environment variable is used. + type: str + api_username: + description: + - Name of the user to login into the OpenNebula RPC server. If not set + - then the value of the C(ONE_USERNAME) environment variable is used. + type: str + api_password: + description: + - Password of the user to login into OpenNebula RPC server. If not set + - then the value of the C(ONE_PASSWORD) environment variable is used. + type: str + id: + description: + - A C(id) of the image you would like to manage. + type: int + name: + description: + - A C(name) of the image you would like to manage. + type: str + state: + description: + - C(present) - state that is used to manage the image + - C(absent) - delete the image + - C(cloned) - clone the image + - C(renamed) - rename the image to the C(new_name) + choices: ["present", "absent", "cloned", "renamed"] + default: present + type: str + enabled: + description: + - Whether the image should be enabled or disabled. + type: bool + new_name: + description: + - A name that will be assigned to the existing or new image. + - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'. 
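+      - Required when C(state) is C(renamed).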
+ type: str +author: + - "Milan Ilic (@ilicmilan)" +''' + +EXAMPLES = ''' +- name: Fetch the IMAGE by id + community.general.one_image: + id: 45 + register: result + +- name: Print the IMAGE properties + ansible.builtin.debug: + var: result + +- name: Rename existing IMAGE + community.general.one_image: + id: 34 + state: renamed + new_name: bar-image + +- name: Disable the IMAGE by id + community.general.one_image: + id: 37 + enabled: false + +- name: Enable the IMAGE by name + community.general.one_image: + name: bar-image + enabled: true + +- name: Clone the IMAGE by name + community.general.one_image: + name: bar-image + state: cloned + new_name: bar-image-clone + register: result + +- name: Delete the IMAGE by id + community.general.one_image: + id: '{{ result.id }}' + state: absent +''' + +RETURN = ''' +id: + description: image id + type: int + returned: success + sample: 153 +name: + description: image name + type: str + returned: success + sample: app1 +group_id: + description: image's group id + type: int + returned: success + sample: 1 +group_name: + description: image's group name + type: str + returned: success + sample: one-users +owner_id: + description: image's owner id + type: int + returned: success + sample: 143 +owner_name: + description: image's owner name + type: str + returned: success + sample: ansible-test +state: + description: state of image instance + type: str + returned: success + sample: READY +used: + description: is image in use + type: bool + returned: success + sample: true +running_vms: + description: count of running vms that use this image + type: int + returned: success + sample: 7 +''' + +try: + import pyone + HAS_PYONE = True +except ImportError: + HAS_PYONE = False + +from ansible.module_utils.basic import AnsibleModule +import os + + +def get_image(module, client, predicate): + # Filter -2 means fetch all images user can Use + pool = client.imagepool.info(-2, -1, -1, -1) + + for image in pool.IMAGE: + if predicate(image): + return image + + return None + + +def get_image_by_name(module, client, image_name): + return get_image(module, client, lambda image: (image.NAME == image_name)) + + +def get_image_by_id(module, client, image_id): + return get_image(module, client, lambda image: (image.ID == image_id)) + + +def get_image_instance(module, client, requested_id, requested_name): + if requested_id: + return get_image_by_id(module, client, requested_id) + else: + return get_image_by_name(module, client, requested_name) + + +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] + + +def get_image_info(image): + info = { + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, + } + + return info + + +def wait_for_state(module, client, image_id, wait_timeout, state_predicate): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + image = client.image.info(image_id) + state = image.STATE + + if state_predicate(state): + return image + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired!") + + +def wait_for_ready(module, client, image_id, wait_timeout=60): + return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) + + +def wait_for_delete(module, client, 
image_id, wait_timeout=60): + return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) + + +def enable_image(module, client, image, enable): + image = client.image.info(image.ID) + changed = False + + state = image.STATE + + if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if enable: + module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") + else: + module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") + + if ((enable and state != IMAGE_STATES.index('READY')) or + (not enable and state != IMAGE_STATES.index('DISABLED'))): + changed = True + + if changed and not module.check_mode: + client.image.enable(image.ID, enable) + + result = get_image_info(image) + result['changed'] = changed + + return result + + +def clone_image(module, client, image, new_name): + if new_name is None: + new_name = "Copy of " + image.NAME + + tmp_image = get_image_by_name(module, client, new_name) + if tmp_image: + result = get_image_info(tmp_image) + result['changed'] = False + return result + + if image.STATE == IMAGE_STATES.index('DISABLED'): + module.fail_json(msg="Cannot clone DISABLED image") + + if not module.check_mode: + new_id = client.image.clone(image.ID, new_name) + wait_for_ready(module, client, new_id) + image = client.image.info(new_id) + + result = get_image_info(image) + result['changed'] = True + + return result + + +def rename_image(module, client, image, new_name): + if new_name is None: + module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") + + if new_name == image.NAME: + result = get_image_info(image) + result['changed'] = False + return result + + tmp_image = get_image_by_name(module, client, new_name) + if tmp_image: + module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) + + if not module.check_mode: + client.image.rename(image.ID, new_name) + + result = get_image_info(image) + result['changed'] = True + return result + + +def delete_image(module, client, image): + + if not image: + return {'changed': False} + + if image.RUNNING_VMS > 0: + module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") + + if not module.check_mode: + client.image.delete(image.ID) + wait_for_delete(module, client, image.ID) + + return {'changed': True} + + +def get_connection_info(module): + + url = module.params.get('api_url') + username = module.params.get('api_username') + password = module.params.get('api_password') + + if not url: + url = os.environ.get('ONE_URL') + + if not username: + username = os.environ.get('ONE_USERNAME') + + if not password: + password = os.environ.get('ONE_PASSWORD') + + if not (url and username and password): + module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") + from collections import namedtuple + + auth_params = namedtuple('auth', ('url', 'username', 'password')) + + return auth_params(url=url, username=username, password=password) + + +def main(): + fields = { + "api_url": {"required": False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "id": {"required": False, "type": "int"}, + "name": {"required": False, "type": "str"}, + "state": { + "default": "present", + "choices": ['present', 'absent', 'cloned', 'renamed'], + "type": "str" + }, + "enabled": {"required": False, "type": "bool"}, + "new_name": {"required": False, "type": "str"}, + } + + module = AnsibleModule(argument_spec=fields, + mutually_exclusive=[['id', 'name']], + supports_check_mode=True) + + if not HAS_PYONE: + module.fail_json(msg='This module requires pyone to work!') + + auth = get_connection_info(module) + params = module.params + id = params.get('id') + name = params.get('name') + state = params.get('state') + enabled = params.get('enabled') + new_name = params.get('new_name') + client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + + result = {} + + if not id and state == 'renamed': + module.fail_json(msg="Option 'id' is required when the state is 'renamed'") + + image = get_image_instance(module, client, id, name) + if not image and state != 'absent': + if id: + module.fail_json(msg="There is no image with id=" + str(id)) + else: + module.fail_json(msg="There is no image with name=" + name) + + if state == 'absent': + result = delete_image(module, client, image) + else: + result = get_image_info(image) + changed = False + result['changed'] = False + + if enabled is not None: + result = enable_image(module, client, image, enabled) + if state == "cloned": + result = clone_image(module, client, image, new_name) + elif state == "renamed": + result = rename_image(module, client, image, new_name) + + changed = changed or result['changed'] + result['changed'] = changed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/one_image_info.py b/ansible_collections/community/general/plugins/modules/one_image_info.py new file mode 100644 index 000000000..938f0ef2a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/one_image_info.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Milan Ilic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: one_image_info 
+short_description: Gather information on OpenNebula images +description: + - Gather information on OpenNebula images. + - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change. +requirements: + - pyone +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + api_url: + description: + - URL of the OpenNebula RPC server. + - It is recommended to use HTTPS so that the username/password are not + - transferred over the network unencrypted. + - If not set then the value of the C(ONE_URL) environment variable is used. + type: str + api_username: + description: + - Name of the user to login into the OpenNebula RPC server. If not set + - then the value of the C(ONE_USERNAME) environment variable is used. + type: str + api_password: + description: + - Password of the user to login into OpenNebula RPC server. If not set + - then the value of the C(ONE_PASSWORD) environment variable is used. + type: str + ids: + description: + - A list of images ids whose facts you want to gather. + aliases: ['id'] + type: list + elements: str + name: + description: + - A C(name) of the image whose facts will be gathered. + - If the C(name) begins with '~' the C(name) will be used as regex pattern + - which restricts the list of images (whose facts will be returned) whose names match specified regex. + - Also, if the C(name) begins with '~*' case-insensitive matching will be performed. + - See examples for more details. + type: str +author: + - "Milan Ilic (@ilicmilan)" + - "Jan Meerkamp (@meerkampdvv)" +''' + +EXAMPLES = ''' +- name: Gather facts about all images + community.general.one_image_info: + register: result + +- name: Print all images facts + ansible.builtin.debug: + msg: result + +- name: Gather facts about an image using ID + community.general.one_image_info: + ids: + - 123 + +- name: Gather facts about an image using the name + community.general.one_image_info: + name: 'foo-image' + register: foo_image + +- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*' + community.general.one_image_info: + name: '~app-image-.*' + register: app_images + +- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases + community.general.one_image_info: + name: '~*foo-image-.*' + register: foo_images +''' + +RETURN = ''' +images: + description: A list of images info + type: complex + returned: success + contains: + id: + description: image id + type: int + sample: 153 + name: + description: image name + type: str + sample: app1 + group_id: + description: image's group id + type: int + sample: 1 + group_name: + description: image's group name + type: str + sample: one-users + owner_id: + description: image's owner id + type: int + sample: 143 + owner_name: + description: image's owner name + type: str + sample: ansible-test + state: + description: state of image instance + type: str + sample: READY + used: + description: is image in use + type: bool + sample: true + running_vms: + description: count of running vms that use this image + type: int + sample: 7 +''' + +try: + import pyone + HAS_PYONE = True +except ImportError: + HAS_PYONE = False + +from ansible.module_utils.basic import AnsibleModule +import os + + +def get_all_images(client): + pool = client.imagepool.info(-2, -1, -1, -1) + # Filter -2 means fetch all images user can Use + + return pool + + +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 
'LOCKED_USED', 'LOCKED_USED_PERS'] + + +def get_image_info(image): + info = { + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, + } + return info + + +def get_images_by_ids(module, client, ids): + images = [] + pool = get_all_images(client) + + for image in pool.IMAGE: + if str(image.ID) in ids: + images.append(image) + ids.remove(str(image.ID)) + if len(ids) == 0: + break + + if len(ids) > 0: + module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) + + return images + + +def get_images_by_name(module, client, name_pattern): + + images = [] + pattern = None + + pool = get_all_images(client) + + if name_pattern.startswith('~'): + import re + if name_pattern[1] == '*': + pattern = re.compile(name_pattern[2:], re.IGNORECASE) + else: + pattern = re.compile(name_pattern[1:]) + + for image in pool.IMAGE: + if pattern is not None: + if pattern.match(image.NAME): + images.append(image) + elif name_pattern == image.NAME: + images.append(image) + break + + # if the specific name is indicated + if pattern is None and len(images) == 0: + module.fail_json(msg="There is no IMAGE with name=" + name_pattern) + + return images + + +def get_connection_info(module): + + url = module.params.get('api_url') + username = module.params.get('api_username') + password = module.params.get('api_password') + + if not url: + url = os.environ.get('ONE_URL') + + if not username: + username = os.environ.get('ONE_USERNAME') + + if not password: + password = os.environ.get('ONE_PASSWORD') + + if not (url and username and password): + module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") + from collections import namedtuple + + auth_params = namedtuple('auth', ('url', 'username', 'password')) + + return auth_params(url=url, username=username, password=password) + + +def main(): + fields = { + "api_url": {"required": False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"}, + "name": {"required": False, "type": "str"}, + } + + module = AnsibleModule(argument_spec=fields, + mutually_exclusive=[['ids', 'name']], + supports_check_mode=True) + + if not HAS_PYONE: + module.fail_json(msg='This module requires pyone to work!') + + auth = get_connection_info(module) + params = module.params + ids = params.get('ids') + name = params.get('name') + client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + + if ids: + images = get_images_by_ids(module, client, ids) + elif name: + images = get_images_by_name(module, client, name) + else: + images = get_all_images(client).IMAGE + + result = { + 'images': [get_image_info(image) for image in images], + } + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/one_service.py b/ansible_collections/community/general/plugins/modules/one_service.py new file mode 100644 index 000000000..4f5143887 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/one_service.py @@ -0,0 +1,759 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Milan Ilic +# GNU General 
Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: one_service +short_description: Deploy and manage OpenNebula services +description: + - Manage OpenNebula services +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_url: + description: + - URL of the OpenNebula OneFlow API server. + - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. + - If not set then the value of the ONEFLOW_URL environment variable is used. + type: str + api_username: + description: + - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used. + type: str + api_password: + description: + - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used. + type: str + template_name: + description: + - Name of service template to use to create a new instance of a service. + type: str + template_id: + description: + - ID of a service template to use to create a new instance of a service. + type: int + service_id: + description: + - ID of a service instance that you would like to manage. + type: int + service_name: + description: + - Name of a service instance that you would like to manage. + type: str + unique: + description: + - Setting I(unique=true) will make sure that there is only one service instance running with a name set with C(service_name) when + instantiating a service from a template specified with I(template_id) or I(template_name). Check examples below. + type: bool + default: false + state: + description: + - C(present) - instantiate a service from a template specified with I(template_id) or I(template_name). + - C(absent) - terminate an instance of a service specified with I(template_id) or I(template_name). + choices: ["present", "absent"] + default: present + type: str + mode: + description: + - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others. + type: str + owner_id: + description: + - ID of the user which will be set as the owner of the service. + type: int + group_id: + description: + - ID of the group which will be set as the group of the service. + type: int + wait: + description: + - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING. + type: bool + default: false + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + type: int + custom_attrs: + description: + - Dictionary of key/value custom attributes which will be used when instantiating a new service. + default: {} + type: dict + role: + description: + - Name of the role whose cardinality should be changed. + type: str + cardinality: + description: + - Number of VMs for the specified role. + type: int + force: + description: + - Force the new cardinality even if it is outside the limits. 
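+      - Only considered when I(role) and I(cardinality) are also set.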
+    type: bool
+    default: false
+author:
+    - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Instantiate a new service
+  community.general.one_service:
+    template_id: 90
+  register: result
+
+- name: Print service properties
+  ansible.builtin.debug:
+    var: result
+
+- name: Instantiate a new service with specified service_name, service group and mode
+  community.general.one_service:
+    template_name: 'app1_template'
+    service_name: 'app1'
+    group_id: 1
+    mode: '660'
+
+- name: Instantiate a new service with template_id and pass custom_attrs dict
+  community.general.one_service:
+    template_id: 90
+    custom_attrs:
+      public_network_id: 21
+      private_network_id: 26
+
+- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing
+  community.general.one_service:
+    template_id: 53
+    service_name: 'foo'
+    unique: true
+
+- name: Delete a service by ID
+  community.general.one_service:
+    service_id: 153
+    state: absent
+
+- name: Get service info
+  community.general.one_service:
+    service_id: 153
+  register: service_info
+
+- name: Change service owner, group and mode
+  community.general.one_service:
+    service_name: 'app2'
+    owner_id: 34
+    group_id: 113
+    mode: '600'
+
+- name: Instantiate service and wait for it to become RUNNING
+  community.general.one_service:
+    template_id: 43
+    service_name: 'foo1'
+    wait: true
+
+- name: Wait for service to become RUNNING
+  community.general.one_service:
+    service_id: 112
+    wait: true
+
+- name: Change role cardinality
+  community.general.one_service:
+    service_id: 153
+    role: bar
+    cardinality: 5
+
+- name: Change role cardinality and wait for it to be applied
+  community.general.one_service:
+    service_id: 112
+    role: foo
+    cardinality: 7
+    wait: true
+'''
+
+RETURN = '''
+service_id:
+    description: service id
+    type: int
+    returned: success
+    sample: 153
+service_name:
+    description: service name
+    type: str
+    returned: success
+    sample: app1
+group_id:
+    description: service's group id
+    type: int
+    returned: success
+    sample: 1
+group_name:
+    description: service's group name
+    type: str
+    returned: success
+    sample: one-users
+owner_id:
+    description: service's owner id
+    type: int
+    returned: success
+    sample: 143
+owner_name:
+    description: service's owner name
+    type: str
+    returned: success
+    sample: ansible-test
+state:
+    description: state of service instance
+    type: str
+    returned: success
+    sample: RUNNING
+mode:
+    description: service's mode
+    type: int
+    returned: success
+    sample: 660
+roles:
+    description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids
+    type: list
+    returned: success
+    sample:
+        - {"cardinality": 1,"name": "foo","state": "RUNNING", "ids": [ 123, 456 ]}
+        - {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+
+STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
+          "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN")
+
+
+def get_all_templates(module, auth):
+    try:
+        all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    return module.from_json(all_templates.read())
+
+
+def get_template(module, auth, pred):
+    all_templates_dict = get_all_templates(module, auth)
+
+    found = 0
+    found_template = None
+    template_name = ''
+
+    if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]:
+        for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+            if pred(template):
+                found = found + 1
+                found_template = template
+                template_name = template["NAME"]
+
+    if found <= 0:
+        return None
+    elif found > 1:
+        module.fail_json(msg="There are multiple templates with the name: '" + template_name +
+                             "'. You have to use a unique template name or use 'template_id' instead.")
+    else:
+        return found_template
+
+
+def get_all_services(module, auth):
+    try:
+        response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    return module.from_json(response.read())
+
+
+def get_service(module, auth, pred):
+    all_services_dict = get_all_services(module, auth)
+
+    found = 0
+    found_service = None
+    service_name = ''
+
+    if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]:
+        for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+            if pred(service):
+                found = found + 1
+                found_service = service
+                service_name = service["NAME"]
+
+    # fail if there is more than one service with the same name
+    if found > 1:
+        module.fail_json(msg="There are multiple services with the name: '" +
+                             service_name + "'. You have to use a unique service name or use 'service_id' instead.")
+    elif found <= 0:
+        return None
+    else:
+        return found_service
+
+
+def get_service_by_id(module, auth, service_id):
+    return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None
+
+
+def get_service_by_name(module, auth, service_name):
+    return get_service(module, auth, lambda service: (service["NAME"] == service_name))
+
+
+def get_service_info(module, auth, service):
+
+    result = {
+        "service_id": int(service["ID"]),
+        "service_name": service["NAME"],
+        "group_id": int(service["GID"]),
+        "group_name": service["GNAME"],
+        "owner_id": int(service["UID"]),
+        "owner_name": service["UNAME"],
+        "state": STATES[service["TEMPLATE"]["BODY"]["state"]]
+    }
+
+    roles_status = service["TEMPLATE"]["BODY"]["roles"]
+    roles = []
+    for role in roles_status:
+        nodes_ids = []
+        if "nodes" in role:
+            for node in role["nodes"]:
+                nodes_ids.append(node["deploy_id"])
+        roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids})
+
+    result["roles"] = roles
+    result["mode"] = int(parse_service_permissions(service))
+
+    return result
+
+
+def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout):
+    # make sure that the values in custom_attrs dict are strings
+    custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items())
+
+    data = {
+        "action": {
+            "perform": "instantiate",
+            "params": {
+                "merge_template": {
+                    "custom_attrs_values": custom_attrs_with_str,
+                    "name": service_name
+                }
+            }
+        }
+    }
+
+    try:
+        response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST",
+                            data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    service_result = module.from_json(response.read())["DOCUMENT"]
+
+    return service_result
+
+
+def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
+    import time
+    start_time = time.time()
+
+    while (time.time() - start_time) < wait_timeout:
+        try:
+            status_result = open_url(auth.url + "/service/"
+ str(service_id), method="GET", + force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg="Request for service status has failed. Error message: " + str(e)) + + status_result = module.from_json(status_result.read()) + service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"] + + if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]: + return status_result["DOCUMENT"] + elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]: + log_message = '' + for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]: + if log_info["severity"] == "E": + log_message = log_message + log_info["message"] + break + + module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message) + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired") + + +def change_service_permissions(module, auth, service_id, permissions): + + data = { + "action": { + "perform": "chmod", + "params": {"octet": permissions} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_service_owner(module, auth, service_id, owner_id): + data = { + "action": { + "perform": "chown", + "params": {"owner_id": owner_id} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_service_group(module, auth, service_id, group_id): + + data = { + "action": { + "perform": "chgrp", + "params": {"group_id": group_id} + } + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True, + url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + +def change_role_cardinality(module, auth, service_id, role, cardinality, force): + + data = { + "cardinality": cardinality, + "force": force + } + + try: + status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT", + force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data)) + except Exception as e: + module.fail_json(msg=str(e)) + + if status_result.getcode() != 204: + module.fail_json(msg="Failed to change cardinality for role: " + role + ". 
Return code: " + str(status_result.getcode())) + + +def check_change_service_owner(module, service, owner_id): + old_owner_id = int(service["UID"]) + + return old_owner_id != owner_id + + +def check_change_service_group(module, service, group_id): + old_group_id = int(service["GID"]) + + return old_group_id != group_id + + +def parse_service_permissions(service): + perm_dict = service["PERMISSIONS"] + ''' + This is the structure of the 'PERMISSIONS' dictionary: + + "PERMISSIONS": { + "OWNER_U": "1", + "OWNER_M": "1", + "OWNER_A": "0", + "GROUP_U": "0", + "GROUP_M": "0", + "GROUP_A": "0", + "OTHER_U": "0", + "OTHER_M": "0", + "OTHER_A": "0" + } + ''' + + owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"]) + group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"]) + other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"]) + + permissions = str(owner_octal) + str(group_octal) + str(other_octal) + + return permissions + + +def check_change_service_permissions(module, service, permissions): + old_permissions = parse_service_permissions(service) + + return old_permissions != permissions + + +def check_change_role_cardinality(module, service, role_name, cardinality): + roles_list = service["TEMPLATE"]["BODY"]["roles"] + + for role in roles_list: + if role["name"] == role_name: + return int(role["cardinality"]) != cardinality + + module.fail_json(msg="There is no role with name: " + role_name) + + +def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout): + if not service_name: + service_name = '' + changed = False + service = None + + if unique: + service = get_service_by_name(module, auth, service_name) + + if not service: + if not module.check_mode: + service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout) + changed = True + + # if check_mode=true and there would be changes, service doesn't exist and we can not get it + if module.check_mode and changed: + return {"changed": True} + + result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait, + wait_timeout=wait_timeout, permissions=permissions, service=service) + + if result["changed"]: + changed = True + + result["changed"] = changed + + return result + + +def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None, + role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None): + + changed = False + + if not service: + service = get_service_by_id(module, auth, service_id) + else: + service_id = service["ID"] + + if not service: + module.fail_json(msg="There is no service with id: " + str(service_id)) + + if owner_id: + if check_change_service_owner(module, service, owner_id): + if not module.check_mode: + change_service_owner(module, auth, service_id, owner_id) + changed = True + if group_id: + if check_change_service_group(module, service, group_id): + if not module.check_mode: + change_service_group(module, auth, service_id, group_id) + changed = True + if permissions: + if check_change_service_permissions(module, service, permissions): + if not module.check_mode: + change_service_permissions(module, auth, service_id, permissions) + changed = True + + if role: + if check_change_role_cardinality(module, service, role, cardinality): + if not module.check_mode: + 
change_role_cardinality(module, auth, service_id, role, cardinality, force) + changed = True + + if wait and not module.check_mode: + service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout) + + # if something has changed, fetch service info again + if changed: + service = get_service_by_id(module, auth, service_id) + + service_info = get_service_info(module, auth, service) + service_info["changed"] = changed + + return service_info + + +def delete_service(module, auth, service_id): + service = get_service_by_id(module, auth, service_id) + if not service: + return {"changed": False} + + service_info = get_service_info(module, auth, service) + + service_info["changed"] = True + + if module.check_mode: + return service_info + + try: + result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password) + except Exception as e: + module.fail_json(msg="Service deletion has failed. Error message: " + str(e)) + + return service_info + + +def get_template_by_name(module, auth, template_name): + return get_template(module, auth, lambda template: (template["NAME"] == template_name)) + + +def get_template_by_id(module, auth, template_id): + return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None + + +def get_template_id(module, auth, requested_id, requested_name): + template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name) + + if template: + return template["ID"] + + return None + + +def get_service_id_by_name(module, auth, service_name): + service = get_service_by_name(module, auth, service_name) + + if service: + return service["ID"] + + return None + + +def get_connection_info(module): + + url = module.params.get('api_url') + username = module.params.get('api_username') + password = module.params.get('api_password') + + if not url: + url = os.environ.get('ONEFLOW_URL') + + if not username: + username = os.environ.get('ONEFLOW_USERNAME') + + if not password: + password = os.environ.get('ONEFLOW_PASSWORD') + + if not (url and username and password): + module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") + from collections import namedtuple + + auth_params = namedtuple('auth', ('url', 'user', 'password')) + + return auth_params(url=url, user=username, password=password) + + +def main(): + fields = { + "api_url": {"required": False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "service_name": {"required": False, "type": "str"}, + "service_id": {"required": False, "type": "int"}, + "template_name": {"required": False, "type": "str"}, + "template_id": {"required": False, "type": "int"}, + "state": { + "default": "present", + "choices": ['present', 'absent'], + "type": "str" + }, + "mode": {"required": False, "type": "str"}, + "owner_id": {"required": False, "type": "int"}, + "group_id": {"required": False, "type": "int"}, + "unique": {"default": False, "type": "bool"}, + "wait": {"default": False, "type": "bool"}, + "wait_timeout": {"default": 300, "type": "int"}, + "custom_attrs": {"default": {}, "type": "dict"}, + "role": {"required": False, "type": "str"}, + "cardinality": {"required": False, "type": "int"}, + "force": {"default": False, "type": "bool"} + } + + module = 
AnsibleModule(argument_spec=fields, + mutually_exclusive=[ + ['template_id', 'template_name', 'service_id'], + ['service_id', 'service_name'], + ['template_id', 'template_name', 'role'], + ['template_id', 'template_name', 'cardinality'], + ['service_id', 'custom_attrs'] + ], + required_together=[['role', 'cardinality']], + supports_check_mode=True) + + auth = get_connection_info(module) + params = module.params + service_name = params.get('service_name') + service_id = params.get('service_id') + + requested_template_id = params.get('template_id') + requested_template_name = params.get('template_name') + state = params.get('state') + permissions = params.get('mode') + owner_id = params.get('owner_id') + group_id = params.get('group_id') + unique = params.get('unique') + wait = params.get('wait') + wait_timeout = params.get('wait_timeout') + custom_attrs = params.get('custom_attrs') + role = params.get('role') + cardinality = params.get('cardinality') + force = params.get('force') + + template_id = None + + if requested_template_id or requested_template_name: + template_id = get_template_id(module, auth, requested_template_id, requested_template_name) + if not template_id: + if requested_template_id: + module.fail_json(msg="There is no template with template_id: " + str(requested_template_id)) + elif requested_template_name: + module.fail_json(msg="There is no template with name: " + requested_template_name) + + if unique and not service_name: + module.fail_json(msg="You cannot use unique without passing service_name!") + + if template_id and state == 'absent': + module.fail_json(msg="State absent is not valid for template") + + if template_id and state == 'present': # Instantiate a service + result = create_service_and_operation(module, auth, template_id, service_name, owner_id, + group_id, permissions, custom_attrs, unique, wait, wait_timeout) + else: + if not (service_id or service_name): + module.fail_json(msg="To manage the service at least the service id or service name should be specified!") + if custom_attrs: + module.fail_json(msg="You can only set custom_attrs when instantiate service!") + + if not service_id: + service_id = get_service_id_by_name(module, auth, service_name) + # The task should be failed when we want to manage a non-existent service identified by its name + if not service_id and state == 'present': + module.fail_json(msg="There is no service with name: " + service_name) + + if state == 'absent': + result = delete_service(module, auth, service_id) + else: + result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/one_template.py b/ansible_collections/community/general/plugins/modules/one_template.py new file mode 100644 index 000000000..97d0f856e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/one_template.py @@ -0,0 +1,284 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021, Georg Gadinger +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: one_template + +short_description: Manages OpenNebula templates + +version_added: 2.4.0 + +requirements: + 
- pyone + +description: + - "Manages OpenNebula templates." + +attributes: + check_mode: + support: partial + details: + - Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change. + diff_mode: + support: none + +options: + id: + description: + - A I(id) of the template you would like to manage. If not set then a + - new template will be created with the given I(name). + type: int + name: + description: + - A I(name) of the template you would like to manage. If a template with + - the given name does not exist it will be created, otherwise it will be + - managed by this module. + type: str + template: + description: + - A string containing the template contents. + type: str + state: + description: + - C(present) - state that is used to manage the template. + - C(absent) - delete the template. + choices: ["present", "absent"] + default: present + type: str + +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes + +author: + - "Georg Gadinger (@nilsding)" +''' + +EXAMPLES = ''' +- name: Fetch the TEMPLATE by id + community.general.one_template: + id: 6459 + register: result + +- name: Print the TEMPLATE properties + ansible.builtin.debug: + var: result + +- name: Fetch the TEMPLATE by name + community.general.one_template: + name: tf-prd-users-workerredis-p6379a + register: result + +- name: Create a new or update an existing TEMPLATE + community.general.one_template: + name: generic-opensuse + template: | + CONTEXT = [ + HOSTNAME = "generic-opensuse" + ] + CPU = "1" + CUSTOM_ATTRIBUTE = "" + DISK = [ + CACHE = "writeback", + DEV_PREFIX = "sd", + DISCARD = "unmap", + IMAGE = "opensuse-leap-15.2", + IMAGE_UNAME = "oneadmin", + IO = "threads", + SIZE = "" ] + MEMORY = "2048" + NIC = [ + MODEL = "virtio", + NETWORK = "testnet", + NETWORK_UNAME = "oneadmin" ] + OS = [ + ARCH = "x86_64", + BOOT = "disk0" ] + SCHED_REQUIREMENTS = "CLUSTER_ID=\\"100\\"" + VCPU = "2" + +- name: Delete the TEMPLATE by id + community.general.one_template: + id: 6459 + state: absent +''' + +RETURN = ''' +id: + description: template id + type: int + returned: when I(state=present) + sample: 153 +name: + description: template name + type: str + returned: when I(state=present) + sample: app1 +template: + description: the parsed template + type: dict + returned: when I(state=present) +group_id: + description: template's group id + type: int + returned: when I(state=present) + sample: 1 +group_name: + description: template's group name + type: str + returned: when I(state=present) + sample: one-users +owner_id: + description: template's owner id + type: int + returned: when I(state=present) + sample: 143 +owner_name: + description: template's owner name + type: str + returned: when I(state=present) + sample: ansible-test +''' + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +class TemplateModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + id=dict(type='int', required=False), + name=dict(type='str', required=False), + state=dict(type='str', choices=['present', 'absent'], default='present'), + template=dict(type='str', required=False), + ) + + mutually_exclusive = [ + ['id', 'name'] + ] + + required_one_of = [('id', 'name')] + + required_if = [ + ['state', 'present', ['template']] + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + 
required_one_of=required_one_of, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + template_data = params.get('template') + + self.result = {} + + template = self.get_template_instance(id, name) + needs_creation = False + if not template and desired_state != 'absent': + if id: + module.fail_json(msg="There is no template with id=" + str(id)) + else: + needs_creation = True + + if desired_state == 'absent': + self.result = self.delete_template(template) + else: + if needs_creation: + self.result = self.create_template(name, template_data) + else: + self.result = self.update_template(template, template_data) + + self.exit() + + def get_template(self, predicate): + # -3 means "Resources belonging to the user" + # the other two parameters are used for pagination, -1 for both essentially means "return all" + pool = self.one.templatepool.info(-3, -1, -1) + + for template in pool.VMTEMPLATE: + if predicate(template): + return template + + return None + + def get_template_by_id(self, template_id): + return self.get_template(lambda template: (template.ID == template_id)) + + def get_template_by_name(self, name): + return self.get_template(lambda template: (template.NAME == name)) + + def get_template_instance(self, requested_id, requested_name): + if requested_id: + return self.get_template_by_id(requested_id) + else: + return self.get_template_by_name(requested_name) + + def get_template_info(self, template): + info = { + 'id': template.ID, + 'name': template.NAME, + 'template': template.TEMPLATE, + 'user_name': template.UNAME, + 'user_id': template.UID, + 'group_name': template.GNAME, + 'group_id': template.GID, + } + + return info + + def create_template(self, name, template_data): + if not self.module.check_mode: + self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data) + + result = self.get_template_info(self.get_template_by_name(name)) + result['changed'] = True + + return result + + def update_template(self, template, template_data): + if not self.module.check_mode: + # 0 = replace the whole template + self.one.template.update(template.ID, template_data, 0) + + result = self.get_template_info(self.get_template_by_id(template.ID)) + if self.module.check_mode: + # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. 
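+            # (a reliable diff would require the server-side template parsing that only happens on an actual update call)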
+            result['changed'] = True
+        else:
+            # if the previous parsed template data is not equal to the updated one, this has changed
+            result['changed'] = template.TEMPLATE != result['template']
+
+        return result
+
+    def delete_template(self, template):
+        if not template:
+            return {'changed': False}
+
+        if not self.module.check_mode:
+            self.one.template.delete(template.ID)
+
+        return {'changed': True}
+
+
+def main():
+    TemplateModule().run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/one_vm.py b/ansible_collections/community/general/plugins/modules/one_vm.py
new file mode 100644
index 000000000..1bbf47466
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/one_vm.py
@@ -0,0 +1,1725 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Milan Ilic
+# Copyright (c) 2019, Jan Meerkamp
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: one_vm
+short_description: Creates or terminates OpenNebula instances
+description:
+  - Manages OpenNebula instances
+requirements:
+  - pyone
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  api_url:
+    description:
+      - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not
+      - transferred over the network unencrypted.
+      - If not set then the value of the C(ONE_URL) environment variable is used.
+    type: str
+  api_username:
+    description:
+      - Name of the user to login into the OpenNebula RPC server. If not set
+      - then the value of the C(ONE_USERNAME) environment variable is used.
+    type: str
+  api_password:
+    description:
+      - Password of the user to login into OpenNebula RPC server. If not set
+      - then the value of the C(ONE_PASSWORD) environment variable is used.
+      - If both I(api_username) and I(api_password) are not set, then it will try
+      - to authenticate with the ONE auth file. Default path is "~/.one/one_auth".
+      - Set environment variable C(ONE_AUTH) to override this path.
+    type: str
+  template_name:
+    description:
+      - Name of VM template to use to create a new instance
+    type: str
+  template_id:
+    description:
+      - ID of a VM template to use to create a new instance
+    type: int
+  vm_start_on_hold:
+    description:
+      - Set to true to put the VM on hold while creating
+    default: false
+    type: bool
+  instance_ids:
+    description:
+      - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
+    aliases: ['ids']
+    type: list
+    elements: int
+  state:
+    description:
+      - C(present) - create instances from a template specified with C(template_id)/C(template_name).
+      - C(running) - run instances
+      - C(poweredoff) - power-off instances
+      - C(rebooted) - reboot instances
+      - C(absent) - terminate instances
+    choices: ["present", "absent", "running", "rebooted", "poweredoff"]
+    default: present
+    type: str
+  hard:
+    description:
+      - Reboot, power-off or terminate instances C(hard)
+    default: false
+    type: bool
+  wait:
+    description:
+      - Wait for the instance to reach its desired state before returning. Keep
+      - in mind that waiting for the instance to reach the running state does not
+      - mean that you will be able to SSH into the machine, only that the boot
+      - process has started on that instance. See the 'wait_for' example for details.
+    default: true
+    type: bool
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds
+    default: 300
+    type: int
+  attributes:
+    description:
+      - A dictionary of key/value attributes to add to new instances, or for
+      - setting C(state) of instances with these attributes.
+      - Keys are case insensitive and OpenNebula automatically converts them to upper case.
+      - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
+      - C(#) character(s) can be appended to the C(NAME) and the module will automatically add
+      - indexes to the names of VMs.
+      - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
+      - When used with C(count_attributes) and C(exact_count) the module will
+      - match the base name without the index part.
+    default: {}
+    type: dict
+  labels:
+    description:
+      - A list of labels to associate with new instances, or for setting
+      - C(state) of instances with these labels.
+    default: []
+    type: list
+    elements: str
+  count_attributes:
+    description:
+      - A dictionary of key/value attributes that can only be used with
+      - C(exact_count) to determine how many nodes should be deployed, based
+      - on specific attribute criteria. This can be expressed in
+      - multiple ways and is shown in the EXAMPLES section.
+    type: dict
+  count_labels:
+    description:
+      - A list of labels that can only be used with C(exact_count) to determine
+      - how many nodes should be deployed, based on specific label criteria.
+      - This can be expressed in multiple ways and is shown in the EXAMPLES
+      - section.
+    type: list
+    elements: str
+  count:
+    description:
+      - Number of instances to launch
+    default: 1
+    type: int
+  exact_count:
+    description:
+      - Indicates how many instances matching the C(count_attributes) and
+      - C(count_labels) parameters should be deployed. Instances are either
+      - created or terminated based on this value.
+      - NOTE':' Instances with the lowest IDs will be terminated first.
+    type: int
+  mode:
+    description:
+      - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+    type: str
+  owner_id:
+    description:
+      - ID of the user which will be set as the owner of the instance
+    type: int
+  group_id:
+    description:
+      - ID of the group which will be set as the group of the instance
+    type: int
+  memory:
+    description:
+      - The size of the memory for new instances (in MB, GB, ...)
+    type: str
+  disk_size:
+    description:
+      - The size of the disk created for new instances (in MB, GB, TB,...).
+      - NOTE':' If the template has multiple disks, the order of the sizes is
+      - matched against the order specified in C(template_id)/C(template_name).
+    type: list
+    elements: str
+  cpu:
+    description:
+      - Percentage of CPU divided by 100 required for the new instance. Half a
+      - processor is written as 0.5.
+    type: float
+  vcpu:
+    description:
+      - Number of CPUs (cores) the new VM will have.
+    type: int
+  networks:
+    description:
+      - A list of dictionaries with network parameters. See examples for more details.
+    default: []
+    type: list
+    elements: dict
+  disk_saveas:
+    description:
+      - Creates an image from a VM disk.
+      - It is a dictionary where you have to specify C(name) of the new image.
+      - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
+      - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
+      - and the VM has to be in the C(poweredoff) state.
+      - Also this operation will fail if an image with the specified C(name) already exists.
+    type: dict
+  persistent:
+    description:
+      - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
+    default: false
+    type: bool
+    version_added: '0.2.0'
+  datastore_id:
+    description:
+      - ID of the datastore to use to create a new instance
+    version_added: '0.2.0'
+    type: int
+  datastore_name:
+    description:
+      - Name of the datastore to use to create a new instance
+    version_added: '0.2.0'
+    type: str
+  updateconf:
+    description:
+      - When I(instance_ids) is provided, updates running VMs with the C(updateconf) API call.
+      - When new VMs are being created, emulates the C(updateconf) API call via direct template merge.
+      - Allows for complete modifications of the C(CONTEXT) attribute.
+    type: dict
+    version_added: 6.3.0
+author:
+  - "Milan Ilic (@ilicmilan)"
+  - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+
+EXAMPLES = '''
+- name: Create a new instance
+  community.general.one_vm:
+    template_id: 90
+  register: result
+
+- name: Print VM properties
+  ansible.builtin.debug:
+    var: result
+
+- name: Deploy a new VM on hold
+  community.general.one_vm:
+    template_name: 'app1_template'
+    vm_start_on_hold: true
+
+- name: Deploy a new VM and set its name to 'foo'
+  community.general.one_vm:
+    template_name: 'app1_template'
+    attributes:
+      name: foo
+
+- name: Deploy a new VM and set its group_id and mode
+  community.general.one_vm:
+    template_id: 90
+    group_id: 16
+    mode: 660
+
+- name: Deploy a new VM as persistent
+  community.general.one_vm:
+    template_id: 90
+    persistent: true
+
+- name: Change VM's permissions to 640
+  community.general.one_vm:
+    instance_ids: 5
+    mode: 640
+
+- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
+  community.general.one_vm:
+    template_id: 15
+    disk_size: 35.2 GB
+    memory: 4 GB
+    vcpu: 4
+    count: 2
+    networks:
+      - NETWORK_ID: 27
+      - NETWORK: "default-network"
+        NETWORK_UNAME: "app-user"
+        SECURITY_GROUPS: "120,124"
+      - NETWORK_ID: 27
+        SECURITY_GROUPS: "10"
+
+- name: Deploy a new instance which uses a template with two disks
+  community.general.one_vm:
+    template_id: 42
+    disk_size:
+      - 35.2 GB
+      - 50 GB
+    memory: 4 GB
+    vcpu: 4
+    count: 1
+    networks:
+      - NETWORK_ID: 27
+
+- name: "Deploy a new instance with attribute 'bar: bar1' and set its name to 'foo'"
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      name: foo
+      bar: bar1
+
+- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      foo1: app1
+      foo2: app2
+    exact_count: 2
+    count_attributes:
+      foo1: app1
+      foo2: app2
+
+- name: Enforce that 4 instances with an attribute 'bar' are deployed
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      name: app
+      bar: bar2
+    exact_count: 4
+    count_attributes:
+      bar:
+
+# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
+# Names will be: fooapp-00 and fooapp-01
+- name: Deploy 2 new instances
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      name: fooapp-##
+      foo: bar
+    labels:
+      - app1
+      - app2
+    count: 2
+
+# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
+# Names will be: fooapp-002 and fooapp-003
+- name: Deploy 2 new instances
+  community.general.one_vm:
+    template_id: 53
+    attributes:
+      name: fooapp-###
+      app: app1
+    count: 2
+
+# Reboot all instances with name in format 'fooapp-#'
+# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
+- name: Reboot all instances with names in a certain format
+  community.general.one_vm:
+    attributes:
+      name: fooapp-#
+    state: rebooted
+
+# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
+# The task will delete the oldest instances, so only 'fooapp-003' will remain
+- name: Enforce that only 1 instance with name in a certain format is deployed
+  community.general.one_vm:
+    template_id: 53
+    exact_count: 1
+    count_attributes:
+      name: fooapp-#
+
+- name: Deploy a new instance with a network
+  community.general.one_vm:
+    template_id: 53
+    networks:
+      - NETWORK_ID: 27
+  register: vm
+
+- name: Wait for SSH to come up
+  ansible.builtin.wait_for_connection:
+  delegate_to: '{{ vm.instances[0].networks[0].ip }}'
+
+- name: Terminate VMs by ids
+  community.general.one_vm:
+    instance_ids:
+      - 153
+      - 160
+    state: absent
+
+- name: Reboot all VMs that have labels 'foo' and 'app1'
+  community.general.one_vm:
+    labels:
+      - foo
+      - app1
+    state: rebooted
+
+- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
+  community.general.one_vm:
+    attributes:
+      name: foo
+      app: bar
+  register: results
+
+- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
+  community.general.one_vm:
+    template_name: app_template
+    labels:
+      - foo1
+      - foo2
+    count: 2
+
+- name: Enforce that only 1 instance with label 'foo1' will be running
+  community.general.one_vm:
+    template_name: app_template
+    labels:
+      - foo1
+    exact_count: 1
+    count_labels:
+      - foo1
+
+- name: Terminate all instances that have attribute foo
+  community.general.one_vm:
+    template_id: 53
+    exact_count: 0
+    count_attributes:
+      foo:
+
+- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
+  community.general.one_vm:
+    instance_ids: 351
+    state: poweredoff
+    disk_saveas:
+      name: foo-image
+
+- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
+  community.general.one_vm:
+    instance_ids: 351
+    disk_saveas:
+      name: bar-image
+      disk_id: 1
+
+- name: "Deploy 2 new instances with a custom 'start script'"
+  community.general.one_vm:
+    template_name: app_template
+    count: 2
+    updateconf:
+      CONTEXT:
+        START_SCRIPT: ip r r 169.254.16.86/32 dev eth0
+
+- name: "Add a custom 'start script' to a running VM"
+  community.general.one_vm:
+    instance_ids: 351
+    updateconf:
+      CONTEXT:
+        START_SCRIPT: ip r r 169.254.16.86/32 dev eth0
+
+- name: "Update SSH public keys inside the VM's context"
+  community.general.one_vm:
+    instance_ids: 351
+    updateconf:
+      CONTEXT:
+        SSH_PUBLIC_KEY: |-
+          ssh-rsa ...
+          ssh-ed25519 ...
+'''
+
+RETURN = '''
+instances_ids:
+    description: a list of instance ids whose state is changed or which are fetched with C(instance_ids) option.
+    type: list
+    returned: success
+    sample: [ 1234, 1235 ]
+instances:
+    description: a list of instances info whose state is changed or which are fetched with C(instance_ids) option.
+ type: complex + returned: success + contains: + vm_id: + description: vm id + type: int + sample: 153 + vm_name: + description: vm name + type: str + sample: foo + template_id: + description: vm's template id + type: int + sample: 153 + group_id: + description: vm's group id + type: int + sample: 1 + group_name: + description: vm's group name + type: str + sample: one-users + owner_id: + description: vm's owner id + type: int + sample: 143 + owner_name: + description: vm's owner name + type: str + sample: app-user + mode: + description: vm's mode + type: str + returned: success + sample: 660 + state: + description: state of an instance + type: str + sample: ACTIVE + lcm_state: + description: lcm state of an instance that is only relevant when the state is ACTIVE + type: str + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100 + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores) + type: int + sample: 2 + memory: + description: The size of the memory in MB + type: str + sample: 4096 MB + disk_size: + description: The size of the disk in MB + type: str + sample: 20480 MB + networks: + description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC + type: list + sample: [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours + type: int + sample: 35 + labels: + description: A list of string labels that are associated with the instance + type: list + sample: [ + "foo", + "spec-label" + ] + attributes: + description: A dictionary of key/values attributes that are associated with the instance + type: dict + sample: { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } + updateconf: + description: A dictionary of key/values attributes that are set with the updateconf API call. + type: dict + version_added: 6.3.0 + sample: { + "OS": { "ARCH": "x86_64" }, + "CONTEXT": { + "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", + "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..." + } + } +tagged_instances: + description: + - A list of instances info based on a specific attributes and/or + - labels that are specified with C(count_attributes) and C(count_labels) + - options. 
+ type: complex + returned: success + contains: + vm_id: + description: vm id + type: int + sample: 153 + vm_name: + description: vm name + type: str + sample: foo + template_id: + description: vm's template id + type: int + sample: 153 + group_id: + description: vm's group id + type: int + sample: 1 + group_name: + description: vm's group name + type: str + sample: one-users + owner_id: + description: vm's user id + type: int + sample: 143 + owner_name: + description: vm's user name + type: str + sample: app-user + mode: + description: vm's mode + type: str + returned: success + sample: 660 + state: + description: state of an instance + type: str + sample: ACTIVE + lcm_state: + description: lcm state of an instance that is only relevant when the state is ACTIVE + type: str + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100 + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores) + type: int + sample: 2 + memory: + description: The size of the memory in MB + type: str + sample: 4096 MB + disk_size: + description: The size of the disk in MB + type: list + sample: [ + "20480 MB", + "10240 MB" + ] + networks: + description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC + type: list + sample: [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours + type: int + sample: 35 + labels: + description: A list of string labels that are associated with the instance + type: list + sample: [ + "foo", + "spec-label" + ] + attributes: + description: A dictionary of key/values attributes that are associated with the instance + type: dict + sample: { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } + updateconf: + description: A dictionary of key/values attributes that are set with the updateconf API call + type: dict + version_added: 6.3.0 + sample: { + "OS": { "ARCH": "x86_64" }, + "CONTEXT": { + "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", + "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..." 
+        }
+      }
+'''
+
+try:
+    import pyone
+    HAS_PYONE = True
+except ImportError:
+    HAS_PYONE = False
+
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render
+
+
+UPDATECONF_ATTRIBUTES = {
+    "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID"],
+    "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"],
+    "INPUT": ["TYPE", "BUS"],
+    "GRAPHICS": ["TYPE", "LISTEN", "PASSWD", "KEYMAP"],
+    "RAW": ["DATA", "DATA_VMX", "TYPE"],
+    "CONTEXT": [],
+}
+
+
+def check_updateconf(module, to_check):
+    '''Checks if attributes are compatible with the one.vm.updateconf API call.'''
+    for attr, subattributes in to_check.items():
+        if attr not in UPDATECONF_ATTRIBUTES:
+            module.fail_json(msg="'{0}' is not a valid VM attribute.".format(attr))
+        if not UPDATECONF_ATTRIBUTES[attr]:
+            continue
+        for subattr in subattributes:
+            if subattr not in UPDATECONF_ATTRIBUTES[attr]:
+                module.fail_json(msg="'{0}' is not a valid VM subattribute of '{1}'.".format(subattr, attr))
+
+
+def parse_updateconf(vm_template):
+    '''Extracts 'updateconf' attributes from a VM template.'''
+    updateconf = {}
+    for attr, subattributes in vm_template.items():
+        if attr not in UPDATECONF_ATTRIBUTES:
+            continue
+        tmp = {}
+        for subattr, value in subattributes.items():
+            if UPDATECONF_ATTRIBUTES[attr] and subattr not in UPDATECONF_ATTRIBUTES[attr]:
+                continue
+            tmp[subattr] = value
+        if tmp:
+            updateconf[attr] = tmp
+    return updateconf
+
+
+def get_template(module, client, predicate):
+
+    pool = client.templatepool.info(-2, -1, -1, -1)
+    # Filter -2 means fetch all templates the user can use
+    found = 0
+    found_template = None
+    template_name = ''
+
+    for template in pool.VMTEMPLATE:
+        if predicate(template):
+            found = found + 1
+            found_template = template
+            template_name = template.NAME
+
+    if found == 0:
+        return None
+    elif found > 1:
+        module.fail_json(msg='There is more than one template with name: ' + template_name)
+    return found_template
+
+
+def get_template_by_name(module, client, template_name):
+    return get_template(module, client, lambda template: (template.NAME == template_name))
+
+
+def get_template_by_id(module, client, template_id):
+    return get_template(module, client, lambda template: (template.ID == template_id))
+
+
+def get_template_id(module, client, requested_id, requested_name):
+    template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name)
+    if template:
+        return template.ID
+    else:
+        return None
+
+
+def get_datastore(module, client, predicate):
+    pool = client.datastorepool.info()
+    found = 0
+    found_datastore = None
+    datastore_name = ''
+
+    for datastore in pool.DATASTORE:
+        if predicate(datastore):
+            found = found + 1
+            found_datastore = datastore
+            datastore_name = datastore.NAME
+
+    if found == 0:
+        return None
+    elif found > 1:
+        module.fail_json(msg='There is more than one datastore with name: ' + datastore_name)
+    return found_datastore
+
+
+def get_datastore_by_name(module, client, datastore_name):
+    return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+
+
+def get_datastore_by_id(module, client, datastore_id):
+    return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+
+
+def get_datastore_id(module, client, requested_id, requested_name):
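+    # Resolution mirrors get_template_id() above: the numeric ID wins when both
+    # parameters are given. Illustrative calls (IDs and names are made up):
+    #   get_datastore_id(module, client, 101, None)    # -> 101, if it exists
+    #   get_datastore_id(module, client, None, 'ssd')  # -> ID of datastore 'ssd'
+    # Note that the truthiness test below means a requested_id of 0 falls back
+    # to the name lookup, unlike the `is not None` check in get_template_id().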
+ datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name) + if datastore: + return datastore.ID + else: + return None + + +def get_vm_by_id(client, vm_id): + try: + vm = client.vm.info(int(vm_id)) + except BaseException: + return None + return vm + + +def get_vms_by_ids(module, client, state, ids): + vms = [] + + for vm_id in ids: + vm = get_vm_by_id(client, vm_id) + if vm is None and state != 'absent': + module.fail_json(msg='There is no VM with id=' + str(vm_id)) + vms.append(vm) + + return vms + + +def get_vm_info(client, vm): + + vm = client.vm.info(vm.ID) + + networks_info = [] + + disk_size = [] + if 'DISK' in vm.TEMPLATE: + if isinstance(vm.TEMPLATE['DISK'], list): + for disk in vm.TEMPLATE['DISK']: + disk_size.append(disk['SIZE'] + ' MB') + else: + disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB') + + if 'NIC' in vm.TEMPLATE: + if isinstance(vm.TEMPLATE['NIC'], list): + for nic in vm.TEMPLATE['NIC']: + networks_info.append({ + 'ip': nic.get('IP', ''), + 'mac': nic.get('MAC', ''), + 'name': nic.get('NETWORK', ''), + 'security_groups': nic.get('SECURITY_GROUPS', '') + }) + else: + networks_info.append({ + 'ip': vm.TEMPLATE['NIC'].get('IP', ''), + 'mac': vm.TEMPLATE['NIC'].get('MAC', ''), + 'name': vm.TEMPLATE['NIC'].get('NETWORK', ''), + 'security_groups': + vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '') + }) + import time + + current_time = time.localtime() + vm_start_time = time.localtime(vm.STIME) + + vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time) + vm_uptime /= (60 * 60) + + permissions_str = parse_vm_permissions(client, vm) + + # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE + vm_lcm_state = None + if vm.STATE == VM_STATES.index('ACTIVE'): + vm_lcm_state = LCM_STATES[vm.LCM_STATE] + + vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID) + + updateconf = parse_updateconf(vm.TEMPLATE) + + info = { + 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']), + 'vm_id': vm.ID, + 'vm_name': vm.NAME, + 'state': VM_STATES[vm.STATE], + 'lcm_state': vm_lcm_state, + 'owner_name': vm.UNAME, + 'owner_id': vm.UID, + 'networks': networks_info, + 'disk_size': disk_size, + 'memory': vm.TEMPLATE['MEMORY'] + ' MB', + 'vcpu': vm.TEMPLATE['VCPU'], + 'cpu': vm.TEMPLATE['CPU'], + 'group_name': vm.GNAME, + 'group_id': vm.GID, + 'uptime_h': int(vm_uptime), + 'attributes': vm_attributes, + 'mode': permissions_str, + 'labels': vm_labels, + 'updateconf': updateconf, + } + + return info + + +def parse_vm_permissions(client, vm): + vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS + + owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A) + group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A) + other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A) + + permissions = str(owner_octal) + str(group_octal) + str(other_octal) + + return permissions + + +def set_vm_permissions(module, client, vms, permissions): + changed = False + + for vm in vms: + vm = client.vm.info(vm.ID) + old_permissions = parse_vm_permissions(client, vm) + changed = changed or old_permissions != permissions + + if not module.check_mode and old_permissions != permissions: + permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000 + mode_bits = [int(d) for d in permissions_str] + try: + client.vm.chmod( + vm.ID, mode_bits[0], 
mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8]) + except pyone.OneAuthorizationException: + module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.") + + return changed + + +def set_vm_ownership(module, client, vms, owner_id, group_id): + changed = False + + for vm in vms: + vm = client.vm.info(vm.ID) + if owner_id is None: + owner_id = vm.UID + if group_id is None: + group_id = vm.GID + + changed = changed or owner_id != vm.UID or group_id != vm.GID + + if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID): + try: + client.vm.chown(vm.ID, owner_id, group_id) + except pyone.OneAuthorizationException: + module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.") + + return changed + + +def update_vm(module, client, vm, updateconf_dict): + changed = False + if not updateconf_dict: + return changed + + before = client.vm.info(vm.ID).TEMPLATE + + client.vm.updateconf(vm.ID, render(updateconf_dict), 1) # 1: Merge new template with the existing one. + + after = client.vm.info(vm.ID).TEMPLATE + + changed = before != after + return changed + + +def update_vms(module, client, vms, *args): + changed = False + for vm in vms: + changed = update_vm(module, client, vm, *args) or changed + return changed + + +def get_size_in_MB(module, size_str): + + SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB'] + + s = size_str + init = size_str + num = "" + while s and s[0:1].isdigit() or s[0:1] == '.': + num += s[0] + s = s[1:] + num = float(num) + symbol = s.strip() + + if symbol not in SYMBOLS: + module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num)) + + prefix = {'B': 1} + + for i, s in enumerate(SYMBOLS[1:]): + prefix[s] = 1 << (i + 1) * 10 + + size_in_bytes = int(num * prefix[symbol]) + size_in_MB = size_in_bytes / (1024 * 1024) + + return size_in_MB + + +def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent, updateconf_dict): + if attributes_dict: + vm_name = attributes_dict.get('NAME', '') + + template = client.template.info(template_id).TEMPLATE + + disk_count = len(flatten(template.get('DISK', []))) + if disk_size: + size_count = len(flatten(disk_size)) + # check if the number of disks is correct + if disk_count != size_count: + module.fail_json(msg='This template has ' + str(disk_count) + ' disks but you defined ' + str(size_count)) + + vm_extra_template = dict_merge(template or {}, attributes_dict or {}) + vm_extra_template = dict_merge(vm_extra_template, { + 'LABELS': ','.join(labels_list), + 'NIC': flatten(network_attrs_list, extract=True), + 'DISK': flatten([ + disk if not size else dict_merge(disk, { + 'SIZE': str(int(get_size_in_MB(module, size))), + }) + for disk, size in zip( + flatten(template.get('DISK', [])), + flatten(disk_size or [None] * disk_count), + ) + if disk is not None + ], extract=True) + }) + vm_extra_template = dict_merge(vm_extra_template, updateconf_dict or {}) + + try: + vm_id = client.template.instantiate(template_id, + vm_name, + vm_start_on_hold, + render(vm_extra_template), + vm_persistent) + except pyone.OneException as e: + module.fail_json(msg=str(e)) + + vm = get_vm_by_id(client, vm_id) + return get_vm_info(client, vm) + + +def generate_next_index(vm_filled_indexes_list, num_sign_cnt): + counter = 0 + cnt_str = str(counter).zfill(num_sign_cnt) + + while cnt_str in vm_filled_indexes_list: + counter = 
counter + 1 + cnt_str = str(counter).zfill(num_sign_cnt) + + return cnt_str + + +def get_vm_labels_and_attributes_dict(client, vm_id): + vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE + + attrs_dict = {} + labels_list = [] + + for key, value in vm_USER_TEMPLATE.items(): + if key != 'LABELS': + attrs_dict[key] = value + else: + if key is not None and value is not None: + labels_list = value.split(',') + + return labels_list, attrs_dict + + +def get_all_vms_by_attributes(client, attributes_dict, labels_list): + pool = client.vmpool.info(-2, -1, -1, -1).VM + vm_list = [] + name = '' + if attributes_dict: + name = attributes_dict.pop('NAME', '') + + if name != '': + base_name = name[:len(name) - name.count('#')] + # Check does the name have indexed format + with_hash = name.endswith('#') + + for vm in pool: + if vm.NAME.startswith(base_name): + if with_hash and vm.NAME[len(base_name):].isdigit(): + # If the name has indexed format and after base_name it has only digits it'll be matched + vm_list.append(vm) + elif not with_hash and vm.NAME == name: + # If the name is not indexed it has to be same + vm_list.append(vm) + pool = vm_list + + import copy + + vm_list = copy.copy(pool) + + for vm in pool: + remove_list = [] + vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID) + + if attributes_dict and len(attributes_dict) > 0: + for key, val in attributes_dict.items(): + if key in vm_attributes_dict: + if val and vm_attributes_dict[key] != val: + remove_list.append(vm) + break + else: + remove_list.append(vm) + break + vm_list = list(set(vm_list).difference(set(remove_list))) + + remove_list = [] + if labels_list and len(labels_list) > 0: + for label in labels_list: + if label not in vm_labels_list: + remove_list.append(vm) + break + vm_list = list(set(vm_list).difference(set(remove_list))) + + return vm_list + + +def create_count_of_vms(module, client, + template_id, count, + attributes_dict, labels_list, disk_size, network_attrs_list, + wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict): + new_vms_list = [] + + vm_name = '' + if attributes_dict: + vm_name = attributes_dict.get('NAME', '') + + if module.check_mode: + return True, [], [] + + # Create list of used indexes + vm_filled_indexes_list = None + num_sign_cnt = vm_name.count('#') + if vm_name != '' and num_sign_cnt > 0: + vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None) + base_name = vm_name[:len(vm_name) - num_sign_cnt] + vm_name = base_name + # Make list which contains used indexes in format ['000', '001',...] 
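+    # Illustrative example (assuming VMs 'fooapp-000' and 'fooapp-002' already
+    # exist for the pattern 'fooapp-###'):
+    #   vm_filled_indexes_list == ['000', '002']
+    #   generate_next_index(vm_filled_indexes_list, 3)  # -> '001', the lowest free index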
+ vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list) + + while count > 0: + new_vm_name = vm_name + # Create indexed name + if vm_filled_indexes_list is not None: + next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt) + vm_filled_indexes_list.append(next_index) + new_vm_name += next_index + # Update NAME value in the attributes in case there is index + attributes_dict['NAME'] = new_vm_name + new_vm_dict = create_vm(module, client, + template_id, attributes_dict, labels_list, disk_size, network_attrs_list, + vm_start_on_hold, vm_persistent, updateconf_dict) + new_vm_id = new_vm_dict.get('vm_id') + new_vm = get_vm_by_id(client, new_vm_id) + new_vms_list.append(new_vm) + count -= 1 + + if vm_start_on_hold: + if wait: + for vm in new_vms_list: + wait_for_hold(module, client, vm, wait_timeout) + else: + if wait: + for vm in new_vms_list: + wait_for_running(module, client, vm, wait_timeout) + + return True, new_vms_list, [] + + +def create_exact_count_of_vms(module, client, + template_id, exact_count, attributes_dict, count_attributes_dict, + labels_list, count_labels_list, disk_size, network_attrs_list, + hard, wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict): + vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) + + vm_count_diff = exact_count - len(vm_list) + changed = vm_count_diff != 0 + + new_vms_list = [] + instances_list = [] + tagged_instances_list = vm_list + + if module.check_mode: + return changed, instances_list, tagged_instances_list + + if vm_count_diff > 0: + # Add more VMs + changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, + labels_list, disk_size, network_attrs_list, wait, wait_timeout, + vm_start_on_hold, vm_persistent, updateconf_dict) + + tagged_instances_list += instances_list + elif vm_count_diff < 0: + # Delete surplus VMs + old_vms_list = [] + + while vm_count_diff < 0: + old_vm = vm_list.pop(0) + old_vms_list.append(old_vm) + terminate_vm(module, client, old_vm, hard) + vm_count_diff += 1 + + if wait: + for vm in old_vms_list: + wait_for_done(module, client, vm, wait_timeout) + + instances_list = old_vms_list + # store only the remaining instances + old_vms_set = set(old_vms_list) + tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set] + + return changed, instances_list, tagged_instances_list + + +VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE'] +LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP', + 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME', + 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF', + 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC', + 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY'] + + +def wait_for_state(module, client, vm, wait_timeout, state_predicate): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + vm = client.vm.info(vm.ID) + state = vm.STATE + lcm_state = vm.LCM_STATE + + if state_predicate(state, lcm_state): + return vm + elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'), + VM_STATES.index('ACTIVE'), 
VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]: + module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state]) + + time.sleep(1) + + module.fail_json(msg="Wait timeout has expired!") + + +def wait_for_running(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, + lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')])) + + +def wait_for_done(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')])) + + +def wait_for_hold(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')])) + + +def wait_for_poweroff(module, client, vm, wait_timeout): + return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')])) + + +def terminate_vm(module, client, vm, hard=False): + changed = False + + if not vm: + return changed + + changed = True + + if not module.check_mode: + if hard: + client.vm.action('terminate-hard', vm.ID) + else: + client.vm.action('terminate', vm.ID) + + return changed + + +def terminate_vms(module, client, vms, hard): + changed = False + + for vm in vms: + changed = terminate_vm(module, client, vm, hard) or changed + + return changed + + +def poweroff_vm(module, client, vm, hard): + vm = client.vm.info(vm.ID) + changed = False + + lcm_state = vm.LCM_STATE + state = vm.STATE + + if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + changed = True + + if changed and not module.check_mode: + if not hard: + client.vm.action('poweroff', vm.ID) + else: + client.vm.action('poweroff-hard', vm.ID) + + return changed + + +def poweroff_vms(module, client, vms, hard): + changed = False + + for vm in vms: + changed = poweroff_vm(module, client, vm, hard) or changed + + return changed + + +def reboot_vms(module, client, vms, wait_timeout, hard): + + if not module.check_mode: + # Firstly, power-off all instances + for vm in vms: + vm = client.vm.info(vm.ID) + lcm_state = vm.LCM_STATE + state = vm.STATE + if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: + poweroff_vm(module, client, vm, hard) + + # Wait for all to be power-off + for vm in vms: + wait_for_poweroff(module, client, vm, wait_timeout) + + for vm in vms: + resume_vm(module, client, vm) + + return True + + +def resume_vm(module, client, vm): + vm = client.vm.info(vm.ID) + changed = False + + state = vm.STATE + if state in [VM_STATES.index('HOLD')]: + changed = release_vm(module, client, vm) + return changed + + lcm_state = vm.LCM_STATE + if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'): + module.fail_json(msg="Cannot perform action 'resume' because this action is not available " + + "for LCM_STATE: 'SHUTDOWN_POWEROFF'. 
Wait for the VM to shut down properly.")
+    if lcm_state not in [LCM_STATES.index('RUNNING')]:
+        changed = True
+
+    if changed and not module.check_mode:
+        client.vm.action('resume', vm.ID)
+
+    return changed
+
+
+def resume_vms(module, client, vms):
+    changed = False
+
+    for vm in vms:
+        changed = resume_vm(module, client, vm) or changed
+
+    return changed
+
+
+def release_vm(module, client, vm):
+    vm = client.vm.info(vm.ID)
+    changed = False
+
+    state = vm.STATE
+    if state != VM_STATES.index('HOLD'):
+        module.fail_json(msg="Cannot perform action 'release' because this action is only available " +
+                             "when the VM is in state 'HOLD'.")
+    else:
+        changed = True
+
+    if changed and not module.check_mode:
+        client.vm.action('release', vm.ID)
+
+    return changed
+
+
+def check_name_attribute(module, attributes):
+    if attributes.get("NAME"):
+        import re
+        if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
+            module.fail_json(msg="Illegal 'NAME' attribute: '" + attributes.get("NAME") +
+                                 "'. '#' signs are allowed only at the end of the name and the name cannot consist only of '#'.")
+
+
+TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
+                                  "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
+                                  "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
+
+
+def check_attributes(module, attributes):
+    for key in attributes.keys():
+        if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
+            module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
+    # Check the format of the name attribute
+    check_name_attribute(module, attributes)
+
+
+def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
+    if not disk_saveas.get('name'):
+        module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
+
+    image_name = disk_saveas.get('name')
+    disk_id = disk_saveas.get('disk_id', 0)
+
+    if not module.check_mode:
+        if vm.STATE != VM_STATES.index('POWEROFF'):
+            module.fail_json(msg="'disk_saveas' option can be used only when the VM is in 'POWEROFF' state")
+        try:
+            client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
+        except pyone.OneException as e:
+            module.fail_json(msg=str(e))
+        wait_for_poweroff(module, client, vm, wait_timeout)  # wait for the VM to leave the hotplug_saveas_poweroff state
+
+
+def get_connection_info(module):
+
+    url = module.params.get('api_url')
+    username = module.params.get('api_username')
+    password = module.params.get('api_password')
+
+    if not url:
+        url = os.environ.get('ONE_URL')
+
+    if not username:
+        username = os.environ.get('ONE_USERNAME')
+
+    if not password:
+        password = os.environ.get('ONE_PASSWORD')
+
+    if not username:
+        if not password:
+            authfile = os.environ.get('ONE_AUTH')
+            if authfile is None:
+                authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
+            try:
+                with open(authfile, "r") as fp:
+                    authstring = fp.read().rstrip()
+                username = authstring.split(":")[0]
+                password = authstring.split(":")[1]
+            except (OSError, IOError):
+                module.fail_json(msg=("Could not find or read the ONE_AUTH file at '%s'" % authfile))
+            except Exception:
+                module.fail_json(msg=("An error occurred while reading the ONE_AUTH file at '%s'" % authfile))
+    if not url:
+        module.fail_json(msg="The OpenNebula API URL (api_url) is not specified")
+    from collections import namedtuple
+
+    auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+    return auth_params(url=url, username=username, password=password)
+
+
+def main():
+    fields = {
+        "api_url": {"required":
False, "type": "str"}, + "api_username": {"required": False, "type": "str"}, + "api_password": {"required": False, "type": "str", "no_log": True}, + "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"}, + "template_name": {"required": False, "type": "str"}, + "template_id": {"required": False, "type": "int"}, + "vm_start_on_hold": {"default": False, "type": "bool"}, + "state": { + "default": "present", + "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'], + "type": "str" + }, + "mode": {"required": False, "type": "str"}, + "owner_id": {"required": False, "type": "int"}, + "group_id": {"required": False, "type": "int"}, + "wait": {"default": True, "type": "bool"}, + "wait_timeout": {"default": 300, "type": "int"}, + "hard": {"default": False, "type": "bool"}, + "memory": {"required": False, "type": "str"}, + "cpu": {"required": False, "type": "float"}, + "vcpu": {"required": False, "type": "int"}, + "disk_size": {"required": False, "type": "list", "elements": "str"}, + "datastore_name": {"required": False, "type": "str"}, + "datastore_id": {"required": False, "type": "int"}, + "networks": {"default": [], "type": "list", "elements": "dict"}, + "count": {"default": 1, "type": "int"}, + "exact_count": {"required": False, "type": "int"}, + "attributes": {"default": {}, "type": "dict"}, + "count_attributes": {"required": False, "type": "dict"}, + "labels": {"default": [], "type": "list", "elements": "str"}, + "count_labels": {"required": False, "type": "list", "elements": "str"}, + "disk_saveas": {"type": "dict"}, + "persistent": {"default": False, "type": "bool"}, + "updateconf": {"type": "dict"}, + } + + module = AnsibleModule(argument_spec=fields, + mutually_exclusive=[ + ['template_id', 'template_name', 'instance_ids'], + ['template_id', 'template_name', 'disk_saveas'], + ['instance_ids', 'count_attributes', 'count'], + ['instance_ids', 'count_labels', 'count'], + ['instance_ids', 'exact_count'], + ['instance_ids', 'attributes'], + ['instance_ids', 'labels'], + ['disk_saveas', 'attributes'], + ['disk_saveas', 'labels'], + ['exact_count', 'count'], + ['count', 'hard'], + ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'], + ['instance_ids', 'memory'], ['instance_ids', 'disk_size'], + ['instance_ids', 'networks'], + ['persistent', 'disk_size'] + ], + supports_check_mode=True) + + if not HAS_PYONE: + module.fail_json(msg='This module requires pyone to work!') + + auth = get_connection_info(module) + params = module.params + instance_ids = params.get('instance_ids') + requested_template_name = params.get('template_name') + requested_template_id = params.get('template_id') + put_vm_on_hold = params.get('vm_start_on_hold') + state = params.get('state') + permissions = params.get('mode') + owner_id = params.get('owner_id') + group_id = params.get('group_id') + wait = params.get('wait') + wait_timeout = params.get('wait_timeout') + hard = params.get('hard') + memory = params.get('memory') + cpu = params.get('cpu') + vcpu = params.get('vcpu') + disk_size = params.get('disk_size') + requested_datastore_id = params.get('datastore_id') + requested_datastore_name = params.get('datastore_name') + networks = params.get('networks') + count = params.get('count') + exact_count = params.get('exact_count') + attributes = params.get('attributes') + count_attributes = params.get('count_attributes') + labels = params.get('labels') + count_labels = params.get('count_labels') + disk_saveas = params.get('disk_saveas') + persistent = params.get('persistent') + 
updateconf = params.get('updateconf') + + if not (auth.username and auth.password): + module.warn("Credentials missing") + else: + one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + + if attributes: + attributes = dict((key.upper(), value) for key, value in attributes.items()) + check_attributes(module, attributes) + + if count_attributes: + count_attributes = dict((key.upper(), value) for key, value in count_attributes.items()) + if not attributes: + import copy + module.warn('When you pass `count_attributes` without `attributes` option when deploying, `attributes` option will have same values implicitly.') + attributes = copy.copy(count_attributes) + check_attributes(module, count_attributes) + + if updateconf: + check_updateconf(module, updateconf) + + if count_labels and not labels: + module.warn('When you pass `count_labels` without `labels` option when deploying, `labels` option will have same values implicitly.') + labels = count_labels + + # Fetch template + template_id = None + if requested_template_id is not None or requested_template_name: + template_id = get_template_id(module, one_client, requested_template_id, requested_template_name) + if template_id is None: + if requested_template_id is not None: + module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id)) + elif requested_template_name: + module.fail_json(msg="There is no template with name: " + requested_template_name) + + # Fetch datastore + datastore_id = None + if requested_datastore_id or requested_datastore_name: + datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name) + if datastore_id is None: + if requested_datastore_id: + module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id)) + elif requested_datastore_name: + module.fail_json(msg="There is no datastore with name: " + requested_datastore_name) + else: + attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id) + + if exact_count and template_id is None: + module.fail_json(msg='Option `exact_count` needs template_id or template_name') + + if exact_count is not None and not (count_attributes or count_labels): + module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.') + if (count_attributes or count_labels) and exact_count is None: + module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.') + if template_id is not None and state != 'present': + module.fail_json(msg="Only state 'present' is valid for the template") + + if memory: + attributes['MEMORY'] = str(int(get_size_in_MB(module, memory))) + if cpu: + attributes['CPU'] = str(cpu) + if vcpu: + attributes['VCPU'] = str(vcpu) + + if exact_count is not None and state != 'present': + module.fail_json(msg='The `exact_count` option is valid only for the `present` state') + if exact_count is not None and exact_count < 0: + module.fail_json(msg='`exact_count` cannot be less than 0') + if count <= 0: + module.fail_json(msg='`count` has to be greater than 0') + + if permissions is not None: + import re + if re.match("^[0-7]{3}$", permissions) is None: + module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 
600")
+
+    if exact_count is not None:
+        # Deploy an exact count of VMs
+        changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
+                                                                                   count_attributes, labels, count_labels, disk_size,
+                                                                                   networks, hard, wait, wait_timeout, put_vm_on_hold, persistent, updateconf)
+        vms = tagged_instances_list
+    elif template_id is not None and state == 'present':
+        # Deploy count VMs
+        changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
+                                                                             attributes, labels, disk_size, networks, wait, wait_timeout,
+                                                                             put_vm_on_hold, persistent, updateconf)
+        # instances_list - new instances
+        # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
+        vms = instances_list
+    else:
+        # Fetch data of instances, or change their state
+        if not (instance_ids or attributes or labels):
+            module.fail_json(msg="At least one of `instance_ids`, `attributes`, `labels` must be passed!")
+
+        if memory or cpu or vcpu or disk_size or networks:
+            module.fail_json(msg="Parameters `memory`, `cpu`, `vcpu`, `disk_size` and `networks` can only be set when deploying a VM!")
+
+        if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
+            module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
+
+        vms = []
+        tagged = False
+        changed = False
+
+        if instance_ids:
+            vms = get_vms_by_ids(module, one_client, state, instance_ids)
+        else:
+            tagged = True
+            vms = get_all_vms_by_attributes(one_client, attributes, labels)
+
+        if len(vms) == 0 and state != 'absent' and state != 'present':
+            module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
+
+        if len(vms) == 0 and state == 'present' and not tagged:
+            module.fail_json(msg='There are no instances with specified `instance_ids`.')
+
+        if tagged and state == 'absent':
+            module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
+
+        if state == 'absent':
+            changed = terminate_vms(module, one_client, vms, hard)
+        elif state == 'rebooted':
+            changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
+        elif state == 'poweredoff':
+            changed = poweroff_vms(module, one_client, vms, hard)
+        elif state == 'running':
+            changed = resume_vms(module, one_client, vms)
+
+        instances_list = vms
+        tagged_instances_list = []
+
+    if permissions is not None:
+        changed = set_vm_permissions(module, one_client, vms, permissions) or changed
+
+    if owner_id is not None or group_id is not None:
+        changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
+
+    if template_id is None and updateconf is not None:
+        changed = update_vms(module, one_client, vms, updateconf) or changed
+
+    if wait and not module.check_mode and state != 'present':
+        wait_for = {
+            'absent': wait_for_done,
+            'rebooted': wait_for_running,
+            'poweredoff': wait_for_poweroff,
+            'running': wait_for_running
+        }
+        for vm in vms:
+            if vm is not None:
+                wait_for[state](module, one_client, vm, wait_timeout)
+
+    if disk_saveas is not None:
+        if len(vms) == 0:
+            module.fail_json(msg="There is no VM whose disk will be saved.")
+        disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
+        changed = True
+
+    # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option
+    instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
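+    # Shape of the values assembled here (illustrative data; keys as documented in RETURN):
+    #   instances     -> [{'vm_id': 153, 'vm_name': 'foo', 'state': 'ACTIVE', ...}]
+    #   instances_ids -> [153]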
+    instances_ids = list(vm.ID for vm in instances_list if vm is not None)
+    # tagged_instances - a list of instances info based on specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels)
+    tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
+
+    result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py b/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
new file mode 100644
index 000000000..37dca74f2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
@@ -0,0 +1,580 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_firewall_policy
+short_description: Configure 1&1 firewall policy
+description:
+  - Create, remove, reconfigure, update firewall policies.
+    This module has a dependency on 1and1 >= 1.0.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Define a firewall policy state to create, remove, or update.
+    required: false
+    type: str
+    default: 'present'
+    choices: [ "present", "absent", "update" ]
+  auth_token:
+    description:
+      - Authenticating API token provided by 1&1.
+    type: str
+  api_url:
+    description:
+      - Custom API URL. Overrides the
+        ONEANDONE_API_URL environment variable.
+    type: str
+    required: false
+  name:
+    description:
+      - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
+        maxLength=128
+    type: str
+  firewall_policy:
+    description:
+      - The identifier (id or name) of the firewall policy used with update state.
+    type: str
+  rules:
+    description:
+      - A list of rules that will be set for the firewall policy.
+        Each rule must contain the protocol parameter, in addition to three optional parameters
+        (port_from, port_to, and source).
+    type: list
+    elements: dict
+    default: []
+  add_server_ips:
+    description:
+      - A list of server identifiers (id or name) to be assigned to a firewall policy.
+        Used in combination with update state.
+    type: list
+    elements: str
+    required: false
+    default: []
+  remove_server_ips:
+    description:
+      - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+    type: list
+    elements: str
+    required: false
+    default: []
+  add_rules:
+    description:
+      - A list of rules that will be added to an existing firewall policy.
+        Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+    type: list
+    elements: dict
+    required: false
+    default: []
+  remove_rules:
+    description:
+      - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+    type: list
+    elements: str
+    required: false
+    default: []
+  description:
+    description:
+      - Firewall policy description.
maxLength=256 + type: str + required: false + wait: + description: + - wait for the instance to be in state 'running' before returning + required: false + default: true + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + type: int + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods + type: int + default: 5 + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" +''' + +EXAMPLES = ''' +- name: Create a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + name: ansible-firewall-policy + description: Testing creation of firewall policies with ansible + rules: + - + protocol: TCP + port_from: 80 + port_to: 80 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + +- name: Destroy a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + state: absent + name: ansible-firewall-policy + +- name: Update a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + state: update + firewall_policy: ansible-firewall-policy + name: ansible-firewall-policy-updated + description: Testing creation of firewall policies with ansible - updated + +- name: Add server to a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + add_server_ips: + - server_identifier (id or name) + - server_identifier #2 (id or name) + wait: true + wait_timeout: 500 + state: update + +- name: Remove server from a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + remove_server_ips: + - B2504878540DBC5F7634EB00A07C1EBD (server's IP id) + wait: true + wait_timeout: 500 + state: update + +- name: Add rules to a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + description: Adding rules to an existing firewall policy + add_rules: + - + protocol: TCP + port_from: 70 + port_to: 70 + source: 0.0.0.0 + - + protocol: TCP + port_from: 60 + port_to: 60 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + state: update + +- name: Remove rules from a firewall policy + community.general.oneandone_firewall_policy: + auth_token: oneandone_private_api_key + firewall_policy: ansible-firewall-policy-updated + remove_rules: + - rule_id #1 + - rule_id #2 + - ... + wait: true + wait_timeout: 500 + state: update +''' + +RETURN = ''' +firewall_policy: + description: Information about the firewall policy that was processed + type: dict + sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' + returned: always +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_firewall_policy, + get_server, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_server_ips(module, oneandone_conn, firewall_id, server_ids): + """ + Assigns servers to a firewall policy. 
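+
+    For every identifier in server_ids, the first IP attached to that server
+    is the one assigned to the policy; e.g. ['web-01'] (an illustrative name)
+    would attach web-01's primary IP.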
+ """ + try: + attach_servers = [] + + for _server_id in server_ids: + server = get_server(oneandone_conn, _server_id, True) + attach_server = oneandone.client.AttachServer( + server_id=server['id'], + server_ip_id=next(iter(server['ips'] or []), None)['id'] + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + firewall_policy = oneandone_conn.attach_server_firewall_policy( + firewall_id=firewall_id, + server_ips=attach_servers) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id): + """ + Unassigns a server/IP from a firewall policy. + """ + try: + if module.check_mode: + firewall_server = oneandone_conn.get_firewall_server( + firewall_id=firewall_id, + server_ip_id=server_ip_id) + if firewall_server: + return True + return False + + firewall_policy = oneandone_conn.remove_firewall_server( + firewall_id=firewall_id, + server_ip_id=server_ip_id) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): + """ + Adds new rules to a firewall policy. + """ + try: + firewall_rules = [] + + for rule in rules: + firewall_rule = oneandone.client.FirewallPolicyRule( + protocol=rule['protocol'], + port_from=rule['port_from'], + port_to=rule['port_to'], + source=rule['source']) + firewall_rules.append(firewall_rule) + + if module.check_mode: + firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id) + if (firewall_rules and firewall_policy_id): + return True + return False + + firewall_policy = oneandone_conn.add_firewall_policy_rule( + firewall_id=firewall_id, + firewall_policy_rules=firewall_rules + ) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id): + """ + Removes a rule from a firewall policy. + """ + try: + if module.check_mode: + rule = oneandone_conn.get_firewall_policy_rule( + firewall_id=firewall_id, + rule_id=rule_id) + if rule: + return True + return False + + firewall_policy = oneandone_conn.remove_firewall_rule( + firewall_id=firewall_id, + rule_id=rule_id + ) + return firewall_policy + except Exception as e: + module.fail_json(msg=str(e)) + + +def update_firewall_policy(module, oneandone_conn): + """ + Updates a firewall policy based on input arguments. + Firewall rules and server ips can be added/removed to/from + firewall policy. Firewall policy name and description can be + updated as well. 
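+
+    A minimal call sketch (policy name and rule values are illustrative):
+
+        module.params.update({'firewall_policy': 'web-policy',
+                              'add_rules': [{'protocol': 'TCP', 'port_from': 443,
+                                             'port_to': 443, 'source': '0.0.0.0'}]})
+        changed, policy = update_firewall_policy(module, oneandone_conn)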
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + firewall_policy_id = module.params.get('firewall_policy') + name = module.params.get('name') + description = module.params.get('description') + add_server_ips = module.params.get('add_server_ips') + remove_server_ips = module.params.get('remove_server_ips') + add_rules = module.params.get('add_rules') + remove_rules = module.params.get('remove_rules') + + changed = False + + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True) + if firewall_policy is None: + _check_mode(module, False) + + if name or description: + _check_mode(module, True) + firewall_policy = oneandone_conn.modify_firewall( + firewall_id=firewall_policy['id'], + name=name, + description=description) + changed = True + + if add_server_ips: + if module.check_mode: + _check_mode(module, _add_server_ips(module, + oneandone_conn, + firewall_policy['id'], + add_server_ips)) + + firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips) + changed = True + + if remove_server_ips: + chk_changed = False + for server_ip_id in remove_server_ips: + if module.check_mode: + chk_changed |= _remove_firewall_server(module, + oneandone_conn, + firewall_policy['id'], + server_ip_id) + + _remove_firewall_server(module, + oneandone_conn, + firewall_policy['id'], + server_ip_id) + _check_mode(module, chk_changed) + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) + changed = True + + if add_rules: + firewall_policy = _add_firewall_rules(module, + oneandone_conn, + firewall_policy['id'], + add_rules) + _check_mode(module, firewall_policy) + changed = True + + if remove_rules: + chk_changed = False + for rule_id in remove_rules: + if module.check_mode: + chk_changed |= _remove_firewall_rule(module, + oneandone_conn, + firewall_policy['id'], + rule_id) + + _remove_firewall_rule(module, + oneandone_conn, + firewall_policy['id'], + rule_id) + _check_mode(module, chk_changed) + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) + changed = True + + return (changed, firewall_policy) + except Exception as e: + module.fail_json(msg=str(e)) + + +def create_firewall_policy(module, oneandone_conn): + """ + Create a new firewall policy. 
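+
+    A minimal call sketch (name, description and rule values are illustrative):
+
+        module.params.update({'name': 'ansible-fw', 'description': 'demo',
+                              'rules': [{'protocol': 'TCP', 'port_from': 22,
+                                         'port_to': 22, 'source': '0.0.0.0'}]})
+        changed, policy = create_firewall_policy(module, oneandone_conn)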
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + name = module.params.get('name') + description = module.params.get('description') + rules = module.params.get('rules') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + firewall_rules = [] + + for rule in rules: + firewall_rule = oneandone.client.FirewallPolicyRule( + protocol=rule['protocol'], + port_from=rule['port_from'], + port_to=rule['port_to'], + source=rule['source']) + firewall_rules.append(firewall_rule) + + firewall_policy_obj = oneandone.client.FirewallPolicy( + name=name, + description=description + ) + + _check_mode(module, True) + firewall_policy = oneandone_conn.create_firewall_policy( + firewall_policy=firewall_policy_obj, + firewall_policy_rules=firewall_rules + ) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.firewall_policy, + firewall_policy['id'], + wait_timeout, + wait_interval) + + firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh + changed = True if firewall_policy else False + + _check_mode(module, False) + + return (changed, firewall_policy) + except Exception as e: + module.fail_json(msg=str(e)) + + +def remove_firewall_policy(module, oneandone_conn): + """ + Removes a firewall policy. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + fp_id = module.params.get('name') + firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id) + if module.check_mode: + if firewall_policy_id is None: + _check_mode(module, False) + _check_mode(module, True) + firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id) + + changed = True if firewall_policy else False + + return (changed, { + 'id': firewall_policy['id'], + 'name': firewall_policy['name'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + name=dict(type='str'), + firewall_policy=dict(type='str'), + description=dict(type='str'), + rules=dict(type='list', elements="dict", default=[]), + add_server_ips=dict(type='list', elements="str", default=[]), + remove_server_ips=dict(type='list', elements="str", default=[]), + add_rules=dict(type='list', elements="dict", default=[]), + remove_rules=dict(type='list', elements="str", default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='The "auth_token" parameter or ' + + 'ONEANDONE_AUTH_TOKEN environment variable is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + 
msg="'name' parameter is required to delete a firewall policy.") + try: + (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'update': + if not module.params.get('firewall_policy'): + module.fail_json( + msg="'firewall_policy' parameter is required to update a firewall policy.") + try: + (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'present': + for param in ('name', 'rules'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new firewall policies." % param) + try: + (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, firewall_policy=firewall_policy) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py b/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py new file mode 100644 index 000000000..7f7af9c4f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py @@ -0,0 +1,684 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneandone_load_balancer +short_description: Configure 1&1 load balancer +description: + - Create, remove, update load balancers. + This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Define a load balancer state to create, remove, or update. + type: str + required: false + default: 'present' + choices: [ "present", "absent", "update" ] + auth_token: + description: + - Authenticating API token provided by 1&1. + type: str + load_balancer: + description: + - The identifier (id or name) of the load balancer used with update state. + type: str + api_url: + description: + - Custom API URL. Overrides the + ONEANDONE_API_URL environment variable. + type: str + required: false + name: + description: + - Load balancer name used with present state. Used as identifier (id or name) when used with absent state. + maxLength=128 + type: str + health_check_test: + description: + - Type of the health check. At the moment, HTTP is not allowed. + type: str + choices: [ "NONE", "TCP", "HTTP", "ICMP" ] + health_check_interval: + description: + - Health check period in seconds. minimum=5, maximum=300, multipleOf=1 + type: str + health_check_path: + description: + - Url to call for checking. Required for HTTP health check. maxLength=1000 + type: str + required: false + health_check_parse: + description: + - Regular expression to check. Required for HTTP health check. maxLength=64 + type: str + required: false + persistence: + description: + - Persistence. + type: bool + persistence_time: + description: + - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1 + type: str + method: + description: + - Balancing procedure. 
+ type: str + choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ] + datacenter: + description: + - ID or country code of the datacenter where the load balancer will be created. + - If not specified, it defaults to I(US). + type: str + choices: [ "US", "ES", "DE", "GB" ] + required: false + rules: + description: + - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, + port_balancer, and port_server parameters, in addition to source parameter, which is optional. + type: list + elements: dict + default: [] + description: + description: + - Description of the load balancer. maxLength=256 + type: str + required: false + add_server_ips: + description: + - A list of server identifiers (id or name) to be assigned to a load balancer. + Used in combination with update state. + type: list + elements: str + required: false + default: [] + remove_server_ips: + description: + - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state. + type: list + elements: str + required: false + default: [] + add_rules: + description: + - A list of rules that will be added to an existing load balancer. + It is syntax is the same as the one used for rules parameter. Used in combination with update state. + type: list + elements: dict + required: false + default: [] + remove_rules: + description: + - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state. + type: list + elements: str + required: false + default: [] + wait: + description: + - wait for the instance to be in state 'running' before returning + required: false + default: true + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + type: int + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods + type: int + default: 5 + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - Amel Ajdinovic (@aajdinov) + - Ethan Devenport (@edevenport) +''' + +EXAMPLES = ''' +- name: Create a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + name: ansible load balancer + description: Testing creation of load balancer with ansible + health_check_test: TCP + health_check_interval: 40 + persistence: true + persistence_time: 1200 + method: ROUND_ROBIN + datacenter: US + rules: + - + protocol: TCP + port_balancer: 80 + port_server: 80 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + +- name: Destroy a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + name: ansible load balancer + wait: true + wait_timeout: 500 + state: absent + +- name: Update a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer + name: ansible load balancer updated + description: Testing the update of a load balancer with ansible + wait: true + wait_timeout: 500 + state: update + +- name: Add server to a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding server to a load balancer with ansible + add_server_ips: + - server identifier (id or name) + wait: true + wait_timeout: 500 + state: update + +- name: Remove server from a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer 
updated + description: Removing server from a load balancer with ansible + remove_server_ips: + - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) + wait: true + wait_timeout: 500 + state: update + +- name: Add rules to a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding rules to a load balancer with ansible + add_rules: + - + protocol: TCP + port_balancer: 70 + port_server: 70 + source: 0.0.0.0 + - + protocol: TCP + port_balancer: 60 + port_server: 60 + source: 0.0.0.0 + wait: true + wait_timeout: 500 + state: update + +- name: Remove rules from a load balancer + community.general.oneandone_load_balancer: + auth_token: oneandone_private_api_key + load_balancer: ansible load balancer updated + description: Adding rules to a load balancer with ansible + remove_rules: + - rule_id #1 + - rule_id #2 + - ... + wait: true + wait_timeout: 500 + state: update +''' + +RETURN = ''' +load_balancer: + description: Information about the load balancer that was processed + type: dict + sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}' + returned: always +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_load_balancer, + get_server, + get_datacenter, + OneAndOneResources, + wait_for_resource_creation_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] +HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP'] +METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS'] + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): + """ + Assigns servers to a load balancer. + """ + try: + attach_servers = [] + + for server_id in server_ids: + server = get_server(oneandone_conn, server_id, True) + attach_server = oneandone.client.AttachServer( + server_id=server['id'], + server_ip_id=next(iter(server['ips'] or []), None)['id'] + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + load_balancer = oneandone_conn.attach_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ips=attach_servers) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id): + """ + Unassigns a server/IP from a load balancer. + """ + try: + if module.check_mode: + lb_server = oneandone_conn.get_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ip_id=server_ip_id) + if lb_server: + return True + return False + + load_balancer = oneandone_conn.remove_load_balancer_server( + load_balancer_id=load_balancer_id, + server_ip_id=server_ip_id) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): + """ + Adds new rules to a load_balancer. 
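+    Each rule is a dict with protocol, port_balancer, port_server, and
+    source keys, matching the module's rules option.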
+ """ + try: + load_balancer_rules = [] + + for rule in rules: + load_balancer_rule = oneandone.client.LoadBalancerRule( + protocol=rule['protocol'], + port_balancer=rule['port_balancer'], + port_server=rule['port_server'], + source=rule['source']) + load_balancer_rules.append(load_balancer_rule) + + if module.check_mode: + lb_id = get_load_balancer(oneandone_conn, load_balancer_id) + if (load_balancer_rules and lb_id): + return True + return False + + load_balancer = oneandone_conn.add_load_balancer_rule( + load_balancer_id=load_balancer_id, + load_balancer_rules=load_balancer_rules + ) + + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id): + """ + Removes a rule from a load_balancer. + """ + try: + if module.check_mode: + rule = oneandone_conn.get_load_balancer_rule( + load_balancer_id=load_balancer_id, + rule_id=rule_id) + if rule: + return True + return False + + load_balancer = oneandone_conn.remove_load_balancer_rule( + load_balancer_id=load_balancer_id, + rule_id=rule_id + ) + return load_balancer + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def update_load_balancer(module, oneandone_conn): + """ + Updates a load_balancer based on input arguments. + Load balancer rules and server ips can be added/removed to/from + load balancer. Load balancer name, description, health_check_test, + health_check_interval, persistence, persistence_time, and method + can be updated as well. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + load_balancer_id = module.params.get('load_balancer') + name = module.params.get('name') + description = module.params.get('description') + health_check_test = module.params.get('health_check_test') + health_check_interval = module.params.get('health_check_interval') + health_check_path = module.params.get('health_check_path') + health_check_parse = module.params.get('health_check_parse') + persistence = module.params.get('persistence') + persistence_time = module.params.get('persistence_time') + method = module.params.get('method') + add_server_ips = module.params.get('add_server_ips') + remove_server_ips = module.params.get('remove_server_ips') + add_rules = module.params.get('add_rules') + remove_rules = module.params.get('remove_rules') + + changed = False + + load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True) + if load_balancer is None: + _check_mode(module, False) + + if (name or description or health_check_test or health_check_interval or health_check_path or + health_check_parse or persistence or persistence_time or method): + _check_mode(module, True) + load_balancer = oneandone_conn.modify_load_balancer( + load_balancer_id=load_balancer['id'], + name=name, + description=description, + health_check_test=health_check_test, + health_check_interval=health_check_interval, + health_check_path=health_check_path, + health_check_parse=health_check_parse, + persistence=persistence, + persistence_time=persistence_time, + method=method) + changed = True + + if add_server_ips: + if module.check_mode: + _check_mode(module, _add_server_ips(module, + oneandone_conn, + load_balancer['id'], + add_server_ips)) + + load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips) + changed = True + + if remove_server_ips: + chk_changed = False + for server_ip_id in remove_server_ips: + if module.check_mode: + chk_changed |= 
_remove_load_balancer_server(module, + oneandone_conn, + load_balancer['id'], + server_ip_id) + + _remove_load_balancer_server(module, + oneandone_conn, + load_balancer['id'], + server_ip_id) + _check_mode(module, chk_changed) + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) + changed = True + + if add_rules: + load_balancer = _add_load_balancer_rules(module, + oneandone_conn, + load_balancer['id'], + add_rules) + _check_mode(module, load_balancer) + changed = True + + if remove_rules: + chk_changed = False + for rule_id in remove_rules: + if module.check_mode: + chk_changed |= _remove_load_balancer_rule(module, + oneandone_conn, + load_balancer['id'], + rule_id) + + _remove_load_balancer_rule(module, + oneandone_conn, + load_balancer['id'], + rule_id) + _check_mode(module, chk_changed) + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) + changed = True + + try: + return (changed, load_balancer) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def create_load_balancer(module, oneandone_conn): + """ + Create a new load_balancer. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + name = module.params.get('name') + description = module.params.get('description') + health_check_test = module.params.get('health_check_test') + health_check_interval = module.params.get('health_check_interval') + health_check_path = module.params.get('health_check_path') + health_check_parse = module.params.get('health_check_parse') + persistence = module.params.get('persistence') + persistence_time = module.params.get('persistence_time') + method = module.params.get('method') + datacenter = module.params.get('datacenter') + rules = module.params.get('rules') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + load_balancer_rules = [] + + datacenter_id = None + if datacenter is not None: + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + for rule in rules: + load_balancer_rule = oneandone.client.LoadBalancerRule( + protocol=rule['protocol'], + port_balancer=rule['port_balancer'], + port_server=rule['port_server'], + source=rule['source']) + load_balancer_rules.append(load_balancer_rule) + + _check_mode(module, True) + load_balancer_obj = oneandone.client.LoadBalancer( + health_check_path=health_check_path, + health_check_parse=health_check_parse, + name=name, + description=description, + health_check_test=health_check_test, + health_check_interval=health_check_interval, + persistence=persistence, + persistence_time=persistence_time, + method=method, + datacenter_id=datacenter_id + ) + + load_balancer = oneandone_conn.create_load_balancer( + load_balancer=load_balancer_obj, + load_balancer_rules=load_balancer_rules + ) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.load_balancer, + load_balancer['id'], + wait_timeout, + wait_interval) + + load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh + changed = True if load_balancer else False + + _check_mode(module, False) + + return (changed, load_balancer) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def remove_load_balancer(module, oneandone_conn): + """ + Removes a load_balancer. 
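+
+    In check mode this only reports whether the load balancer
+    identified by the name parameter exists; nothing is deleted.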
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + lb_id = module.params.get('name') + load_balancer_id = get_load_balancer(oneandone_conn, lb_id) + if module.check_mode: + if load_balancer_id is None: + _check_mode(module, False) + _check_mode(module, True) + load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id) + + changed = True if load_balancer else False + + return (changed, { + 'id': load_balancer['id'], + 'name': load_balancer['name'] + }) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + load_balancer=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + health_check_test=dict( + choices=HEALTH_CHECK_TESTS), + health_check_interval=dict(type='str'), + health_check_path=dict(type='str'), + health_check_parse=dict(type='str'), + persistence=dict(type='bool'), + persistence_time=dict(type='str'), + method=dict( + choices=METHODS), + datacenter=dict( + choices=DATACENTERS), + rules=dict(type='list', elements="dict", default=[]), + add_server_ips=dict(type='list', elements="str", default=[]), + remove_server_ips=dict(type='list', elements="str", default=[]), + add_rules=dict(type='list', elements="dict", default=[]), + remove_rules=dict(type='list', elements="str", default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for deleting a load balancer.") + try: + (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + elif state == 'update': + if not module.params.get('load_balancer'): + module.fail_json( + msg="'load_balancer' parameter is required for updating a load balancer.") + try: + (changed, load_balancer) = update_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('name', 'health_check_test', 'health_check_interval', 'persistence', + 'persistence_time', 'method', 'rules'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new load balancers." 
% param) + try: + (changed, load_balancer) = create_load_balancer(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=changed, load_balancer=load_balancer) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py b/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py new file mode 100644 index 000000000..6118645bf --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py @@ -0,0 +1,1045 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneandone_monitoring_policy +short_description: Configure 1&1 monitoring policy +description: + - Create, remove, update monitoring policies + (and add/remove ports, processes, and servers). + This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Define a monitoring policy's state to create, remove, update. + type: str + required: false + default: present + choices: [ "present", "absent", "update" ] + auth_token: + description: + - Authenticating API token provided by 1&1. + type: str + api_url: + description: + - Custom API URL. Overrides the + ONEANDONE_API_URL environment variable. + type: str + required: false + name: + description: + - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128 + type: str + monitoring_policy: + description: + - The identifier (id or name) of the monitoring policy used with update state. + type: str + agent: + description: + - Set true for using agent. + type: str + email: + description: + - User's email. maxLength=128 + type: str + description: + description: + - Monitoring policy description. maxLength=256 + type: str + required: false + thresholds: + description: + - Monitoring policy thresholds. Each of the suboptions have warning and critical, + which both have alert and value suboptions. Warning is used to set limits for + warning alerts, critical is used to set critical alerts. alert enables alert, + and value is used to advise when the value is exceeded. + type: list + elements: dict + default: [] + suboptions: + cpu: + description: + - Consumption limits of CPU. + required: true + ram: + description: + - Consumption limits of RAM. + required: true + disk: + description: + - Consumption limits of hard disk. + required: true + internal_ping: + description: + - Response limits of internal ping. + required: true + transfer: + description: + - Consumption limits for transfer. + required: true + ports: + description: + - Array of ports that will be monitoring. + type: list + elements: dict + default: [] + suboptions: + protocol: + description: + - Internet protocol. + choices: [ "TCP", "UDP" ] + required: true + port: + description: + - Port number. minimum=1, maximum=65535 + required: true + alert_if: + description: + - Case of alert. 
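+          - The alert is raised while the port is in the given state.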
+ choices: [ "RESPONDING", "NOT_RESPONDING" ] + required: true + email_notification: + description: + - Set true for sending e-mail notifications. + required: true + processes: + description: + - Array of processes that will be monitoring. + type: list + elements: dict + default: [] + suboptions: + process: + description: + - Name of the process. maxLength=50 + required: true + alert_if: + description: + - Case of alert. + choices: [ "RUNNING", "NOT_RUNNING" ] + required: true + add_ports: + description: + - Ports to add to the monitoring policy. + type: list + elements: dict + required: false + default: [] + add_processes: + description: + - Processes to add to the monitoring policy. + type: list + elements: dict + required: false + default: [] + add_servers: + description: + - Servers to add to the monitoring policy. + type: list + elements: str + required: false + default: [] + remove_ports: + description: + - Ports to remove from the monitoring policy. + type: list + elements: str + required: false + default: [] + remove_processes: + description: + - Processes to remove from the monitoring policy. + type: list + elements: str + required: false + default: [] + remove_servers: + description: + - Servers to remove from the monitoring policy. + type: list + elements: str + required: false + default: [] + update_ports: + description: + - Ports to be updated on the monitoring policy. + type: list + elements: dict + required: false + default: [] + update_processes: + description: + - Processes to be updated on the monitoring policy. + type: list + elements: dict + required: false + default: [] + wait: + description: + - wait for the instance to be in state 'running' before returning + required: false + default: true + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + type: int + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods + type: int + default: 5 + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" +''' + +EXAMPLES = ''' +- name: Create a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + name: ansible monitoring policy + description: Testing creation of a monitoring policy with ansible + email: your@emailaddress.com + agent: true + thresholds: + - + cpu: + warning: + value: 80 + alert: false + critical: + value: 92 + alert: false + - + ram: + warning: + value: 80 + alert: false + critical: + value: 90 + alert: false + - + disk: + warning: + value: 80 + alert: false + critical: + value: 90 + alert: false + - + internal_ping: + warning: + value: 50 + alert: false + critical: + value: 100 + alert: false + - + transfer: + warning: + value: 1000 + alert: false + critical: + value: 2000 + alert: false + ports: + - + protocol: TCP + port: 22 + alert_if: RESPONDING + email_notification: false + processes: + - + process: test + alert_if: NOT_RUNNING + email_notification: false + wait: true + +- name: Destroy a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + state: absent + name: ansible monitoring policy + +- name: Update a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy + name: ansible monitoring policy updated + description: Testing creation of a monitoring policy with ansible updated + 
email: another@emailaddress.com + thresholds: + - + cpu: + warning: + value: 70 + alert: false + critical: + value: 90 + alert: false + - + ram: + warning: + value: 70 + alert: false + critical: + value: 80 + alert: false + - + disk: + warning: + value: 70 + alert: false + critical: + value: 80 + alert: false + - + internal_ping: + warning: + value: 60 + alert: false + critical: + value: 90 + alert: false + - + transfer: + warning: + value: 900 + alert: false + critical: + value: 1900 + alert: false + wait: true + state: update + +- name: Add a port to a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + add_ports: + - + protocol: TCP + port: 33 + alert_if: RESPONDING + email_notification: false + wait: true + state: update + +- name: Update existing ports of a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + update_ports: + - + id: existing_port_id + protocol: TCP + port: 34 + alert_if: RESPONDING + email_notification: false + - + id: existing_port_id + protocol: TCP + port: 23 + alert_if: RESPONDING + email_notification: false + wait: true + state: update + +- name: Remove a port from a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + remove_ports: + - port_id + state: update + +- name: Add a process to a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + add_processes: + - + process: test_2 + alert_if: NOT_RUNNING + email_notification: false + wait: true + state: update + +- name: Update existing processes of a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + update_processes: + - + id: process_id + process: test_1 + alert_if: NOT_RUNNING + email_notification: false + - + id: process_id + process: test_3 + alert_if: NOT_RUNNING + email_notification: false + wait: true + state: update + +- name: Remove a process from a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + remove_processes: + - process_id + wait: true + state: update + +- name: Add server to a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + add_servers: + - server id or name + wait: true + state: update + +- name: Remove server from a monitoring policy + community.general.oneandone_monitoring_policy: + auth_token: oneandone_private_api_key + monitoring_policy: ansible monitoring policy updated + remove_servers: + - server01 + wait: true + state: update +''' + +RETURN = ''' +monitoring_policy: + description: Information about the monitoring policy that was processed + type: dict + sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' + returned: always +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_monitoring_policy, + get_server, + OneAndOneResources, + wait_for_resource_creation_completion +) + 
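+# The SDK import below is wrapped in try/except so that a missing
+# "1and1" package is reported via module.fail_json() in main() instead
+# of raising an ImportError at load time.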
+HAS_ONEANDONE_SDK = True
+
+try:
+    import oneandone.client
+except ImportError:
+    HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
+    if module.check_mode:
+        module.exit_json(
+            changed=result
+        )
+
+
+def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
+    """
+    Adds new ports to a monitoring policy.
+    """
+    try:
+        monitoring_policy_ports = []
+
+        for _port in ports:
+            monitoring_policy_port = oneandone.client.Port(
+                protocol=_port['protocol'],
+                port=_port['port'],
+                alert_if=_port['alert_if'],
+                email_notification=_port['email_notification']
+            )
+            monitoring_policy_ports.append(monitoring_policy_port)
+
+        if module.check_mode:
+            if monitoring_policy_ports:
+                return True
+            return False
+
+        monitoring_policy = oneandone_conn.add_port(
+            monitoring_policy_id=monitoring_policy_id,
+            ports=monitoring_policy_ports)
+        return monitoring_policy
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
+    """
+    Removes a port from a monitoring policy.
+    """
+    try:
+        if module.check_mode:
+            # Check mode must not delete anything; only report whether
+            # the port currently exists on the policy.
+            port = oneandone_conn.get_monitoring_policy_port(
+                monitoring_policy_id=monitoring_policy_id,
+                port_id=port_id)
+            if port:
+                return True
+            return False
+
+        monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+            monitoring_policy_id=monitoring_policy_id,
+            port_id=port_id)
+        return monitoring_policy
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
+    """
+    Modifies a monitoring policy port.
+    """
+    try:
+        if module.check_mode:
+            cm_port = oneandone_conn.get_monitoring_policy_port(
+                monitoring_policy_id=monitoring_policy_id,
+                port_id=port_id)
+            if cm_port:
+                return True
+            return False
+
+        monitoring_policy_port = oneandone.client.Port(
+            protocol=port['protocol'],
+            port=port['port'],
+            alert_if=port['alert_if'],
+            email_notification=port['email_notification']
+        )
+
+        monitoring_policy = oneandone_conn.modify_port(
+            monitoring_policy_id=monitoring_policy_id,
+            port_id=port_id,
+            port=monitoring_policy_port)
+        return monitoring_policy
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
+    """
+    Adds new processes to a monitoring policy.
+    """
+    try:
+        monitoring_policy_processes = []
+
+        for _process in processes:
+            monitoring_policy_process = oneandone.client.Process(
+                process=_process['process'],
+                alert_if=_process['alert_if'],
+                email_notification=_process['email_notification']
+            )
+            monitoring_policy_processes.append(monitoring_policy_process)
+
+        if module.check_mode:
+            mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
+            if monitoring_policy_processes and mp_id:
+                return True
+            return False
+
+        monitoring_policy = oneandone_conn.add_process(
+            monitoring_policy_id=monitoring_policy_id,
+            processes=monitoring_policy_processes)
+        return monitoring_policy
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
+    """
+    Removes a process from a monitoring policy.
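+
+    In check mode this returns True if the process is currently part of
+    the policy and False otherwise; nothing is deleted.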
+ """ + try: + if module.check_mode: + process = oneandone_conn.get_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id + ) + if process: + return True + return False + + monitoring_policy = oneandone_conn.delete_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process): + """ + Modifies a monitoring policy process. + """ + try: + if module.check_mode: + cm_process = oneandone_conn.get_monitoring_policy_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id) + if cm_process: + return True + return False + + monitoring_policy_process = oneandone.client.Process( + process=process['process'], + alert_if=process['alert_if'], + email_notification=process['email_notification'] + ) + + monitoring_policy = oneandone_conn.modify_process( + monitoring_policy_id=monitoring_policy_id, + process_id=process_id, + process=monitoring_policy_process) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers): + """ + Attaches servers to a monitoring policy. + """ + try: + attach_servers = [] + + for _server_id in servers: + server_id = get_server(oneandone_conn, _server_id) + attach_server = oneandone.client.AttachServer( + server_id=server_id + ) + attach_servers.append(attach_server) + + if module.check_mode: + if attach_servers: + return True + return False + + monitoring_policy = oneandone_conn.attach_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + servers=attach_servers) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id): + """ + Detaches a server from a monitoring policy. + """ + try: + if module.check_mode: + mp_server = oneandone_conn.get_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + server_id=server_id) + if mp_server: + return True + return False + + monitoring_policy = oneandone_conn.detach_monitoring_policy_server( + monitoring_policy_id=monitoring_policy_id, + server_id=server_id) + return monitoring_policy + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def update_monitoring_policy(module, oneandone_conn): + """ + Updates a monitoring_policy based on input arguments. + Monitoring policy ports, processes and servers can be added/removed to/from + a monitoring policy. Monitoring policy name, description, email, + thresholds for cpu, ram, disk, transfer and internal_ping + can be updated as well. 
+ + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + monitoring_policy_id = module.params.get('monitoring_policy') + name = module.params.get('name') + description = module.params.get('description') + email = module.params.get('email') + thresholds = module.params.get('thresholds') + add_ports = module.params.get('add_ports') + update_ports = module.params.get('update_ports') + remove_ports = module.params.get('remove_ports') + add_processes = module.params.get('add_processes') + update_processes = module.params.get('update_processes') + remove_processes = module.params.get('remove_processes') + add_servers = module.params.get('add_servers') + remove_servers = module.params.get('remove_servers') + + changed = False + + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True) + if monitoring_policy is None: + _check_mode(module, False) + + _monitoring_policy = oneandone.client.MonitoringPolicy( + name=name, + description=description, + email=email + ) + + _thresholds = None + + if thresholds: + threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] + + _thresholds = [] + for threshold in thresholds: + key = list(threshold.keys())[0] + if key in threshold_entities: + _threshold = oneandone.client.Threshold( + entity=key, + warning_value=threshold[key]['warning']['value'], + warning_alert=str(threshold[key]['warning']['alert']).lower(), + critical_value=threshold[key]['critical']['value'], + critical_alert=str(threshold[key]['critical']['alert']).lower()) + _thresholds.append(_threshold) + + if name or description or email or thresholds: + _check_mode(module, True) + monitoring_policy = oneandone_conn.modify_monitoring_policy( + monitoring_policy_id=monitoring_policy['id'], + monitoring_policy=_monitoring_policy, + thresholds=_thresholds) + changed = True + + if add_ports: + if module.check_mode: + _check_mode(module, _add_ports(module, + oneandone_conn, + monitoring_policy['id'], + add_ports)) + + monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports) + changed = True + + if update_ports: + chk_changed = False + for update_port in update_ports: + if module.check_mode: + chk_changed |= _modify_port(module, + oneandone_conn, + monitoring_policy['id'], + update_port['id'], + update_port) + + _modify_port(module, + oneandone_conn, + monitoring_policy['id'], + update_port['id'], + update_port) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + if remove_ports: + chk_changed = False + for port_id in remove_ports: + if module.check_mode: + chk_changed |= _delete_monitoring_policy_port(module, + oneandone_conn, + monitoring_policy['id'], + port_id) + + _delete_monitoring_policy_port(module, + oneandone_conn, + monitoring_policy['id'], + port_id) + _check_mode(module, chk_changed) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + if add_processes: + monitoring_policy = _add_processes(module, + oneandone_conn, + monitoring_policy['id'], + add_processes) + _check_mode(module, monitoring_policy) + changed = True + + if update_processes: + chk_changed = False + for update_process in update_processes: + if module.check_mode: + chk_changed |= _modify_process(module, + oneandone_conn, + monitoring_policy['id'], + update_process['id'], + update_process) + + _modify_process(module, + oneandone_conn, + monitoring_policy['id'], + update_process['id'], + 
update_process) + _check_mode(module, chk_changed) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + if remove_processes: + chk_changed = False + for process_id in remove_processes: + if module.check_mode: + chk_changed |= _delete_monitoring_policy_process(module, + oneandone_conn, + monitoring_policy['id'], + process_id) + + _delete_monitoring_policy_process(module, + oneandone_conn, + monitoring_policy['id'], + process_id) + _check_mode(module, chk_changed) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + if add_servers: + monitoring_policy = _attach_monitoring_policy_server(module, + oneandone_conn, + monitoring_policy['id'], + add_servers) + _check_mode(module, monitoring_policy) + changed = True + + if remove_servers: + chk_changed = False + for _server_id in remove_servers: + server_id = get_server(oneandone_conn, _server_id) + + if module.check_mode: + chk_changed |= _detach_monitoring_policy_server(module, + oneandone_conn, + monitoring_policy['id'], + server_id) + + _detach_monitoring_policy_server(module, + oneandone_conn, + monitoring_policy['id'], + server_id) + _check_mode(module, chk_changed) + monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True) + changed = True + + return (changed, monitoring_policy) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def create_monitoring_policy(module, oneandone_conn): + """ + Creates a new monitoring policy. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + name = module.params.get('name') + description = module.params.get('description') + email = module.params.get('email') + agent = module.params.get('agent') + thresholds = module.params.get('thresholds') + ports = module.params.get('ports') + processes = module.params.get('processes') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + _monitoring_policy = oneandone.client.MonitoringPolicy(name, + description, + email, + agent, ) + + _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower() + + threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] + + _thresholds = [] + for threshold in thresholds: + key = list(threshold.keys())[0] + if key in threshold_entities: + _threshold = oneandone.client.Threshold( + entity=key, + warning_value=threshold[key]['warning']['value'], + warning_alert=str(threshold[key]['warning']['alert']).lower(), + critical_value=threshold[key]['critical']['value'], + critical_alert=str(threshold[key]['critical']['alert']).lower()) + _thresholds.append(_threshold) + + _ports = [] + for port in ports: + _port = oneandone.client.Port( + protocol=port['protocol'], + port=port['port'], + alert_if=port['alert_if'], + email_notification=str(port['email_notification']).lower()) + _ports.append(_port) + + _processes = [] + for process in processes: + _process = oneandone.client.Process( + process=process['process'], + alert_if=process['alert_if'], + email_notification=str(process['email_notification']).lower()) + _processes.append(_process) + + _check_mode(module, True) + monitoring_policy = oneandone_conn.create_monitoring_policy( + monitoring_policy=_monitoring_policy, + thresholds=_thresholds, + ports=_ports, + processes=_processes + ) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + 
OneAndOneResources.monitoring_policy, + monitoring_policy['id'], + wait_timeout, + wait_interval) + + changed = True if monitoring_policy else False + + _check_mode(module, False) + + return (changed, monitoring_policy) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def remove_monitoring_policy(module, oneandone_conn): + """ + Removes a monitoring policy. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + """ + try: + mp_id = module.params.get('name') + monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id) + if module.check_mode: + if monitoring_policy_id is None: + _check_mode(module, False) + _check_mode(module, True) + monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id) + + changed = True if monitoring_policy else False + + return (changed, { + 'id': monitoring_policy['id'], + 'name': monitoring_policy['name'] + }) + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + name=dict(type='str'), + monitoring_policy=dict(type='str'), + agent=dict(type='str'), + email=dict(type='str'), + description=dict(type='str'), + thresholds=dict(type='list', elements="dict", default=[]), + ports=dict(type='list', elements="dict", default=[]), + processes=dict(type='list', elements="dict", default=[]), + add_ports=dict(type='list', elements="dict", default=[]), + update_ports=dict(type='list', elements="dict", default=[]), + remove_ports=dict(type='list', elements="str", default=[]), + add_processes=dict(type='list', elements="dict", default=[]), + update_processes=dict(type='list', elements="dict", default=[]), + remove_processes=dict(type='list', elements="str", default=[]), + add_servers=dict(type='list', elements="str", default=[]), + remove_servers=dict(type='list', elements="str", default=[]), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required to delete a monitoring policy.") + try: + (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + elif state == 'update': + if not module.params.get('monitoring_policy'): + module.fail_json( + msg="'monitoring_policy' parameter is required to update a monitoring policy.") + try: + (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'): 
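+            # A new policy needs the full definition up front, so all of
+            # these options are mandatory for state=present.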
+ if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for a new monitoring policy." % param) + try: + (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=changed, monitoring_policy=monitoring_policy) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneandone_private_network.py b/ansible_collections/community/general/plugins/modules/oneandone_private_network.py new file mode 100644 index 000000000..114bf2f22 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneandone_private_network.py @@ -0,0 +1,455 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneandone_private_network +short_description: Configure 1&1 private networking +description: + - Create, remove, reconfigure, update a private network. + This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Define a network's state to create, remove, or update. + type: str + required: false + default: 'present' + choices: [ "present", "absent", "update" ] + auth_token: + description: + - Authenticating API token provided by 1&1. + type: str + private_network: + description: + - The identifier (id or name) of the network used with update state. + type: str + api_url: + description: + - Custom API URL. Overrides the + ONEANDONE_API_URL environment variable. + type: str + required: false + name: + description: + - Private network name used with present state. Used as identifier (id or name) when used with absent state. + type: str + description: + description: + - Set a description for the network. + type: str + datacenter: + description: + - The identifier of the datacenter where the private network will be created + type: str + choices: [US, ES, DE, GB] + network_address: + description: + - Set a private network space, i.e. 192.168.1.0 + type: str + subnet_mask: + description: + - Set the netmask for the private network, i.e. 255.255.255.0 + type: str + add_members: + description: + - List of server identifiers (name or id) to be added to the private network. + type: list + elements: str + default: [] + remove_members: + description: + - List of server identifiers (name or id) to be removed from the private network. 
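+      - Used in combination with update state.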
+ type: list + elements: str + default: [] + wait: + description: + - wait for the instance to be in state 'running' before returning + required: false + default: true + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + type: int + default: 600 + wait_interval: + description: + - Defines the number of seconds to wait when using the _wait_for methods + type: int + default: 5 + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - Amel Ajdinovic (@aajdinov) + - Ethan Devenport (@edevenport) +''' + +EXAMPLES = ''' +- name: Create a private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + name: backup_network + description: Testing creation of a private network with ansible + network_address: 70.35.193.100 + subnet_mask: 255.0.0.0 + datacenter: US + +- name: Destroy a private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + state: absent + name: backup_network + +- name: Modify the private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + network_address: 192.168.2.0 + subnet_mask: 255.255.255.0 + +- name: Add members to the private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + add_members: + - server identifier (id or name) + +- name: Remove members from the private network + community.general.oneandone_private_network: + auth_token: oneandone_private_api_key + state: update + private_network: backup_network + remove_members: + - server identifier (id or name) +''' + +RETURN = ''' +private_network: + description: Information about the private network. 
+  type: dict
+  sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
+  returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+    get_private_network,
+    get_server,
+    get_datacenter,
+    OneAndOneResources,
+    wait_for_resource_creation_completion,
+    wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+    import oneandone.client
+except ImportError:
+    HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+
+def _check_mode(module, result):
+    if module.check_mode:
+        module.exit_json(
+            changed=result
+        )
+
+
+def _add_servers(module, oneandone_conn, name, members):
+    try:
+        private_network_id = get_private_network(oneandone_conn, name)
+
+        if module.check_mode:
+            if private_network_id and members:
+                return True
+            return False
+
+        network = oneandone_conn.attach_private_network_servers(
+            private_network_id=private_network_id,
+            server_ids=members)
+
+        return network
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+
+def _remove_member(module, oneandone_conn, name, member_id):
+    try:
+        private_network_id = get_private_network(oneandone_conn, name)
+
+        if module.check_mode:
+            if private_network_id:
+                network_member = oneandone_conn.get_private_network_server(
+                    private_network_id=private_network_id,
+                    server_id=member_id)
+                if network_member:
+                    return True
+            return False
+
+        network = oneandone_conn.remove_private_network_server(
+            # use the resolved network id here, not the raw name
+            private_network_id=private_network_id,
+            server_id=member_id)
+
+        return network
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def create_network(module, oneandone_conn):
+    """
+    Create a new private network.
+
+    module : AnsibleModule object
+    oneandone_conn: authenticated oneandone object
+
+    Returns a tuple containing a 'changed' flag indicating whether
+    any network was added, and the network itself.
+    """
+    name = module.params.get('name')
+    description = module.params.get('description')
+    network_address = module.params.get('network_address')
+    subnet_mask = module.params.get('subnet_mask')
+    datacenter = module.params.get('datacenter')
+    wait = module.params.get('wait')
+    wait_timeout = module.params.get('wait_timeout')
+    wait_interval = module.params.get('wait_interval')
+
+    # default to None so the reference below cannot raise a NameError
+    # when no datacenter was given
+    datacenter_id = None
+    if datacenter is not None:
+        datacenter_id = get_datacenter(oneandone_conn, datacenter)
+        if datacenter_id is None:
+            module.fail_json(
+                msg='datacenter %s not found.' % datacenter)
+
+    try:
+        _check_mode(module, True)
+        network = oneandone_conn.create_private_network(
+            private_network=oneandone.client.PrivateNetwork(
+                name=name,
+                description=description,
+                network_address=network_address,
+                subnet_mask=subnet_mask,
+                datacenter_id=datacenter_id
+            ))
+
+        if wait:
+            wait_for_resource_creation_completion(
+                oneandone_conn,
+                OneAndOneResources.private_network,
+                network['id'],
+                wait_timeout,
+                wait_interval)
+            network = get_private_network(oneandone_conn,
+                                          network['id'],
+                                          True)
+
+        changed = True if network else False
+
+        _check_mode(module, False)
+
+        return (changed, network)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+
+def update_network(module, oneandone_conn):
+    """
+    Modifies a private network.
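+
+    Members passed in add_members/remove_members are resolved from
+    server names or ids via get_server() before being attached to or
+    removed from the network.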
+
+    module : AnsibleModule object
+    oneandone_conn: authenticated oneandone object
+    """
+    try:
+        _private_network_id = module.params.get('private_network')
+        _name = module.params.get('name')
+        _description = module.params.get('description')
+        _network_address = module.params.get('network_address')
+        _subnet_mask = module.params.get('subnet_mask')
+        _add_members = module.params.get('add_members')
+        _remove_members = module.params.get('remove_members')
+
+        changed = False
+
+        private_network = get_private_network(oneandone_conn,
+                                              _private_network_id,
+                                              True)
+        if private_network is None:
+            # In check mode report 'no change'; otherwise a missing network is
+            # an error rather than something to silently fall through.
+            _check_mode(module, False)
+            module.fail_json(
+                msg='private network %s not found.' % _private_network_id)
+
+        if _name or _description or _network_address or _subnet_mask:
+            _check_mode(module, True)
+            private_network = oneandone_conn.modify_private_network(
+                private_network_id=private_network['id'],
+                name=_name,
+                description=_description,
+                network_address=_network_address,
+                subnet_mask=_subnet_mask)
+            changed = True
+
+        if _add_members:
+            instances = []
+
+            for member in _add_members:
+                instance_id = get_server(oneandone_conn, member)
+                instance_obj = oneandone.client.AttachServer(server_id=instance_id)
+
+                instances.extend([instance_obj])
+            private_network = _add_servers(module, oneandone_conn, private_network['id'], instances)
+            _check_mode(module, private_network)
+            changed = True
+
+        if _remove_members:
+            chk_changed = False
+            for member in _remove_members:
+                instance = get_server(oneandone_conn, member, True)
+
+                if module.check_mode:
+                    chk_changed |= _remove_member(module,
+                                                  oneandone_conn,
+                                                  private_network['id'],
+                                                  instance['id'])
+                    # _check_mode() exits here when running in check mode.
+                    _check_mode(module, instance and chk_changed)
+
+                _remove_member(module,
+                               oneandone_conn,
+                               private_network['id'],
+                               instance['id'])
+            private_network = get_private_network(oneandone_conn,
+                                                  private_network['id'],
+                                                  True)
+            changed = True
+
+        return (changed, private_network)
+    except Exception as ex:
+        module.fail_json(msg=str(ex))
+
+
+def remove_network(module, oneandone_conn):
+    """
+    Removes a private network.
+
+    module : AnsibleModule object
+    oneandone_conn: authenticated oneandone object.
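+
+    Returns a (changed, summary) tuple; the summary is a dict carrying the
+    removed network's 'id' and 'name' (see the return statement below).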
+ """ + try: + pn_id = module.params.get('name') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + private_network_id = get_private_network(oneandone_conn, pn_id) + if module.check_mode: + if private_network_id is None: + _check_mode(module, False) + _check_mode(module, True) + private_network = oneandone_conn.delete_private_network(private_network_id) + wait_for_resource_deletion_completion(oneandone_conn, + OneAndOneResources.private_network, + private_network['id'], + wait_timeout, + wait_interval) + + changed = True if private_network else False + + return (changed, { + 'id': private_network['id'], + 'name': private_network['name'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + private_network=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + network_address=dict(type='str'), + subnet_mask=dict(type='str'), + add_members=dict(type='list', elements="str", default=[]), + remove_members=dict(type='list', elements="str", default=[]), + datacenter=dict( + choices=DATACENTERS), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for deleting a network.") + try: + (changed, private_network) = remove_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'update': + if not module.params.get('private_network'): + module.fail_json( + msg="'private_network' parameter is required for updating a network.") + try: + (changed, private_network) = update_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'present': + if not module.params.get('name'): + module.fail_json( + msg="'name' parameter is required for new networks.") + try: + (changed, private_network) = create_network(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, private_network=private_network) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py b/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py new file mode 100644 index 000000000..df5476feb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py @@ -0,0 +1,338 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_public_ip
+short_description: Configure 1&1 public IPs
+description:
+  - Create, update, and remove public IPs.
+    This module has a dependency on 1and1 >= 1.0.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - Define a public IP state to create, remove, or update.
+    type: str
+    required: false
+    default: 'present'
+    choices: [ "present", "absent", "update" ]
+  auth_token:
+    description:
+      - Authenticating API token provided by 1&1.
+    type: str
+  api_url:
+    description:
+      - Custom API URL. Overrides the
+        ONEANDONE_API_URL environment variable.
+    type: str
+    required: false
+  reverse_dns:
+    description:
+      - Reverse DNS name (maximum length 256 characters).
+    type: str
+    required: false
+  datacenter:
+    description:
+      - ID of the datacenter where the IP will be created (only for unassigned IPs).
+    type: str
+    choices: [US, ES, DE, GB]
+    default: US
+    required: false
+  type:
+    description:
+      - Type of IP. Currently, only IPV4 is available.
+    type: str
+    choices: ["IPV4", "IPV6"]
+    default: 'IPV4'
+    required: false
+  public_ip_id:
+    description:
+      - The ID of the public IP used with update and delete states.
+    type: str
+  wait:
+    description:
+      - Wait for the public IP to be created before returning.
+    required: false
+    default: true
+    type: bool
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+    type: int
+    default: 600
+  wait_interval:
+    description:
+      - Defines the number of seconds to wait between polls when using the
+        _wait_for methods.
+    type: int
+    default: 5
+
+requirements:
+  - "1and1"
+  - "python >= 2.6"
+
+author:
+  - Amel Ajdinovic (@aajdinov)
+  - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a public IP
+  community.general.oneandone_public_ip:
+    auth_token: oneandone_private_api_key
+    reverse_dns: example.com
+    datacenter: US
+    type: IPV4
+
+- name: Update a public IP
+  community.general.oneandone_public_ip:
+    auth_token: oneandone_private_api_key
+    public_ip_id: public ip id
+    reverse_dns: secondexample.com
+    state: update
+
+- name: Delete a public IP
+  community.general.oneandone_public_ip:
+    auth_token: oneandone_private_api_key
+    public_ip_id: public ip id
+    state: absent
+'''
+
+RETURN = '''
+public_ip:
+  description: Information about the public IP that was processed.
+  type: dict
+  sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
+  returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+    get_datacenter,
+    get_public_ip,
+    OneAndOneResources,
+    wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+    import oneandone.client
+except ImportError:
+    HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+TYPES = ['IPV4', 'IPV6']
+
+
+def _check_mode(module, result):
+    if module.check_mode:
+        module.exit_json(
+            changed=result
+        )
+
+
+def create_public_ip(module, oneandone_conn):
+    """
+    Create new public IP
+
+    module : AnsibleModule object
+    oneandone_conn: authenticated oneandone object
+
+    Returns a dictionary containing a 'changed' attribute indicating whether
+    any public IP was added.
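+
+    An illustrative return value (fields taken from the RETURN sample above):
+        (True, {'id': 'F77CC589EBC120905B4F4719217BFF6D', 'ip': '10.5.132.106'})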
+ """ + reverse_dns = module.params.get('reverse_dns') + datacenter = module.params.get('datacenter') + ip_type = module.params.get('type') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + if datacenter is not None: + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + _check_mode(module, False) + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + try: + _check_mode(module, True) + public_ip = oneandone_conn.create_public_ip( + reverse_dns=reverse_dns, + ip_type=ip_type, + datacenter_id=datacenter_id) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.public_ip, + public_ip['id'], + wait_timeout, + wait_interval) + public_ip = oneandone_conn.get_public_ip(public_ip['id']) + + changed = True if public_ip else False + + return (changed, public_ip) + except Exception as e: + module.fail_json(msg=str(e)) + + +def update_public_ip(module, oneandone_conn): + """ + Update a public IP + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any public IP was changed. + """ + reverse_dns = module.params.get('reverse_dns') + public_ip_id = module.params.get('public_ip_id') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + public_ip = get_public_ip(oneandone_conn, public_ip_id, True) + if public_ip is None: + _check_mode(module, False) + module.fail_json( + msg='public IP %s not found.' % public_ip_id) + + try: + _check_mode(module, True) + public_ip = oneandone_conn.modify_public_ip( + ip_id=public_ip['id'], + reverse_dns=reverse_dns) + + if wait: + wait_for_resource_creation_completion(oneandone_conn, + OneAndOneResources.public_ip, + public_ip['id'], + wait_timeout, + wait_interval) + public_ip = oneandone_conn.get_public_ip(public_ip['id']) + + changed = True if public_ip else False + + return (changed, public_ip) + except Exception as e: + module.fail_json(msg=str(e)) + + +def delete_public_ip(module, oneandone_conn): + """ + Delete a public IP + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any public IP was deleted. + """ + public_ip_id = module.params.get('public_ip_id') + + public_ip = get_public_ip(oneandone_conn, public_ip_id, True) + if public_ip is None: + _check_mode(module, False) + module.fail_json( + msg='public IP %s not found.' 
% public_ip_id) + + try: + _check_mode(module, True) + deleted_public_ip = oneandone_conn.delete_public_ip( + ip_id=public_ip['id']) + + changed = True if deleted_public_ip else False + + return (changed, { + 'id': public_ip['id'] + }) + except Exception as e: + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', no_log=True, + default=os.environ.get('ONEANDONE_AUTH_TOKEN')), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + public_ip_id=dict(type='str'), + reverse_dns=dict(type='str'), + datacenter=dict( + choices=DATACENTERS, + default='US'), + type=dict( + choices=TYPES, + default='IPV4'), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'update']), + ), + supports_check_mode=True + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='auth_token parameter is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('public_ip_id'): + module.fail_json( + msg="'public_ip_id' parameter is required to delete a public ip.") + try: + (changed, public_ip) = delete_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + elif state == 'update': + if not module.params.get('public_ip_id'): + module.fail_json( + msg="'public_ip_id' parameter is required to update a public ip.") + try: + (changed, public_ip) = update_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + elif state == 'present': + try: + (changed, public_ip) = create_public_ip(module, oneandone_conn) + except Exception as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed, public_ip=public_ip) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneandone_server.py b/ansible_collections/community/general/plugins/modules/oneandone_server.py new file mode 100644 index 000000000..59f504178 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneandone_server.py @@ -0,0 +1,704 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneandone_server +short_description: Create, destroy, start, stop, and reboot a 1&1 Host server +description: + - Create, destroy, update, start, stop, and reboot a 1&1 Host server. + When the server is created it can optionally wait for it to be 'running' before returning. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Define a server's state to create, remove, start or stop it. 
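+      # How these map onto API actions in this module (see startstop_server
+      # below): running -> POWER_ON and stopped -> POWER_OFF, both with the
+      # SOFTWARE method; present creates, absent deletes.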
+    type: str
+    default: present
+    choices: [ "present", "absent", "running", "stopped" ]
+  auth_token:
+    description:
+      - Authenticating API token provided by 1&1. Overrides the
+        ONEANDONE_AUTH_TOKEN environment variable.
+    type: str
+  api_url:
+    description:
+      - Custom API URL. Overrides the
+        ONEANDONE_API_URL environment variable.
+    type: str
+  datacenter:
+    description:
+      - The datacenter location.
+    type: str
+    default: US
+    choices: [ "US", "ES", "DE", "GB" ]
+  hostname:
+    description:
+      - The hostname or ID of the server. Only used when state is 'present'.
+    type: str
+  description:
+    description:
+      - The description of the server.
+    type: str
+  appliance:
+    description:
+      - The operating system name or ID for the server.
+        It is required only for 'present' state.
+    type: str
+  fixed_instance_size:
+    description:
+      - The instance size name or ID of the server.
+        It is required only for 'present' state, and it is mutually exclusive with
+        vcore, cores_per_processor, ram, and hdds parameters.
+      - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)'
+    type: str
+  vcore:
+    description:
+      - The total number of processors.
+        It must be provided with cores_per_processor, ram, and hdds parameters.
+    type: int
+  cores_per_processor:
+    description:
+      - The number of cores per processor.
+        It must be provided with vcore, ram, and hdds parameters.
+    type: int
+  ram:
+    description:
+      - The amount of RAM memory.
+        It must be provided with vcore, cores_per_processor, and hdds parameters.
+    type: float
+  hdds:
+    description:
+      - A list of hard disks with nested "size" and "is_main" properties.
+        It must be provided with vcore, cores_per_processor, and ram parameters.
+    type: list
+    elements: dict
+  private_network:
+    description:
+      - The private network name or ID.
+    type: str
+  firewall_policy:
+    description:
+      - The firewall policy name or ID.
+    type: str
+  load_balancer:
+    description:
+      - The load balancer name or ID.
+    type: str
+  monitoring_policy:
+    description:
+      - The monitoring policy name or ID.
+    type: str
+  server:
+    description:
+      - Server identifier (ID or hostname). It is required for all states except 'present'.
+    type: str
+  count:
+    description:
+      - The number of servers to create.
+    type: int
+    default: 1
+  ssh_key:
+    description:
+      - User's public SSH key (contents, not path).
+    type: raw
+  server_type:
+    description:
+      - The type of server to be built.
+    type: str
+    default: "cloud"
+    choices: [ "cloud", "baremetal", "k8s_node" ]
+  wait:
+    description:
+      - Wait for the server to be in state 'running' before returning.
+        Also used for the delete operation (set to 'false' if you do not want
+        to wait for each individual server to be deleted before moving on with
+        other tasks).
+    type: bool
+    default: true
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+    type: int
+    default: 600
+  wait_interval:
+    description:
+      - Defines the number of seconds to wait between polls when using the
+        wait_for methods.
+    type: int
+    default: 5
+  auto_increment:
+    description:
+      - When creating multiple servers at once, whether to differentiate
+        hostnames by appending a count after them or substituting the count
+        where there is a %02d or %03d in the hostname string.
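+      # For example, hostname "node%02d" with count=3 yields node01, node02
+      # and node03, while a plain hostname "web" yields web-1, web-2 and web-3
+      # (see _auto_increment_hostname below).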
+ type: bool + default: true + +requirements: + - "1and1" + - "python >= 2.6" + +author: + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" + +''' + +EXAMPLES = ''' +- name: Create three servers and enumerate their names + community.general.oneandone_server: + auth_token: oneandone_private_api_key + hostname: node%02d + fixed_instance_size: XL + datacenter: US + appliance: C5A349786169F140BCBC335675014C08 + auto_increment: true + count: 3 + +- name: Create three servers, passing in an ssh_key + community.general.oneandone_server: + auth_token: oneandone_private_api_key + hostname: node%02d + vcore: 2 + cores_per_processor: 4 + ram: 8.0 + hdds: + - size: 50 + is_main: false + datacenter: ES + appliance: C5A349786169F140BCBC335675014C08 + count: 3 + wait: true + wait_timeout: 600 + wait_interval: 10 + ssh_key: SSH_PUBLIC_KEY + +- name: Removing server + community.general.oneandone_server: + auth_token: oneandone_private_api_key + state: absent + server: 'node01' + +- name: Starting server + community.general.oneandone_server: + auth_token: oneandone_private_api_key + state: running + server: 'node01' + +- name: Stopping server + community.general.oneandone_server: + auth_token: oneandone_private_api_key + state: stopped + server: 'node01' +''' + +RETURN = ''' +servers: + description: Information about each server that was processed + type: list + sample: + - {"hostname": "my-server", "id": "server-id"} + returned: always +''' + +import os +import time +from ansible.module_utils.six.moves import xrange +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.oneandone import ( + get_datacenter, + get_fixed_instance_size, + get_appliance, + get_private_network, + get_monitoring_policy, + get_firewall_policy, + get_load_balancer, + get_server, + OneAndOneResources, + wait_for_resource_creation_completion, + wait_for_resource_deletion_completion +) + +HAS_ONEANDONE_SDK = True + +try: + import oneandone.client +except ImportError: + HAS_ONEANDONE_SDK = False + +DATACENTERS = ['US', 'ES', 'DE', 'GB'] + +ONEANDONE_SERVER_STATES = ( + 'DEPLOYING', + 'POWERED_OFF', + 'POWERED_ON', + 'POWERING_ON', + 'POWERING_OFF', +) + + +def _check_mode(module, result): + if module.check_mode: + module.exit_json( + changed=result + ) + + +def _create_server(module, oneandone_conn, hostname, description, + fixed_instance_size_id, vcore, cores_per_processor, ram, + hdds, datacenter_id, appliance_id, ssh_key, + private_network_id, firewall_policy_id, load_balancer_id, + monitoring_policy_id, server_type, wait, wait_timeout, + wait_interval): + + try: + existing_server = get_server(oneandone_conn, hostname) + + if existing_server: + if module.check_mode: + return False + return None + + if module.check_mode: + return True + + server = oneandone_conn.create_server( + oneandone.client.Server( + name=hostname, + description=description, + fixed_instance_size_id=fixed_instance_size_id, + vcore=vcore, + cores_per_processor=cores_per_processor, + ram=ram, + appliance_id=appliance_id, + datacenter_id=datacenter_id, + rsa_key=ssh_key, + private_network_id=private_network_id, + firewall_policy_id=firewall_policy_id, + load_balancer_id=load_balancer_id, + monitoring_policy_id=monitoring_policy_id, + server_type=server_type,), hdds) + + if wait: + wait_for_resource_creation_completion( + oneandone_conn, + OneAndOneResources.server, + server['id'], + wait_timeout, + wait_interval) + server = oneandone_conn.get_server(server['id']) # refresh + + return 
server + except Exception as ex: + module.fail_json(msg=str(ex)) + + +def _insert_network_data(server): + for addr_data in server['ips']: + if addr_data['type'] == 'IPV6': + server['public_ipv6'] = addr_data['ip'] + elif addr_data['type'] == 'IPV4': + server['public_ipv4'] = addr_data['ip'] + return server + + +def create_server(module, oneandone_conn): + """ + Create new server + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object + + Returns a dictionary containing a 'changed' attribute indicating whether + any server was added, and a 'servers' attribute with the list of the + created servers' hostname, id and ip addresses. + """ + hostname = module.params.get('hostname') + description = module.params.get('description') + auto_increment = module.params.get('auto_increment') + count = module.params.get('count') + fixed_instance_size = module.params.get('fixed_instance_size') + vcore = module.params.get('vcore') + cores_per_processor = module.params.get('cores_per_processor') + ram = module.params.get('ram') + hdds = module.params.get('hdds') + datacenter = module.params.get('datacenter') + appliance = module.params.get('appliance') + ssh_key = module.params.get('ssh_key') + private_network = module.params.get('private_network') + monitoring_policy = module.params.get('monitoring_policy') + firewall_policy = module.params.get('firewall_policy') + load_balancer = module.params.get('load_balancer') + server_type = module.params.get('server_type') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + datacenter_id = get_datacenter(oneandone_conn, datacenter) + if datacenter_id is None: + _check_mode(module, False) + module.fail_json( + msg='datacenter %s not found.' % datacenter) + + fixed_instance_size_id = None + if fixed_instance_size: + fixed_instance_size_id = get_fixed_instance_size( + oneandone_conn, + fixed_instance_size) + if fixed_instance_size_id is None: + _check_mode(module, False) + module.fail_json( + msg='fixed_instance_size %s not found.' % fixed_instance_size) + + appliance_id = get_appliance(oneandone_conn, appliance) + if appliance_id is None: + _check_mode(module, False) + module.fail_json( + msg='appliance %s not found.' % appliance) + + private_network_id = None + if private_network: + private_network_id = get_private_network( + oneandone_conn, + private_network) + if private_network_id is None: + _check_mode(module, False) + module.fail_json( + msg='private network %s not found.' % private_network) + + monitoring_policy_id = None + if monitoring_policy: + monitoring_policy_id = get_monitoring_policy( + oneandone_conn, + monitoring_policy) + if monitoring_policy_id is None: + _check_mode(module, False) + module.fail_json( + msg='monitoring policy %s not found.' % monitoring_policy) + + firewall_policy_id = None + if firewall_policy: + firewall_policy_id = get_firewall_policy( + oneandone_conn, + firewall_policy) + if firewall_policy_id is None: + _check_mode(module, False) + module.fail_json( + msg='firewall policy %s not found.' % firewall_policy) + + load_balancer_id = None + if load_balancer: + load_balancer_id = get_load_balancer( + oneandone_conn, + load_balancer) + if load_balancer_id is None: + _check_mode(module, False) + module.fail_json( + msg='load balancer %s not found.' 
% load_balancer) + + if auto_increment: + hostnames = _auto_increment_hostname(count, hostname) + descriptions = _auto_increment_description(count, description) + else: + hostnames = [hostname] * count + descriptions = [description] * count + + hdd_objs = [] + if hdds: + for hdd in hdds: + hdd_objs.append(oneandone.client.Hdd( + size=hdd['size'], + is_main=hdd['is_main'] + )) + + servers = [] + for index, name in enumerate(hostnames): + server = _create_server( + module=module, + oneandone_conn=oneandone_conn, + hostname=name, + description=descriptions[index], + fixed_instance_size_id=fixed_instance_size_id, + vcore=vcore, + cores_per_processor=cores_per_processor, + ram=ram, + hdds=hdd_objs, + datacenter_id=datacenter_id, + appliance_id=appliance_id, + ssh_key=ssh_key, + private_network_id=private_network_id, + monitoring_policy_id=monitoring_policy_id, + firewall_policy_id=firewall_policy_id, + load_balancer_id=load_balancer_id, + server_type=server_type, + wait=wait, + wait_timeout=wait_timeout, + wait_interval=wait_interval) + if server: + servers.append(server) + + changed = False + + if servers: + for server in servers: + if server: + _check_mode(module, True) + _check_mode(module, False) + servers = [_insert_network_data(_server) for _server in servers] + changed = True + + _check_mode(module, False) + + return (changed, servers) + + +def remove_server(module, oneandone_conn): + """ + Removes a server. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object. + + Returns a dictionary containing a 'changed' attribute indicating whether + the server was removed, and a 'removed_server' attribute with + the removed server's hostname and id. + """ + server_id = module.params.get('server') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + changed = False + removed_server = None + + server = get_server(oneandone_conn, server_id, True) + if server: + _check_mode(module, True) + try: + oneandone_conn.delete_server(server_id=server['id']) + if wait: + wait_for_resource_deletion_completion(oneandone_conn, + OneAndOneResources.server, + server['id'], + wait_timeout, + wait_interval) + changed = True + except Exception as ex: + module.fail_json( + msg="failed to terminate the server: %s" % str(ex)) + + removed_server = { + 'id': server['id'], + 'hostname': server['name'] + } + _check_mode(module, False) + + return (changed, removed_server) + + +def startstop_server(module, oneandone_conn): + """ + Starts or Stops a server. + + module : AnsibleModule object + oneandone_conn: authenticated oneandone object. + + Returns a dictionary with a 'changed' attribute indicating whether + anything has changed for the server as a result of this function + being run, and a 'server' attribute with basic information for + the server. + """ + state = module.params.get('state') + server_id = module.params.get('server') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + wait_interval = module.params.get('wait_interval') + + changed = False + + # Resolve server + server = get_server(oneandone_conn, server_id, True) + if server: + # Attempt to change the server state, only if it's not already there + # or on its way. 
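+        # Desired-state -> API action mapping applied below (both use the
+        # SOFTWARE shutdown/boot method):
+        #   state == 'stopped' and server POWERED_ON  -> POWER_OFF
+        #   state == 'running' and server POWERED_OFF -> POWER_ON
+        # Any other state/status combination results in no API call.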
+ try: + if state == 'stopped' and server['status']['state'] == 'POWERED_ON': + _check_mode(module, True) + oneandone_conn.modify_server_status( + server_id=server['id'], + action='POWER_OFF', + method='SOFTWARE') + elif state == 'running' and server['status']['state'] == 'POWERED_OFF': + _check_mode(module, True) + oneandone_conn.modify_server_status( + server_id=server['id'], + action='POWER_ON', + method='SOFTWARE') + except Exception as ex: + module.fail_json( + msg="failed to set server %s to state %s: %s" % ( + server_id, state, str(ex))) + + _check_mode(module, False) + + # Make sure the server has reached the desired state + if wait: + operation_completed = False + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(wait_interval) + server = oneandone_conn.get_server(server['id']) # refresh + server_state = server['status']['state'] + if state == 'stopped' and server_state == 'POWERED_OFF': + operation_completed = True + break + if state == 'running' and server_state == 'POWERED_ON': + operation_completed = True + break + if not operation_completed: + module.fail_json( + msg="Timeout waiting for server %s to get to state %s" % ( + server_id, state)) + + changed = True + server = _insert_network_data(server) + + _check_mode(module, False) + + return (changed, server) + + +def _auto_increment_hostname(count, hostname): + """ + Allow a custom incremental count in the hostname when defined with the + string formatting (%) operator. Otherwise, increment using name-01, + name-02, name-03, and so forth. + """ + if '%' not in hostname: + hostname = "%s-%%01d" % hostname + + return [ + hostname % i + for i in xrange(1, count + 1) + ] + + +def _auto_increment_description(count, description): + """ + Allow the incremental count in the description when defined with the + string formatting (%) operator. Otherwise, repeat the same description. 
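+
+    Illustrative expansions:
+        _auto_increment_description(2, 'node %02d') -> ['node 01', 'node 02']
+        _auto_increment_description(2, 'backup')    -> ['backup', 'backup']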
+ """ + if '%' in description: + return [ + description % i + for i in xrange(1, count + 1) + ] + else: + return [description] * count + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + default=os.environ.get('ONEANDONE_AUTH_TOKEN'), + no_log=True), + api_url=dict( + type='str', + default=os.environ.get('ONEANDONE_API_URL')), + hostname=dict(type='str'), + description=dict(type='str'), + appliance=dict(type='str'), + fixed_instance_size=dict(type='str'), + vcore=dict(type='int'), + cores_per_processor=dict(type='int'), + ram=dict(type='float'), + hdds=dict(type='list', elements='dict'), + count=dict(type='int', default=1), + ssh_key=dict(type='raw', no_log=False), + auto_increment=dict(type='bool', default=True), + server=dict(type='str'), + datacenter=dict( + choices=DATACENTERS, + default='US'), + private_network=dict(type='str'), + firewall_policy=dict(type='str'), + load_balancer=dict(type='str'), + monitoring_policy=dict(type='str'), + server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + wait_interval=dict(type='int', default=5), + state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']), + ), + supports_check_mode=True, + mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'], + ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],), + required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],) + ) + + if not HAS_ONEANDONE_SDK: + module.fail_json(msg='1and1 required for this module') + + if not module.params.get('auth_token'): + module.fail_json( + msg='The "auth_token" parameter or ' + + 'ONEANDONE_AUTH_TOKEN environment variable is required.') + + if not module.params.get('api_url'): + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token')) + else: + oneandone_conn = oneandone.client.OneAndOneService( + api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('server'): + module.fail_json( + msg="'server' parameter is required for deleting a server.") + try: + (changed, servers) = remove_server(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state in ('running', 'stopped'): + if not module.params.get('server'): + module.fail_json( + msg="'server' parameter is required for starting/stopping a server.") + try: + (changed, servers) = startstop_server(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + elif state == 'present': + for param in ('hostname', + 'appliance', + 'datacenter'): + if not module.params.get(param): + module.fail_json( + msg="%s parameter is required for new server." 
% param) + try: + (changed, servers) = create_server(module, oneandone_conn) + except Exception as ex: + module.fail_json(msg=str(ex)) + + module.exit_json(changed=changed, servers=servers) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/onepassword_info.py b/ansible_collections/community/general/plugins/modules/onepassword_info.py new file mode 100644 index 000000000..bb814c443 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/onepassword_info.py @@ -0,0 +1,390 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Ryan Conway (@rylon) +# Copyright (c) 2018, Scott Buchanan (onepassword.py used as starting point) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +module: onepassword_info +author: + - Ryan Conway (@Rylon) +requirements: + - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) +notes: + - Tested with C(op) version 0.5.5 + - "Based on the C(onepassword) lookup plugin by Scott Buchanan ." +short_description: Gather items from 1Password +description: + - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items. + - A fatal error occurs if any of the items being searched for can not be found. + - Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved. + - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)! + You must now use the C(register) option to use the facts in other tasks. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + search_terms: + type: list + elements: dict + description: + - A list of one or more search terms. + - Each search term can either be a simple string or it can be a dictionary for more control. + - When passing a simple string, I(field) is assumed to be C(password). + - When passing a dictionary, the following fields are available. + suboptions: + name: + type: str + description: + - The name of the 1Password item to search for (required). + field: + type: str + description: + - The name of the field to search for within this item (optional, defaults to "password" (or "document" if the item has an attachment). + section: + type: str + description: + - The name of a section within this item containing the specified field (optional, will search all sections if not specified). + vault: + type: str + description: + - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional). + required: true + auto_login: + type: dict + description: + - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info) + will attempt to sign in to 1Password automatically. + - Without this option, you must have already logged in via the 1Password CLI before running Ansible. + - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. 
Ensure that the key used to encrypt + the Ansible Vault is equal to or greater in strength than the 1Password master password. + suboptions: + subdomain: + type: str + description: + - 1Password subdomain name (.1password.com). + - If this is not specified, the most recent subdomain will be used. + username: + type: str + description: + - 1Password username. + - Only required for initial sign in. + master_password: + type: str + description: + - The master password for your subdomain. + - This is always required when specifying C(auto_login). + required: true + secret_key: + type: str + description: + - The secret key for your subdomain. + - Only required for initial sign in. + required: false + cli_path: + type: path + description: Used to specify the exact path to the C(op) command line interface + required: false + default: 'op' +''' + +EXAMPLES = ''' +# Gather secrets from 1Password, assuming there is a 'password' field: +- name: Get a password + community.general.onepassword_info: + search_terms: My 1Password item + delegate_to: localhost + register: my_1password_item + no_log: true # Don't want to log the secrets to the console! + +# Gather secrets from 1Password, with more advanced search terms: +- name: Get a password + community.general.onepassword_info: + search_terms: + - name: My 1Password item + field: Custom field name # optional, defaults to 'password' + section: Custom section name # optional, defaults to 'None' + vault: Name of the vault # optional, only necessary if there is more than 1 Vault available + delegate_to: localhost + register: my_1password_item + no_log: true # Don't want to log the secrets to the console! + +# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two +# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the +# second, 'Custom field name' is fetched, as that is specified explicitly. +- name: Get a password + community.general.onepassword_info: + search_terms: + - My 1Password item # 'name' is optional when passing a simple string... + - name: My Other 1Password item # ...but it can also be set for consistency + - name: My 1Password item + field: Custom field name # optional, defaults to 'password' + section: Custom section name # optional, defaults to 'None' + vault: Name of the vault # optional, only necessary if there is more than 1 Vault available + - name: A 1Password item with document attachment + delegate_to: localhost + register: my_1password_item + no_log: true # Don't want to log the secrets to the console! + +- name: Debug a password (for example) + ansible.builtin.debug: + msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}" +''' + +RETURN = ''' +--- +# One or more dictionaries for each matching item from 1Password, along with the appropriate fields. +# This shows the response you would expect to receive from the third example documented above. +onepassword: + description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above. 
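+  # When consumed from another task, fields are addressed by item name and
+  # then field name, for example (illustrative, matching the debug task in
+  # EXAMPLES above):
+  #   "{{ my_1password_item.onepassword['My 1Password item']['password'] }}"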
+ returned: success + type: dict + sample: + "My 1Password item": + password: the value of this field + Custom field name: the value of this field + "My Other 1Password item": + password: the value of this field + "A 1Password item with document attachment": + document: the contents of the document attached to this item +''' + + +import errno +import json +import os +import re + +from subprocess import Popen, PIPE + +from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig + + +class AnsibleModuleError(Exception): + def __init__(self, results): + self.results = results + + def __repr__(self): + return self.results + + +class OnePasswordInfo(object): + + def __init__(self): + self.cli_path = module.params.get('cli_path') + self.auto_login = module.params.get('auto_login') + self.logged_in = False + self.token = None + + terms = module.params.get('search_terms') + self.terms = self.parse_search_terms(terms) + + self._config = OnePasswordConfig() + + def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): + if self.token: + # Adds the session token to all commands if we're logged in. + args += [to_bytes('--session=') + self.token] + + command = [self.cli_path] + args + p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(input=command_input) + rc = p.wait() + if not ignore_errors and rc != expected_rc: + raise AnsibleModuleError(to_native(err)) + return rc, out, err + + def _parse_field(self, data_json, item_id, field_name, section_title=None): + data = json.loads(data_json) + + if ('documentAttributes' in data['details']): + # This is actually a document, let's fetch the document data instead! + document = self._run(["get", "document", data['overview']['title']]) + return {'document': document[1].strip()} + + else: + # This is not a document, let's try to find the requested field + + # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute, + # not inside it, so we need to check there first. + if (field_name in data['details']): + return {field_name: data['details'][field_name]} + + # Otherwise we continue looking inside the 'fields' attribute for the specified field. + else: + if section_title is None: + for field_data in data['details'].get('fields', []): + if field_data.get('name', '').lower() == field_name.lower(): + return {field_name: field_data.get('value', '')} + + # Not found it yet, so now lets see if there are any sections defined + # and search through those for the field. If a section was given, we skip + # any non-matching sections, otherwise we search them all until we find the field. + for section_data in data['details'].get('sections', []): + if section_title is not None and section_title.lower() != section_data['title'].lower(): + continue + for field_data in section_data.get('fields', []): + if field_data.get('t', '').lower() == field_name.lower(): + return {field_name: field_data.get('v', '')} + + # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded. + optional_section_title = '' if section_title is None else " in the section '%s'" % section_title + module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." 
% (item_id, field_name, optional_section_title)) + + def parse_search_terms(self, terms): + processed_terms = [] + + for term in terms: + if not isinstance(term, dict): + term = {'name': term} + + if 'name' not in term: + module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term)) + + term['field'] = term.get('field', 'password') + term['section'] = term.get('section', None) + term['vault'] = term.get('vault', None) + + processed_terms.append(term) + + return processed_terms + + def get_raw(self, item_id, vault=None): + try: + args = ["get", "item", item_id] + if vault is not None: + args += ['--vault={0}'.format(vault)] + rc, output, dummy = self._run(args) + return output + + except Exception as e: + if re.search(".*not found.*", to_native(e)): + module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id) + else: + module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e))) + + def get_field(self, item_id, field, section=None, vault=None): + output = self.get_raw(item_id, vault) + return self._parse_field(output, item_id, field, section) if output != '' else '' + + def full_login(self): + if self.auto_login is not None: + if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'), + self.auto_login.get('secret_key'), self.auto_login.get('master_password')]: + module.fail_json(msg='Unable to perform initial sign in to 1Password. ' + 'subdomain, username, secret_key, and master_password are required to perform initial sign in.') + + args = [ + 'signin', + '{0}.1password.com'.format(self.auto_login['subdomain']), + to_bytes(self.auto_login['username']), + to_bytes(self.auto_login['secret_key']), + '--output=raw', + ] + + try: + rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password'])) + self.token = out.strip() + except AnsibleModuleError as e: + module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e)) + else: + module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' " + "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path) + + def get_token(self): + # If the config file exists, assume an initial signin has taken place and try basic sign in + if os.path.isfile(self._config.config_file_path): + + if self.auto_login is not None: + + # Since we are not currently signed in, master_password is required at a minimum + if not self.auto_login.get('master_password'): + module.fail_json(msg="Unable to sign in to 1Password. 
'auto_login.master_password' is required.") + + # Try signing in using the master_password and a subdomain if one is provided + try: + args = ['signin', '--output=raw'] + + if self.auto_login.get('subdomain'): + args = ['signin', self.auto_login['subdomain'], '--output=raw'] + + rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password'])) + self.token = out.strip() + + except AnsibleModuleError: + self.full_login() + + else: + self.full_login() + + else: + # Attempt a full sign in since there appears to be no existing sign in + self.full_login() + + def assert_logged_in(self): + try: + rc, out, err = self._run(['get', 'account'], ignore_errors=True) + if rc == 0: + self.logged_in = True + if not self.logged_in: + self.get_token() + except OSError as e: + if e.errno == errno.ENOENT: + module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path) + raise e + + def run(self): + result = {} + + self.assert_logged_in() + + for term in self.terms: + value = self.get_field(term['name'], term['field'], term['section'], term['vault']) + + if term['name'] in result: + # If we already have a result for this key, we have to append this result dictionary + # to the existing one. This is only applicable when there is a single item + # in 1Password which has two different fields, and we want to retrieve both of them. + result[term['name']].update(value) + else: + # If this is the first result for this key, simply set it. + result[term['name']] = value + + return result + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + cli_path=dict(type='path', default='op'), + auto_login=dict(type='dict', options=dict( + subdomain=dict(type='str'), + username=dict(type='str'), + master_password=dict(required=True, type='str', no_log=True), + secret_key=dict(type='str', no_log=True), + ), default=None), + search_terms=dict(required=True, type='list', elements='dict'), + ), + supports_check_mode=True + ) + + results = {'onepassword': OnePasswordInfo().run()} + + module.exit_json(changed=False, **results) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py b/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py new file mode 100644 index 000000000..541f3d669 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py @@ -0,0 +1,168 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_datacenter_info +short_description: Retrieve information about the OneView Data Centers +description: + - Retrieve information about the OneView Data Centers. + - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)! 
+requirements: + - "hpOneView >= 2.0.1" +author: + - Alex Monteiro (@aalexmonteiro) + - Madhav Bharadwaj (@madhav-bharadwaj) + - Priyanka Sood (@soodpr) + - Ricardo Galeno (@ricardogpsf) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Data Center name. + type: str + options: + description: + - "Retrieve additional information. Options available: 'visualContent'." + type: list + elements: str + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = ''' +- name: Gather information about all Data Centers + community.general.oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + delegate_to: localhost + register: result + +- name: Print fetched information about Data Centers + ansible.builtin.debug: + msg: "{{ result.datacenters }}" + +- name: Gather paginated, filtered and sorted information about Data Centers + community.general.oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: 'state=Unmanaged' + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Data Centers + ansible.builtin.debug: + msg: "{{ result.datacenters }}" + +- name: Gather information about a Data Center by name + community.general.oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: "My Data Center" + delegate_to: localhost + register: result + +- name: Print fetched information about Data Center found by name + ansible.builtin.debug: + msg: "{{ result.datacenters }}" + +- name: Gather information about the Data Center Visual Content + community.general.oneview_datacenter_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: "My Data Center" + options: + - visualContent + delegate_to: localhost + register: result + +- name: Print fetched information about Data Center found by name + ansible.builtin.debug: + msg: "{{ result.datacenters }}" + +- name: Print fetched information about Data Center Visual Content + ansible.builtin.debug: + msg: "{{ result.datacenter_visual_content }}" +''' + +RETURN = ''' +datacenters: + description: Has all the OneView information about the Data Centers. + returned: Always, but can be null. + type: dict + +datacenter_visual_content: + description: Has information about the Data Center Visual Content. + returned: When requested, but can be null. 
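+  # Only populated when the 'visualContent' option is requested and the data
+  # center is looked up by name (see execute_module below).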
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class DatacenterInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list', elements='str'), + params=dict(type='dict') + ) + + def __init__(self): + super(DatacenterInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + client = self.oneview_client.datacenters + info = {} + + if self.module.params.get('name'): + datacenters = client.get_by('name', self.module.params['name']) + + if self.options and 'visualContent' in self.options: + if datacenters: + info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri']) + else: + info['datacenter_visual_content'] = None + + info['datacenters'] = datacenters + else: + info['datacenters'] = client.get_all(**self.facts_params) + + return dict(changed=False, **info) + + +def main(): + DatacenterInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py b/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py new file mode 100644 index 000000000..3e593b7ae --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py @@ -0,0 +1,252 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_enclosure_info +short_description: Retrieve information about one or more Enclosures +description: + - Retrieve information about one or more of the Enclosures from OneView. + - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Enclosure name. + type: str + options: + description: + - "List with options to gather additional information about an Enclosure and related resources. + Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization), + you can provide specific parameters." 
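+      # The utilization entry may itself carry parameters; an illustrative
+      # shape (see the last task in EXAMPLES below):
+      #   options:
+      #     - utilization:
+      #         fields: AmbientTemperature
+      #         view: day
+      #         refresh: false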
+    type: list
+    elements: raw
+
+extends_documentation_fragment:
+  - community.general.oneview
+  - community.general.oneview.factsparams
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+  community.general.oneview_enclosure_info:
+    hostname: 172.16.101.48
+    username: administrator
+    password: my_password
+    api_version: 500
+  no_log: true
+  delegate_to: localhost
+  register: result
+
+- name: Print fetched information about Enclosures
+  ansible.builtin.debug:
+    msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+  community.general.oneview_enclosure_info:
+    params:
+      start: 0
+      count: 3
+      sort: name:descending
+      filter: status=OK
+    hostname: 172.16.101.48
+    username: administrator
+    password: my_password
+    api_version: 500
+  no_log: true
+  delegate_to: localhost
+  register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of Enclosures
+  ansible.builtin.debug:
+    msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+  community.general.oneview_enclosure_info:
+    name: Enclosure-Name
+    hostname: 172.16.101.48
+    username: administrator
+    password: my_password
+    api_version: 500
+  no_log: true
+  delegate_to: localhost
+  register: result
+
+- name: Print fetched information about Enclosure found by name
+  ansible.builtin.debug:
+    msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+  community.general.oneview_enclosure_info:
+    name: Test-Enclosure
+    options:
+      - script  # optional
+      - environmentalConfiguration  # optional
+      - utilization  # optional
+    hostname: 172.16.101.48
+    username: administrator
+    password: my_password
+    api_version: 500
+  no_log: true
+  delegate_to: localhost
+  register: result
+
+- name: Print fetched information about Enclosure found by name
+  ansible.builtin.debug:
+    msg: "{{ result.enclosures }}"
+
+- name: Print fetched information about Enclosure Script
+  ansible.builtin.debug:
+    msg: "{{ result.enclosure_script }}"
+
+- name: Print fetched information about Enclosure Environmental Configuration
+  ansible.builtin.debug:
+    msg: "{{ result.enclosure_environmental_configuration }}"
+
+- name: Print fetched information about Enclosure Utilization
+  ansible.builtin.debug:
+    msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+         specified dates"
+  community.general.oneview_enclosure_info:
+    name: Test-Enclosure
+    options:
+      - utilization:  # optional
+          fields: AmbientTemperature
+          filter:
+            - startDate=2016-07-01T14:29:42.000Z
+            - endDate=2017-07-01T03:29:42.000Z
+          view: day
+          refresh: false
+    hostname: 172.16.101.48
+    username: administrator
+    password: my_password
+    api_version: 500
+  no_log: true
+  delegate_to: localhost
+  register: result
+
+- name: Print fetched information about Enclosure found by name
+  ansible.builtin.debug:
+    msg: "{{ result.enclosures }}"
+
+- name: Print fetched information about Enclosure Utilization
+  ansible.builtin.debug:
+    msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+  description: Has all the OneView information about the Enclosures.
+  returned: Always, but can be null.
+  type: dict
+
+enclosure_script:
+  description: Has all the OneView information about the script of an Enclosure.
+  returned: When requested, but can be null.
+ type: str + +enclosure_environmental_configuration: + description: Has all the OneView information about the environmental configuration of an Enclosure. + returned: When requested, but can be null. + type: dict + +enclosure_utilization: + description: Has all the OneView information about the utilization of an Enclosure. + returned: When requested, but can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class EnclosureInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list', elements='raw'), + params=dict(type='dict') + ) + + def __init__(self): + super(EnclosureInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + info = {} + + if self.module.params['name']: + enclosures = self._get_by_name(self.module.params['name']) + + if self.options and enclosures: + info = self._gather_optional_info(self.options, enclosures[0]) + else: + enclosures = self.oneview_client.enclosures.get_all(**self.facts_params) + + info['enclosures'] = enclosures + + return dict(changed=False, **info) + + def _gather_optional_info(self, options, enclosure): + + enclosure_client = self.oneview_client.enclosures + info = {} + + if options.get('script'): + info['enclosure_script'] = enclosure_client.get_script(enclosure['uri']) + if options.get('environmentalConfiguration'): + env_config = enclosure_client.get_environmental_configuration(enclosure['uri']) + info['enclosure_environmental_configuration'] = env_config + if options.get('utilization'): + info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization']) + + return info + + def _get_utilization(self, enclosure, params): + fields = view = refresh = filter = '' + + if isinstance(params, dict): + fields = params.get('fields') + view = params.get('view') + refresh = params.get('refresh') + filter = params.get('filter') + + return self.oneview_client.enclosures.get_utilization(enclosure['uri'], + fields=fields, + filter=filter, + refresh=refresh, + view=view) + + def _get_by_name(self, name): + return self.oneview_client.enclosures.get_by('name', name) + + +def main(): + EnclosureInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py new file mode 100644 index 000000000..8eb63db5a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py @@ -0,0 +1,257 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_ethernet_network +short_description: Manage OneView Ethernet Network resources +description: + - Provides an interface to manage Ethernet Network resources. Can create, update, or delete. 
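Worth spelling out here: the C(bandwidth) block accepted under C(data) by this module is not stored on the Ethernet Network resource itself; it is applied to the network's connection template (see _update_connection_template further below). A minimal sketch of that step, assuming an already-configured hpOneView client; apply_bandwidth and its arguments are illustrative names, not part of the module:

def apply_bandwidth(oneview_client, network, bandwidth):
    # Sketch: push a bandwidth dict onto a network's connection template.
    uri = network.get('connectionTemplateUri')
    if not uri:
        return False, None                  # nothing to update
    template = oneview_client.connection_templates.get(uri)
    desired = dict(template, bandwidth=bandwidth)
    if desired == template:                 # the module uses OneViewModuleBase.compare here
        return False, None                  # already compliant, no change reported
    return True, oneview_client.connection_templates.update(desired)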
+requirements: + - hpOneView >= 3.1.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Ethernet Network resource. + - C(present) will ensure data properties are compliant with OneView. + - C(absent) will remove the resource from OneView, if it exists. + - C(default_bandwidth_reset) will reset the network connection template to the default. + type: str + default: present + choices: [present, absent, default_bandwidth_reset] + data: + description: + - List with Ethernet Network properties. + type: dict + required: true +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Ensure that the Ethernet Network is present using the default configuration + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + vlanId: '201' + delegate_to: localhost + +- name: Update the Ethernet Network changing bandwidth and purpose + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + purpose: Management + bandwidth: + maximumBandwidth: 3000 + typicalBandwidth: 2000 + delegate_to: localhost + +- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network' + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: 'Test Ethernet Network' + newName: 'Renamed Ethernet Network' + delegate_to: localhost + +- name: Ensure that the Ethernet Network is absent + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: absent + data: + name: 'New Ethernet Network' + delegate_to: localhost + +- name: Create Ethernet networks in bulk + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + vlanIdRange: '1-10,15,17' + purpose: General + namePrefix: TestNetwork + smartLink: false + privateNetwork: false + bandwidth: + maximumBandwidth: 10000 + typicalBandwidth: 2000 + delegate_to: localhost + +- name: Reset to the default network connection template + community.general.oneview_ethernet_network: + config: '/etc/oneview/oneview_config.json' + state: default_bandwidth_reset + data: + name: 'Test Ethernet Network' + delegate_to: localhost +''' + +RETURN = ''' +ethernet_network: + description: Has the facts about the Ethernet Networks. + returned: On state 'present'. Can be null. + type: dict + +ethernet_network_bulk: + description: Has the facts about the Ethernet Networks affected by the bulk insert. + returned: When 'vlanIdRange' attribute is in data argument. Can be null. + type: dict + +ethernet_network_connection_template: + description: Has the facts about the Ethernet Network Connection Template. + returned: On state 'default_bandwidth_reset'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class EthernetNetworkModule(OneViewModuleBase): + MSG_CREATED = 'Ethernet Network created successfully.' + MSG_UPDATED = 'Ethernet Network updated successfully.' + MSG_DELETED = 'Ethernet Network deleted successfully.' 
+ MSG_ALREADY_PRESENT = 'Ethernet Network is already present.' + MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.' + + MSG_BULK_CREATED = 'Ethernet Networks created successfully.' + MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.' + MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.' + MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.' + MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.' + + RESOURCE_FACT_NAME = 'ethernet_network' + + def __init__(self): + + argument_spec = dict( + state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']), + data=dict(type='dict', required=True), + ) + + super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True) + + self.resource_client = self.oneview_client.ethernet_networks + + def execute_module(self): + + changed, msg, ansible_facts, resource = False, '', {}, None + + if self.data.get('name'): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + if self.data.get('vlanIdRange'): + return self._bulk_present() + else: + return self._present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + elif self.state == 'default_bandwidth_reset': + changed, msg, ansible_facts = self._default_bandwidth_reset(resource) + return dict(changed=changed, msg=msg, ansible_facts=ansible_facts) + + def _present(self, resource): + + bandwidth = self.data.pop('bandwidth', None) + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + + if bandwidth: + if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]: + result['changed'] = True + result['msg'] = self.MSG_UPDATED + + if scope_uris is not None: + result = self.resource_scopes_set(result, 'ethernet_network', scope_uris) + + return result + + def _bulk_present(self): + vlan_id_range = self.data['vlanIdRange'] + result = dict(ansible_facts={}) + ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) + + if not ethernet_networks: + self.resource_client.create_bulk(self.data) + result['changed'] = True + result['msg'] = self.MSG_BULK_CREATED + + else: + vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range) + for net in ethernet_networks[:]: + vlan_ids.remove(net['vlanId']) + + if len(vlan_ids) == 0: + result['msg'] = self.MSG_BULK_ALREADY_EXIST + result['changed'] = False + else: + if len(vlan_ids) == 1: + self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0]) + else: + self.data['vlanIdRange'] = ','.join(map(str, vlan_ids)) + + self.resource_client.create_bulk(self.data) + result['changed'] = True + result['msg'] = self.MSG_MISSING_BULK_CREATED + result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range) + + return result + + def _update_connection_template(self, ethernet_network, bandwidth): + + if 'connectionTemplateUri' not in ethernet_network: + return False, None + + connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri']) + + merged_data = connection_template.copy() + merged_data.update({'bandwidth': bandwidth}) + + if not self.compare(connection_template, merged_data): + connection_template = self.oneview_client.connection_templates.update(merged_data) + return 
True, connection_template + else: + return False, None + + def _default_bandwidth_reset(self, resource): + + if not resource: + raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND) + + default_connection_template = self.oneview_client.connection_templates.get_default() + + changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth']) + + return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict( + ethernet_network_connection_template=connection_template) + + +def main(): + EthernetNetworkModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py new file mode 100644 index 000000000..e107f3b47 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py @@ -0,0 +1,177 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_ethernet_network_info +short_description: Retrieve the information about one or more of the OneView Ethernet Networks +description: + - Retrieve the information about one or more of the Ethernet Networks from OneView. + - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Ethernet Network name. + type: str + options: + description: + - "List with options to gather additional information about an Ethernet Network and related resources. + Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)." 
+ type: list + elements: str +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = ''' +- name: Gather information about all Ethernet Networks + community.general.oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- name: Print fetched information about Ethernet Networks + ansible.builtin.debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather paginated and filtered information about Ethernet Networks + community.general.oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 1 + count: 3 + sort: 'name:descending' + filter: 'purpose=General' + delegate_to: localhost + register: result + +- name: Print fetched information about paginated and filtered list of Ethernet Networks + ansible.builtin.debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather information about an Ethernet Network by name + community.general.oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + name: Ethernet network name + delegate_to: localhost + register: result + +- name: Print fetched information about Ethernet Network found by name + ansible.builtin.debug: + msg: "{{ result.ethernet_networks }}" + +- name: Gather information about an Ethernet Network by name with options + community.general.oneview_ethernet_network_info: + config: /etc/oneview/oneview_config.json + name: eth1 + options: + - associatedProfiles + - associatedUplinkGroups + delegate_to: localhost + register: result + +- name: Print fetched information about Ethernet Network Associated Profiles + ansible.builtin.debug: + msg: "{{ result.enet_associated_profiles }}" + +- name: Print fetched information about Ethernet Network Associated Uplink Groups + ansible.builtin.debug: + msg: "{{ result.enet_associated_uplink_groups }}" +''' + +RETURN = ''' +ethernet_networks: + description: Has all the OneView information about the Ethernet Networks. + returned: Always, but can be null. + type: dict + +enet_associated_profiles: + description: Has all the OneView information about the profiles which are using the Ethernet network. + returned: When requested, but can be null. + type: dict + +enet_associated_uplink_groups: + description: Has all the OneView information about the uplink sets which are using the Ethernet network. + returned: When requested, but can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class EthernetNetworkInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list', elements='str'), + params=dict(type='dict') + ) + + def __init__(self): + super(EthernetNetworkInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + + self.resource_client = self.oneview_client.ethernet_networks + + def execute_module(self): + info = {} + if self.module.params['name']: + ethernet_networks = self.resource_client.get_by('name', self.module.params['name']) + + if self.module.params.get('options') and ethernet_networks: + info = self.__gather_optional_info(ethernet_networks[0]) + else: + ethernet_networks = self.resource_client.get_all(**self.facts_params) + + info['ethernet_networks'] = ethernet_networks + + return dict(changed=False, **info) + + def __gather_optional_info(self, ethernet_network): + + info = {} + + if self.options.get('associatedProfiles'): + info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network) + if self.options.get('associatedUplinkGroups'): + info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network) + + return info + + def __get_associated_profiles(self, ethernet_network): + associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri']) + return [self.oneview_client.server_profiles.get(x) for x in associated_profiles] + + def __get_associated_uplink_groups(self, ethernet_network): + uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri']) + return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups] + + +def main(): + EthernetNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_fc_network.py b/ansible_collections/community/general/plugins/modules/oneview_fc_network.py new file mode 100644 index 000000000..4c5f867e2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_fc_network.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_fc_network +short_description: Manage OneView Fibre Channel Network resources +description: + - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete. +requirements: + - "hpOneView >= 4.0.0" +author: "Felipe Bulsoni (@fgbulsoni)" +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Fibre Channel Network resource. + C(present) will ensure data properties are compliant with OneView. + C(absent) will remove the resource from OneView, if it exists. + type: str + choices: ['present', 'absent'] + required: true + data: + description: + - List with the Fibre Channel Network properties. 
+ type: dict + required: true + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Ensure that the Fibre Channel Network is present using the default configuration + community.general.oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + +- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach' + community.general.oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + fabricType: 'DirectAttach' + +- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes + community.general.oneview_fc_network: + config: "{{ config_file_path }}" + state: present + data: + name: 'New FC Network' + scopeUris: + - '/rest/scopes/00SC123456' + - '/rest/scopes/01SC123456' + +- name: Ensure that the Fibre Channel Network is absent + community.general.oneview_fc_network: + config: "{{ config_file_path }}" + state: absent + data: + name: 'New FC Network' +''' + +RETURN = ''' +fc_network: + description: Has the facts about the managed OneView FC Network. + returned: On state 'present'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcNetworkModule(OneViewModuleBase): + MSG_CREATED = 'FC Network created successfully.' + MSG_UPDATED = 'FC Network updated successfully.' + MSG_DELETED = 'FC Network deleted successfully.' + MSG_ALREADY_PRESENT = 'FC Network is already present.' + MSG_ALREADY_ABSENT = 'FC Network is already absent.' + RESOURCE_FACT_NAME = 'fc_network' + + def __init__(self): + + additional_arg_spec = dict(data=dict(required=True, type='dict'), + state=dict( + required=True, + choices=['present', 'absent'])) + + super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec, + validate_etag_support=True) + + self.resource_client = self.oneview_client.fc_networks + + def execute_module(self): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + return self._present(resource) + else: + return self.resource_absent(resource) + + def _present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, 'fc_network', scope_uris) + return result + + +def main(): + FcNetworkModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py new file mode 100644 index 000000000..d4044b08b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py @@ -0,0 +1,118 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_fc_network_info +short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks +description: + - Retrieve the information about one or more of 
the Fibre Channel Networks from OneView. + - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Fibre Channel Network name. + type: str + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = ''' +- name: Gather information about all Fibre Channel Networks + community.general.oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- name: Print fetched information about Fibre Channel Networks + ansible.builtin.debug: + msg: "{{ result.fc_networks }}" + +- name: Gather paginated, filtered and sorted information about Fibre Channel Networks + community.general.oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 1 + count: 3 + sort: 'name:descending' + filter: 'fabricType=FabricAttach' + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Fibre Channel Networks + ansible.builtin.debug: + msg: "{{ result.fc_networks }}" + +- name: Gather information about a Fibre Channel Network by name + community.general.oneview_fc_network_info: + config: /etc/oneview/oneview_config.json + name: network name + delegate_to: localhost + register: result + +- name: Print fetched information about Fibre Channel Network found by name + ansible.builtin.debug: + msg: "{{ result.fc_networks }}" +''' + +RETURN = ''' +fc_networks: + description: Has all the OneView information about the Fibre Channel Networks. + returned: Always, but can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcNetworkInfoModule(OneViewModuleBase): + def __init__(self): + + argument_spec = dict( + name=dict(required=False, type='str'), + params=dict(required=False, type='dict') + ) + + super(FcNetworkInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + if self.module.params['name']: + fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name']) + else: + fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params) + + return dict(changed=False, fc_networks=fc_networks) + + +def main(): + FcNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py new file mode 100644 index 000000000..73eef5af0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_fcoe_network +short_description: Manage OneView FCoE Network resources +description: + - Provides an interface to manage FCoE Network resources. Can create, update, or delete. +requirements: + - "python >= 2.7.9" + - "hpOneView >= 4.0.0" +author: "Felipe Bulsoni (@fgbulsoni)" +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the FCoE Network resource. + C(present) will ensure data properties are compliant with OneView. + C(absent) will remove the resource from OneView, if it exists. + type: str + default: present + choices: ['present', 'absent'] + data: + description: + - List with FCoE Network properties. + type: dict + required: true + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Ensure that FCoE Network is present using the default configuration + community.general.oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: Test FCoE Network + vlanId: 201 + delegate_to: localhost + +- name: Update the FCOE network scopes + community.general.oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: present + data: + name: New FCoE Network + scopeUris: + - '/rest/scopes/00SC123456' + - '/rest/scopes/01SC123456' + delegate_to: localhost + +- name: Ensure that FCoE Network is absent + community.general.oneview_fcoe_network: + config: '/etc/oneview/oneview_config.json' + state: absent + data: + name: New FCoE Network + delegate_to: localhost +''' + +RETURN = ''' +fcoe_network: + description: Has the facts about the OneView FCoE Networks. + returned: On state 'present'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcoeNetworkModule(OneViewModuleBase): + MSG_CREATED = 'FCoE Network created successfully.' 
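The __present path below follows the same shape as FcNetworkModule._present earlier in this patch. A condensed sketch of that shared pattern; present_with_scopes is an illustrative standalone wrapper, while resource_present and resource_scopes_set are the OneViewModuleBase helpers the modules actually call:

def present_with_scopes(module, resource, data, fact_name):
    # scopeUris is popped out first: scope assignment is a separate OneView
    # API step, not part of the create/update payload.
    scope_uris = data.pop('scopeUris', None)
    result = module.resource_present(resource, fact_name)  # create or update
    if scope_uris is not None:
        result = module.resource_scopes_set(result, fact_name, scope_uris)
    return result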
+ MSG_UPDATED = 'FCoE Network updated successfully.' + MSG_DELETED = 'FCoE Network deleted successfully.' + MSG_ALREADY_PRESENT = 'FCoE Network is already present.' + MSG_ALREADY_ABSENT = 'FCoE Network is already absent.' + RESOURCE_FACT_NAME = 'fcoe_network' + + def __init__(self): + + additional_arg_spec = dict(data=dict(required=True, type='dict'), + state=dict(default='present', + choices=['present', 'absent'])) + + super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec, + validate_etag_support=True) + + self.resource_client = self.oneview_client.fcoe_networks + + def execute_module(self): + resource = self.get_by_name(self.data.get('name')) + + if self.state == 'present': + return self.__present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def __present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, 'fcoe_network', scope_uris) + return result + + +def main(): + FcoeNetworkModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py new file mode 100644 index 000000000..d9ee1b379 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py @@ -0,0 +1,116 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_fcoe_network_info +short_description: Retrieve the information about one or more of the OneView FCoE Networks +description: + - Retrieve the information about one or more of the FCoE Networks from OneView. + - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - FCoE Network name. 
+ type: str +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = ''' +- name: Gather information about all FCoE Networks + community.general.oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- name: Print fetched information about FCoE Networks + ansible.builtin.debug: + msg: "{{ result.fcoe_networks }}" + +- name: Gather paginated, filtered and sorted information about FCoE Networks + community.general.oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: 'vlanId=2' + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of FCoE Networks + ansible.builtin.debug: + msg: "{{ result.fcoe_networks }}" + +- name: Gather information about a FCoE Network by name + community.general.oneview_fcoe_network_info: + config: /etc/oneview/oneview_config.json + name: Test FCoE Network Information + delegate_to: localhost + register: result + +- name: Print fetched information about FCoE Network found by name + ansible.builtin.debug: + msg: "{{ result.fcoe_networks }}" +''' + +RETURN = ''' +fcoe_networks: + description: Has all the OneView information about the FCoE Networks. + returned: Always, but can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class FcoeNetworkInfoModule(OneViewModuleBase): + def __init__(self): + argument_spec = dict( + name=dict(type='str'), + params=dict(type='dict'), + ) + + super(FcoeNetworkInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + if self.module.params['name']: + fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name']) + else: + fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params) + + return dict(changed=False, fcoe_networks=fcoe_networks) + + +def main(): + FcoeNetworkInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py new file mode 100644 index 000000000..cd8e87528 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_logical_interconnect_group +short_description: Manage OneView Logical Interconnect Group resources +description: + - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete. 
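All the *_info modules in this patch retrieve data the same way, as FcoeNetworkInfoModule above shows: a get_by('name', ...) lookup when C(name) is set, otherwise get_all() with the pagination and filter params. A condensed sketch; fetch is an illustrative helper and client_resource stands for any configured resource client:

def fetch(client_resource, name=None, facts_params=None):
    # get_by returns a (possibly empty) list of matches; get_all honours the
    # start/count/sort/filter values passed through facts_params.
    if name:
        return client_resource.get_by('name', name)
    return client_resource.get_all(**(facts_params or {}))

For example, fetch(oneview_client.logical_interconnect_groups, name='LIG-1') mirrors the by-name branch of the module that follows.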
+requirements: + - hpOneView >= 4.0.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Logical Interconnect Group resource. + C(absent) will remove the resource from OneView, if it exists. + C(present) will ensure data properties are compliant with OneView. + type: str + choices: [absent, present] + default: present + data: + description: + - List with the Logical Interconnect Group properties. + type: dict + required: true +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Ensure that the Logical Interconnect Group is present + community.general.oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: Test Logical Interconnect Group + uplinkSets: [] + enclosureType: C7000 + interconnectMapTemplate: + interconnectMapEntryTemplates: + - logicalDownlinkUri: ~ + logicalLocation: + locationEntries: + - relativeValue: 1 + type: Bay + - relativeValue: 1 + type: Enclosure + permittedInterconnectTypeName: HP VC Flex-10/10D Module + # Alternatively you can inform permittedInterconnectTypeUri + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group has the specified scopes + community.general.oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: Test Logical Interconnect Group + scopeUris: + - /rest/scopes/00SC123456 + - /rest/scopes/01SC123456 + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group is present with name 'Test' + community.general.oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: present + data: + name: New Logical Interconnect Group + newName: Test + delegate_to: localhost + +- name: Ensure that the Logical Interconnect Group is absent + community.general.oneview_logical_interconnect_group: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: New Logical Interconnect Group + delegate_to: localhost +''' + +RETURN = ''' +logical_interconnect_group: + description: Has the facts about the OneView Logical Interconnect Group. + returned: On state 'present'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class LogicalInterconnectGroupModule(OneViewModuleBase): + MSG_CREATED = 'Logical Interconnect Group created successfully.' + MSG_UPDATED = 'Logical Interconnect Group updated successfully.' + MSG_DELETED = 'Logical Interconnect Group deleted successfully.' + MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.' + MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.' + MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.' 
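Before the payload reaches OneView, any C(permittedInterconnectTypeName) in the interconnect map is swapped for the URI the API expects (see __replace_name_by_uris below). A sketch of the per-entry lookup, assuming a configured client; resolve_interconnect_type is an illustrative name:

def resolve_interconnect_type(oneview_client, entry):
    # Pop the friendly name and store the URI instead; an unknown type is a
    # hard error, mirroring MSG_INTERCONNECT_TYPE_NOT_FOUND above (the module
    # raises OneViewModuleResourceNotFound).
    name = entry.pop('permittedInterconnectTypeName', None)
    if name:
        matches = oneview_client.interconnect_types.get_by('name', name)
        if not matches:
            raise LookupError('Interconnect Type was not found.')
        entry['permittedInterconnectTypeUri'] = matches[0]['uri']
    return entry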
+ + RESOURCE_FACT_NAME = 'logical_interconnect_group' + + def __init__(self): + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent']), + data=dict(required=True, type='dict') + ) + + super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec, + validate_etag_support=True) + self.resource_client = self.oneview_client.logical_interconnect_groups + + def execute_module(self): + resource = self.get_by_name(self.data['name']) + + if self.state == 'present': + return self.__present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def __present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + + self.__replace_name_by_uris(self.data) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + + if scope_uris is not None: + result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris) + + return result + + def __replace_name_by_uris(self, data): + map_template = data.get('interconnectMapTemplate') + + if map_template: + map_entry_templates = map_template.get('interconnectMapEntryTemplates') + if map_entry_templates: + for value in map_entry_templates: + permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None) + if permitted_interconnect_type_name: + value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name( + permitted_interconnect_type_name).get('uri') + + def __get_interconnect_type_by_name(self, name): + i_type = self.oneview_client.interconnect_types.get_by('name', name) + if i_type: + return i_type[0] + else: + raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND) + + +def main(): + LogicalInterconnectGroupModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py new file mode 100644 index 000000000..0111bf2c1 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_logical_interconnect_group_info +short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups +description: + - Retrieve information about one or more of the Logical Interconnect Groups from OneView + - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Logical Interconnect Group name. 
+ type: str +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = ''' +- name: Gather information about all Logical Interconnect Groups + community.general.oneview_logical_interconnect_group_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Logical Interconnect Groups + ansible.builtin.debug: + msg: "{{ result.logical_interconnect_groups }}" + +- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups + community.general.oneview_logical_interconnect_group_info: + params: + start: 0 + count: 3 + sort: name:descending + filter: name=LIGName + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Logical Interconnect Groups + ansible.builtin.debug: + msg: "{{ result.logical_interconnect_groups }}" + +- name: Gather information about a Logical Interconnect Group by name + community.general.oneview_logical_interconnect_group_info: + name: logical interconnect group name + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Logical Interconnect Group found by name + ansible.builtin.debug: + msg: "{{ result.logical_interconnect_groups }}" +''' + +RETURN = ''' +logical_interconnect_groups: + description: Has all the OneView information about the Logical Interconnect Groups. + returned: Always, but can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class LogicalInterconnectGroupInfoModule(OneViewModuleBase): + def __init__(self): + + argument_spec = dict( + name=dict(type='str'), + params=dict(type='dict'), + ) + + super(LogicalInterconnectGroupInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + if self.module.params.get('name'): + ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name']) + else: + ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params) + + return dict(changed=False, logical_interconnect_groups=ligs) + + +def main(): + LogicalInterconnectGroupInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_network_set.py b/ansible_collections/community/general/plugins/modules/oneview_network_set.py new file mode 100644 index 000000000..a6a62a05c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_network_set.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_network_set +short_description: Manage HPE OneView Network Set resources +description: + - Provides an interface to manage Network Set resources. Can create, update, or delete. +requirements: + - hpOneView >= 4.0.0 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Network Set resource. + - C(present) will ensure data properties are compliant with OneView. + - C(absent) will remove the resource from OneView, if it exists. + type: str + default: present + choices: ['present', 'absent'] + data: + description: + - List with the Network Set properties. 
+ type: dict + required: true + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Create a Network Set + community.general.oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + networkUris: + - Test Ethernet Network_1 # can be a name + - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI + delegate_to: localhost + +- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks + community.general.oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + newName: OneViewSDK Test Network Set - Renamed + networkUris: + - Test Ethernet Network_1 + delegate_to: localhost + +- name: Delete the Network Set + community.general.oneview_network_set: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: OneViewSDK Test Network Set - Renamed + delegate_to: localhost + +- name: Update the Network set with two scopes + community.general.oneview_network_set: + config: /etc/oneview/oneview_config.json + state: present + data: + name: OneViewSDK Test Network Set + scopeUris: + - /rest/scopes/01SC123456 + - /rest/scopes/02SC123456 + delegate_to: localhost +''' + +RETURN = ''' +network_set: + description: Has the facts about the Network Set. + returned: On state 'present', but can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound + + +class NetworkSetModule(OneViewModuleBase): + MSG_CREATED = 'Network Set created successfully.' + MSG_UPDATED = 'Network Set updated successfully.' + MSG_DELETED = 'Network Set deleted successfully.' + MSG_ALREADY_PRESENT = 'Network Set is already present.' + MSG_ALREADY_ABSENT = 'Network Set is already absent.' 
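C(networkUris) entries may be given as plain names or as full URIs; _replace_network_name_by_uri below normalizes them before the payload is sent. A sketch of the per-entry resolution, assuming a configured client; to_network_uri is an illustrative name:

def to_network_uri(oneview_client, name_or_uri):
    # Full URIs pass through untouched; names are resolved against the
    # Ethernet Networks API. An unknown name is a hard error (the module
    # raises OneViewModuleResourceNotFound with MSG_ETHERNET_NETWORK_NOT_FOUND).
    if name_or_uri.startswith('/rest/ethernet-networks'):
        return name_or_uri
    matches = oneview_client.ethernet_networks.get_by('name', name_or_uri)
    if not matches:
        raise LookupError('Ethernet Network not found: ' + name_or_uri)
    return matches[0]['uri']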
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: ' + RESOURCE_FACT_NAME = 'network_set' + + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent']), + data=dict(required=True, type='dict')) + + def __init__(self): + super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec, + validate_etag_support=True) + self.resource_client = self.oneview_client.network_sets + + def execute_module(self): + resource = self.get_by_name(self.data.get('name')) + + if self.state == 'present': + return self._present(resource) + elif self.state == 'absent': + return self.resource_absent(resource) + + def _present(self, resource): + scope_uris = self.data.pop('scopeUris', None) + self._replace_network_name_by_uri(self.data) + result = self.resource_present(resource, self.RESOURCE_FACT_NAME) + if scope_uris is not None: + result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris) + return result + + def _get_ethernet_network_by_name(self, name): + result = self.oneview_client.ethernet_networks.get_by('name', name) + return result[0] if result else None + + def _get_network_uri(self, network_name_or_uri): + if network_name_or_uri.startswith('/rest/ethernet-networks'): + return network_name_or_uri + else: + enet_network = self._get_ethernet_network_by_name(network_name_or_uri) + if enet_network: + return enet_network['uri'] + else: + raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri) + + def _replace_network_name_by_uri(self, data): + if 'networkUris' in data: + data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']] + + +def main(): + NetworkSetModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py b/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py new file mode 100644 index 000000000..d1a1f2913 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_network_set_info +short_description: Retrieve information about the OneView Network Sets +description: + - Retrieve information about the Network Sets from OneView. + - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)! +requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + name: + description: + - Network Set name. + type: str + + options: + description: + - "List with options to gather information about Network Set. + Option allowed: C(withoutEthernet). + The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks." 
+ type: list + elements: str + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = ''' +- name: Gather information about all Network Sets + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Network Sets + ansible.builtin.debug: + msg: "{{ result.network_sets }}" + +- name: Gather paginated, filtered and sorted information about Network Sets + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + params: + start: 0 + count: 3 + sort: 'name:descending' + filter: name='netset001' + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of Network Sets + ansible.builtin.debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about all Network Sets, excluding Ethernet networks + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + options: + - withoutEthernet + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Network Sets, excluding Ethernet networks + ansible.builtin.debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about a Network Set by name + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: Name of the Network Set + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Network Set found by name + ansible.builtin.debug: + msg: "{{ result.network_sets }}" + +- name: Gather information about a Network Set by name, excluding Ethernet networks + community.general.oneview_network_set_info: + hostname: 172.16.101.48 + username: administrator + password: my_password + api_version: 500 + name: Name of the Network Set + options: + - withoutEthernet + no_log: true + delegate_to: localhost + register: result + +- name: Print fetched information about Network Set found by name, excluding Ethernet networks + ansible.builtin.debug: + msg: "{{ result.network_sets }}" +''' + +RETURN = ''' +network_sets: + description: Has all the OneView information about the Network Sets. + returned: Always, but can be empty. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class NetworkSetInfoModule(OneViewModuleBase): + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list', elements='str'), + params=dict(type='dict'), + ) + + def __init__(self): + super(NetworkSetInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + + def execute_module(self): + + name = self.module.params.get('name') + + if 'withoutEthernet' in self.options: + filter_by_name = ("\"'name'='%s'\"" % name) if name else '' + network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name) + elif name: + network_sets = self.oneview_client.network_sets.get_by('name', name) + else: + network_sets = self.oneview_client.network_sets.get_all(**self.facts_params) + + return dict(changed=False, network_sets=network_sets) + + +def main(): + NetworkSetInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/oneview_san_manager.py b/ansible_collections/community/general/plugins/modules/oneview_san_manager.py new file mode 100644 index 000000000..65a016b1c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/oneview_san_manager.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: oneview_san_manager +short_description: Manage OneView SAN Manager resources +description: + - Provides an interface to manage SAN Manager resources. Can create, update, or delete. +requirements: + - hpOneView >= 3.1.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicates the desired state for the Uplink Set resource. + - C(present) ensures data properties are compliant with OneView. + - C(absent) removes the resource from OneView, if it exists. + - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent. + type: str + default: present + choices: [present, absent, connection_information_set] + data: + description: + - List with SAN Manager properties. 
+ type: dict + required: true + +extends_documentation_fragment: + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + providerDisplayName: Brocade Network Advisor + connectionInfo: + - name: Host + value: 172.18.15.1 + - name: Port + value: 5989 + - name: Username + value: username + - name: Password + value: password + - name: UseSsl + value: true + delegate_to: localhost + +- name: Ensure a Device Manager for the Cisco SAN Provider is present + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + name: 172.18.20.1 + providerDisplayName: Cisco + connectionInfo: + - name: Host + value: 172.18.20.1 + - name: SnmpPort + value: 161 + - name: SnmpUserName + value: admin + - name: SnmpAuthLevel + value: authnopriv + - name: SnmpAuthProtocol + value: sha + - name: SnmpAuthString + value: password + delegate_to: localhost + +- name: Sets the SAN Manager connection information + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: connection_information_set + data: + connectionInfo: + - name: Host + value: '172.18.15.1' + - name: Port + value: '5989' + - name: Username + value: 'username' + - name: Password + value: 'password' + - name: UseSsl + value: true + delegate_to: localhost + +- name: Refreshes the SAN Manager + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: present + data: + name: 172.18.15.1 + refreshState: RefreshPending + delegate_to: localhost + +- name: Delete the SAN Manager recently created + community.general.oneview_san_manager: + config: /etc/oneview/oneview_config.json + state: absent + data: + name: '172.18.15.1' + delegate_to: localhost +''' + +RETURN = ''' +san_manager: + description: Has the OneView facts about the SAN Manager. + returned: On state 'present'. Can be null. + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError + + +class SanManagerModule(OneViewModuleBase): + MSG_CREATED = 'SAN Manager created successfully.' + MSG_UPDATED = 'SAN Manager updated successfully.' + MSG_DELETED = 'SAN Manager deleted successfully.' + MSG_ALREADY_PRESENT = 'SAN Manager is already present.' + MSG_ALREADY_ABSENT = 'SAN Manager is already absent.' + MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found." + + argument_spec = dict( + state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']), + data=dict(type='dict', required=True) + ) + + def __init__(self): + super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True) + self.resource_client = self.oneview_client.san_managers + + def execute_module(self): + if self.data.get('connectionInfo'): + for connection_hash in self.data.get('connectionInfo'): + if connection_hash.get('name') == 'Host': + resource_name = connection_hash.get('value') + elif self.data.get('name'): + resource_name = self.data.get('name') + else: + msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. 
'
+            msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
+            raise OneViewModuleValueError(msg)
+
+        resource = self.resource_client.get_by_name(resource_name)
+
+        if self.state == 'present':
+            changed, msg, san_manager = self._present(resource)
+            return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+        elif self.state == 'absent':
+            return self.resource_absent(resource, method='remove')
+
+        elif self.state == 'connection_information_set':
+            changed, msg, san_manager = self._connection_information_set(resource)
+            return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+    def _present(self, resource):
+        if not resource:
+            provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
+            return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
+        else:
+            merged_data = resource.copy()
+            merged_data.update(self.data)
+
+            # Remove 'connectionInfo' from comparison, since it is not possible to validate it.
+            resource.pop('connectionInfo', None)
+            merged_data.pop('connectionInfo', None)
+
+            if self.compare(resource, merged_data):
+                return False, self.MSG_ALREADY_PRESENT, resource
+            else:
+                updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+                return True, self.MSG_UPDATED, updated_san_manager
+
+    def _connection_information_set(self, resource):
+        if not resource:
+            return self._present(resource)
+        else:
+            merged_data = resource.copy()
+            merged_data.update(self.data)
+            merged_data.pop('refreshState', None)
+            if not self.data.get('connectionInfo', None):
+                raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
+            updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+            return True, self.MSG_UPDATED, updated_san_manager
+
+    def _get_provider_uri_by_display_name(self, data):
+        display_name = data.get('providerDisplayName')
+        provider_uri = self.resource_client.get_provider_uri(display_name)
+
+        if not provider_uri:
+            raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
+
+        return provider_uri
+
+
+def main():
+    SanManagerModule().run()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py b/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
new file mode 100644
index 000000000..9b00a6bb5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+  - Retrieve information about one or more of the SAN Managers from OneView.
+  - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+    Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements: + - hpOneView >= 2.0.1 +author: + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + provider_display_name: + description: + - Provider Display Name. + type: str + params: + description: + - List of params to delimit, filter and sort the list of resources. + - "params allowed: + - C(start): The first item to return, using 0-based indexing. + - C(count): The number of resources to return. + - C(query): A general query string to narrow the list of resources returned. + - C(sort): The sort order of the returned data set." + type: dict +extends_documentation_fragment: + - community.general.oneview + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = ''' +- name: Gather information about all SAN Managers + community.general.oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + delegate_to: localhost + register: result + +- name: Print fetched information about SAN Managers + ansible.builtin.debug: + msg: "{{ result.san_managers }}" + +- name: Gather paginated, filtered and sorted information about SAN Managers + community.general.oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + params: + start: 0 + count: 3 + sort: name:ascending + query: isInternal eq false + delegate_to: localhost + register: result + +- name: Print fetched information about paginated, filtered and sorted list of SAN Managers + ansible.builtin.debug: + msg: "{{ result.san_managers }}" + +- name: Gather information about a SAN Manager by provider display name + community.general.oneview_san_manager_info: + config: /etc/oneview/oneview_config.json + provider_display_name: Brocade Network Advisor + delegate_to: localhost + register: result + +- name: Print fetched information about SAN Manager found by provider display name + ansible.builtin.debug: + msg: "{{ result.san_managers }}" +''' + +RETURN = ''' +san_managers: + description: Has all the OneView information about the SAN Managers. + returned: Always, but can be null. 
+ type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase + + +class SanManagerInfoModule(OneViewModuleBase): + argument_spec = dict( + provider_display_name=dict(type='str'), + params=dict(type='dict') + ) + + def __init__(self): + super(SanManagerInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) + self.resource_client = self.oneview_client.san_managers + + def execute_module(self): + if self.module.params.get('provider_display_name'): + provider_display_name = self.module.params['provider_display_name'] + san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name) + if san_manager: + resources = [san_manager] + else: + resources = [] + else: + resources = self.oneview_client.san_managers.get_all(**self.facts_params) + + return dict(changed=False, san_managers=resources) + + +def main(): + SanManagerInfoModule().run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/online_server_info.py b/ansible_collections/community/general/plugins/modules/online_server_info.py new file mode 100644 index 000000000..f6d03cb27 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/online_server_info.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: online_server_info +short_description: Gather information about Online servers +description: + - Gather information about the servers. + - U(https://www.online.net/en/dedicated-server) +author: + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.online + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = r''' +- name: Gather Online server information + community.general.online_server_info: + api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' + register: result + +- ansible.builtin.debug: + msg: "{{ result.online_server_info }}" +''' + +RETURN = r''' +online_server_info: + description: + - Response from Online API. + - "For more details please refer to: U(https://console.online.net/en/api/)." 
+ returned: success + type: list + elements: dict + sample: + "online_server_info": [ + { + "abuse": "abuse@example.com", + "anti_ddos": false, + "bmc": { + "session_key": null + }, + "boot_mode": "normal", + "contacts": { + "owner": "foobar", + "tech": "foobar" + }, + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "drive_arrays": [ + { + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "raid_controller": { + "$ref": "/api/v1/server/hardware/raidController/9910" + }, + "raid_level": "RAID1" + } + ], + "hardware_watch": true, + "hostname": "sd-42", + "id": 42, + "ip": [ + { + "address": "195.154.172.149", + "mac": "28:92:4a:33:5e:c6", + "reverse": "195-154-172-149.rev.poneytelecom.eu.", + "switch_port_state": "up", + "type": "public" + }, + { + "address": "10.90.53.212", + "mac": "28:92:4a:33:5e:c7", + "reverse": null, + "switch_port_state": "up", + "type": "private" + } + ], + "last_reboot": "2018-08-23T08:32:03.000Z", + "location": { + "block": "A", + "datacenter": "DC3", + "position": 19, + "rack": "A23", + "room": "4 4-4" + }, + "network": { + "ip": [ + "195.154.172.149" + ], + "ipfo": [], + "private": [ + "10.90.53.212" + ] + }, + "offer": "Pro-1-S-SATA", + "os": { + "name": "FreeBSD", + "version": "11.1-RELEASE" + }, + "power": "ON", + "proactive_monitoring": false, + "raid_controllers": [ + { + "$ref": "/api/v1/server/hardware/raidController/9910" + } + ], + "support": "Basic service level" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.online import ( + Online, OnlineException, online_argument_spec +) + + +class OnlineServerInfo(Online): + + def __init__(self, module): + super(OnlineServerInfo, self).__init__(module) + self.name = 'api/v1/server' + + def _get_server_detail(self, server_path): + try: + return self.get(path=server_path).json + except OnlineException as exc: + self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc)) + + def all_detailed_servers(self): + servers_api_path = self.get_resources() + + server_data = ( + self._get_server_detail(server_api_path) + for server_api_path in servers_api_path + ) + + return [s for s in server_data if s is not None] + + +def main(): + module = AnsibleModule( + argument_spec=online_argument_spec(), + supports_check_mode=True, + ) + + try: + servers_info = OnlineServerInfo(module).all_detailed_servers() + module.exit_json( + online_server_info=servers_info + ) + except OnlineException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/online_user_info.py b/ansible_collections/community/general/plugins/modules/online_user_info.py new file mode 100644 index 000000000..1d91418ca --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/online_user_info.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +module: online_user_info +short_description: Gather information about Online user +description: + - Gather information about 
the user. +author: + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.online + - community.general.attributes + - community.general.attributes.info_module +''' + +EXAMPLES = r''' +- name: Gather Online user info + community.general.online_user_info: + register: result + +- ansible.builtin.debug: + msg: "{{ result.online_user_info }}" +''' + +RETURN = r''' +online_user_info: + description: + - Response from Online API. + - "For more details please refer to: U(https://console.online.net/en/api/)." + returned: success + type: dict + sample: + "online_user_info": { + "company": "foobar LLC", + "email": "foobar@example.com", + "first_name": "foo", + "id": 42, + "last_name": "bar", + "login": "foobar" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.online import ( + Online, OnlineException, online_argument_spec +) + + +class OnlineUserInfo(Online): + + def __init__(self, module): + super(OnlineUserInfo, self).__init__(module) + self.name = 'api/v1/user' + + +def main(): + module = AnsibleModule( + argument_spec=online_argument_spec(), + supports_check_mode=True, + ) + + try: + module.exit_json( + online_user_info=OnlineUserInfo(module).get_resources() + ) + except OnlineException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/open_iscsi.py b/ansible_collections/community/general/plugins/modules/open_iscsi.py new file mode 100644 index 000000000..af08d1c54 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/open_iscsi.py @@ -0,0 +1,464 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Serge van Ginderachter +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: open_iscsi +author: + - Serge van Ginderachter (@srvg) +short_description: Manage iSCSI targets with Open-iSCSI +description: + - Discover targets on given portal, (dis)connect targets, mark targets to + manually or auto start, return device nodes of connected targets. +requirements: + - open_iscsi library and tools (iscsiadm) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + portal: + description: + - The domain name or IP address of the iSCSI target. + type: str + aliases: [ ip ] + port: + description: + - The port on which the iSCSI target process listens. + type: str + default: '3260' + target: + description: + - The iSCSI target name. + type: str + aliases: [ name, targetname ] + login: + description: + - Whether the target node should be connected. + type: bool + aliases: [ state ] + node_auth: + description: + - The value for C(node.session.auth.authmethod). + type: str + default: CHAP + node_user: + description: + - The value for C(node.session.auth.username). + type: str + node_pass: + description: + - The value for C(node.session.auth.password). + type: str + node_user_in: + description: + - The value for C(node.session.auth.username_in). + type: str + version_added: 3.8.0 + node_pass_in: + description: + - The value for C(node.session.auth.password_in). 
+ type: str + version_added: 3.8.0 + auto_node_startup: + description: + - Whether the target node should be automatically connected at startup. + type: bool + aliases: [ automatic ] + auto_portal_startup: + description: + - Whether the target node portal should be automatically connected at startup. + type: bool + version_added: 3.2.0 + discover: + description: + - Whether the list of target nodes on the portal should be + (re)discovered and added to the persistent iSCSI database. + - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) + to manual, hence combined with I(auto_node_startup=true) will always return + a changed state. + type: bool + default: false + show_nodes: + description: + - Whether the list of nodes in the persistent iSCSI database should be returned by the module. + type: bool + default: false + rescan: + description: + - Rescan an established session for discovering new targets. + - When I(target) is omitted, will rescan all sessions. + type: bool + default: false + version_added: 4.1.0 + +''' + +EXAMPLES = r''' +- name: Perform a discovery on sun.com and show available target nodes + community.general.open_iscsi: + show_nodes: true + discover: true + portal: sun.com + +- name: Perform a discovery on 10.1.2.3 and show available target nodes + community.general.open_iscsi: + show_nodes: true + discover: true + ip: 10.1.2.3 + +# NOTE: Only works if exactly one target is exported to the initiator +- name: Discover targets on portal and login to the one available + community.general.open_iscsi: + portal: '{{ iscsi_target }}' + login: true + discover: true + +- name: Connect to the named target, after updating the local persistent database (cache) + community.general.open_iscsi: + login: true + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Disconnect from the cached named target + community.general.open_iscsi: + login: false + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Override and disable automatic portal login on specific portal + community.general.open_iscsi: + login: false + portal: 10.1.1.250 + auto_portal_startup: false + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Rescan one or all established sessions to discover new targets (omit target for all sessions) + community.general.open_iscsi: + rescan: true + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d +''' + +import glob +import os +import re +import socket +import time + +from ansible.module_utils.basic import AnsibleModule + +ISCSIADM = 'iscsiadm' +iscsiadm_cmd = None + + +def compare_nodelists(l1, l2): + l1.sort() + l2.sort() + return l1 == l2 + + +def iscsi_get_cached_nodes(module, portal=None): + cmd = [iscsiadm_cmd, '--mode', 'node'] + rc, out, err = module.run_command(cmd) + + nodes = [] + if rc == 0: + lines = out.splitlines() + for line in lines: + # line format is "ip:port,target_portal_group_tag targetname" + parts = line.split() + if len(parts) > 2: + module.fail_json(msg='error parsing output', cmd=cmd) + target = parts[1] + parts = parts[0].split(':') + target_portal = parts[0] + + if portal is None or portal == target_portal: + nodes.append(target) + + # older versions of scsiadm don't have nice return codes + # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details + # err can contain [N|n]o records... 
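+    # The two "no records" shapes accepted below, as implied by the comment
+    # above: a dedicated exit code (21) on newer versions, or rc 255 together
+    # with a "[N|n]o records found" message on older ones.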
+    elif rc == 21 or (rc == 255 and "o records found" in err):
+        pass
+    else:
+        module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+    return nodes
+
+
+def iscsi_discover(module, portal, port):
+    cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', '%s:%s' % (portal, port)]
+    module.run_command(cmd, check_rc=True)
+
+
+def iscsi_rescan(module, target=None):
+    if target is None:
+        cmd = [iscsiadm_cmd, '--mode', 'session', '--rescan']
+    else:
+        cmd = [iscsiadm_cmd, '--mode', 'node', '--rescan', '-T', target]
+    rc, out, err = module.run_command(cmd)
+    return out
+
+
+def target_loggedon(module, target, portal=None, port=None):
+    cmd = [iscsiadm_cmd, '--mode', 'session']
+    rc, out, err = module.run_command(cmd)
+
+    if portal is None:
+        portal = ""
+    if port is None:
+        port = ""
+
+    if rc == 0:
+        search_re = "%s:%s.*%s" % (re.escape(portal), port, re.escape(target))
+        return re.search(search_re, out) is not None
+    elif rc == 21:
+        return False
+    else:
+        module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_login(module, target, portal=None, port=None):
+    node_auth = module.params['node_auth']
+    node_user = module.params['node_user']
+    node_pass = module.params['node_pass']
+    node_user_in = module.params['node_user_in']
+    node_pass_in = module.params['node_pass_in']
+
+    if node_user:
+        params = [('node.session.auth.authmethod', node_auth),
+                  ('node.session.auth.username', node_user),
+                  ('node.session.auth.password', node_pass)]
+        for (name, value) in params:
+            cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value]
+            module.run_command(cmd, check_rc=True)
+
+    if node_user_in:
+        params = [('node.session.auth.username_in', node_user_in),
+                  ('node.session.auth.password_in', node_pass_in)]
+        for (name, value) in params:
+            cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value]
+            module.run_command(cmd, check_rc=True)
+
+    cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login']
+    if portal is not None and port is not None:
+        cmd.append('--portal')
+        cmd.append('%s:%s' % (portal, port))
+
+    module.run_command(cmd, check_rc=True)
+
+
+def target_logout(module, target):
+    cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--logout']
+    module.run_command(cmd, check_rc=True)
+
+
+def target_device_node(target):
+    # if anyone knows a better way to find out which device nodes get created for
+    # a given target...
+
+    devices = glob.glob('/dev/disk/by-path/*%s*' % target)
+    devdisks = []
+    for dev in devices:
+        # exclude partitions
+        if "-part" not in dev:
+            devdisk = os.path.realpath(dev)
+            # only add once (multi-path?)
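+            # (several by-path symlinks may resolve to the same real disk,
+            # for example when the target is reachable over multiple paths)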
+ if devdisk not in devdisks: + devdisks.append(devdisk) + return devdisks + + +def target_isauto(module, target, portal=None, port=None): + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target] + + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) + + dummy, out, dummy = module.run_command(cmd, check_rc=True) + + lines = out.splitlines() + for line in lines: + if 'node.startup' in line: + return 'automatic' in line + return False + + +def target_setauto(module, target, portal=None, port=None): + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'automatic'] + + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) + + module.run_command(cmd, check_rc=True) + + +def target_setmanual(module, target, portal=None, port=None): + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'manual'] + + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) + + module.run_command(cmd, check_rc=True) + + +def main(): + # load ansible module object + module = AnsibleModule( + argument_spec=dict( + + # target + portal=dict(type='str', aliases=['ip']), + port=dict(type='str', default='3260'), + target=dict(type='str', aliases=['name', 'targetname']), + node_auth=dict(type='str', default='CHAP'), + node_user=dict(type='str'), + node_pass=dict(type='str', no_log=True), + node_user_in=dict(type='str'), + node_pass_in=dict(type='str', no_log=True), + + # actions + login=dict(type='bool', aliases=['state']), + auto_node_startup=dict(type='bool', aliases=['automatic']), + auto_portal_startup=dict(type='bool'), + discover=dict(type='bool', default=False), + show_nodes=dict(type='bool', default=False), + rescan=dict(type='bool', default=False), + ), + + required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']], + required_if=[('discover', True, ['portal'])], + supports_check_mode=True, + ) + + global iscsiadm_cmd + iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True) + + # parameters + portal = module.params['portal'] + if portal: + try: + portal = socket.getaddrinfo(portal, None)[0][4][0] + except socket.gaierror: + module.fail_json(msg="Portal address is incorrect") + + target = module.params['target'] + port = module.params['port'] + login = module.params['login'] + automatic = module.params['auto_node_startup'] + automatic_portal = module.params['auto_portal_startup'] + discover = module.params['discover'] + show_nodes = module.params['show_nodes'] + rescan = module.params['rescan'] + + check = module.check_mode + + cached = iscsi_get_cached_nodes(module, portal) + + # return json dict + result = {'changed': False} + + if discover: + if check: + nodes = cached + else: + iscsi_discover(module, portal, port) + nodes = iscsi_get_cached_nodes(module, portal) + if not compare_nodelists(cached, nodes): + result['changed'] |= True + result['cache_updated'] = True + else: + nodes = cached + + if login is not None or automatic is not None: + if target is None: + if len(nodes) > 1: + module.fail_json(msg="Need to specify a target") + else: + target = nodes[0] + else: + # check given target is in cache + check_target = False + for node in nodes: + if node == target: + check_target = True + break + if not check_target: + module.fail_json(msg="Specified target not found") + + if show_nodes: + 
result['nodes'] = nodes + + if login is not None: + loggedon = target_loggedon(module, target, portal, port) + if (login and loggedon) or (not login and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] = target_device_node(target) + elif not check: + if login: + target_login(module, target, portal, port) + # give udev some time + time.sleep(1) + result['devicenodes'] = target_device_node(target) + else: + target_logout(module, target) + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True + + if automatic is not None: + isauto = target_isauto(module, target) + if (automatic and isauto) or (not automatic and not isauto): + result['changed'] |= False + result['automatic_changed'] = False + elif not check: + if automatic: + target_setauto(module, target) + else: + target_setmanual(module, target) + result['changed'] |= True + result['automatic_changed'] = True + else: + result['changed'] |= True + result['automatic_changed'] = True + + if automatic_portal is not None: + isauto = target_isauto(module, target, portal, port) + if (automatic_portal and isauto) or (not automatic_portal and not isauto): + result['changed'] |= False + result['automatic_portal_changed'] = False + elif not check: + if automatic_portal: + target_setauto(module, target, portal, port) + else: + target_setmanual(module, target, portal, port) + result['changed'] |= True + result['automatic_portal_changed'] = True + else: + result['changed'] |= True + result['automatic_portal_changed'] = True + + if rescan is not False: + result['changed'] = True + result['sessions'] = iscsi_rescan(module, target) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/openbsd_pkg.py b/ansible_collections/community/general/plugins/modules/openbsd_pkg.py new file mode 100644 index 000000000..2baea828a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/openbsd_pkg.py @@ -0,0 +1,664 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Patrik Lundin +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: openbsd_pkg +author: + - Patrik Lundin (@eest) +short_description: Manage packages on OpenBSD +description: + - Manage packages on OpenBSD using the pkg tools. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - A name or a list of names of the packages. + required: true + type: list + elements: str + state: + description: + - C(present) will make sure the package is installed. + C(latest) will make sure the latest version of the package is installed. + C(absent) will make sure the specified package is not installed. + choices: [ absent, latest, present, installed, removed ] + default: present + type: str + build: + description: + - Build the package from source instead of downloading and installing + a binary. Requires that the port source tree is already installed. + Automatically builds and installs the 'sqlports' package, if it is + not already installed. + - Mutually exclusive with I(snapshot). 
+    type: bool
+    default: false
+  snapshot:
+    description:
+      - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
+      - Mutually exclusive with I(build).
+    type: bool
+    default: false
+    version_added: 1.3.0
+  ports_dir:
+    description:
+      - When used in combination with the C(build) option, allows overriding
+        the default ports source directory.
+    default: /usr/ports
+    type: path
+  clean:
+    description:
+      - When updating or removing packages, delete the extra configuration
+        file(s) in the old packages which are annotated with @extra in
+        the packaging-list.
+    type: bool
+    default: false
+  quick:
+    description:
+      - Replace or delete packages quickly; do not bother with checksums
+        before removing normal files.
+    type: bool
+    default: false
+notes:
+  - When used with a C(loop:) each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option.
+'''
+
+EXAMPLES = '''
+- name: Make sure nmap is installed
+  community.general.openbsd_pkg:
+    name: nmap
+    state: present
+
+- name: Make sure nmap is the latest version
+  community.general.openbsd_pkg:
+    name: nmap
+    state: latest
+
+- name: Make sure nmap is not installed
+  community.general.openbsd_pkg:
+    name: nmap
+    state: absent
+
+- name: Make sure nmap is installed, build it from source if it is not
+  community.general.openbsd_pkg:
+    name: nmap
+    state: present
+    build: true
+
+- name: Specify a pkg flavour with '--'
+  community.general.openbsd_pkg:
+    name: vim--no_x11
+    state: present
+
+- name: Specify the default flavour to avoid ambiguity errors
+  community.general.openbsd_pkg:
+    name: vim--
+    state: present
+
+- name: Specify a package branch (requires at least OpenBSD 6.0)
+  community.general.openbsd_pkg:
+    name: python%3.5
+    state: present
+
+- name: Update all packages on the system
+  community.general.openbsd_pkg:
+    name: '*'
+    state: latest
+
+- name: Purge a package and its configuration files
+  community.general.openbsd_pkg:
+    name: mpd
+    clean: true
+    state: absent
+
+- name: Quickly remove a package without checking checksums
+  community.general.openbsd_pkg:
+    name: qt5
+    quick: true
+    state: absent
+'''
+
+import os
+import platform
+import re
+import shlex
+import sqlite3
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+# Function used for executing commands.
+def execute_command(cmd, module):
+    # Break command line into arguments.
+    # This makes run_command() use shell=False which we need to not cause shell
+    # expansion of special characters like '*'.
+    cmd_args = shlex.split(cmd)
+
+    # We set TERM to 'dumb' to keep pkg_add happy if the machine running
+    # ansible is using a TERM that the managed machine does not know about,
+    # e.g.: "No progress meter: failed termcap lookup on xterm-kitty".
+    return module.run_command(cmd_args, environ_update={'TERM': 'dumb'})
+
+
+# Function used to find out if a package is currently installed.
+def get_package_state(names, pkg_spec, module):
+    info_cmd = 'pkg_info -Iq'
+
+    for name in names:
+        command = "%s inst:%s" % (info_cmd, name)
+
+        rc, stdout, stderr = execute_command(command, module)
+
+        if stderr:
+            module.fail_json(msg="failed in get_package_state(): " + stderr)
+
+        if stdout:
+            # If the requested package name is just a stem, like "python", we may
+            # find multiple packages with that name.
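+            # For example, a query for the stem "python" could return both
+            # "python-2.7.18" and "python-3.9.7" (versions are illustrative).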
+            pkg_spec[name]['installed_names'] = stdout.splitlines()
+            module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
+            pkg_spec[name]['installed_state'] = True
+        else:
+            pkg_spec[name]['installed_state'] = False
+
+
+# Function used to make sure a package is present.
+def package_present(names, pkg_spec, module):
+    build = module.params['build']
+
+    for name in names:
+        # It is possible package_present() has been called from package_latest().
+        # In that case we do not want to operate on the whole list of names,
+        # only the leftovers.
+        if pkg_spec['package_latest_leftovers']:
+            if name not in pkg_spec['package_latest_leftovers']:
+                module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
+                continue
+            else:
+                module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
+
+        if module.check_mode:
+            install_cmd = 'pkg_add -Imn'
+        else:
+            if build is True:
+                port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+                if os.path.isdir(port_dir):
+                    if pkg_spec[name]['flavor']:
+                        flavors = pkg_spec[name]['flavor'].replace('-', ' ')
+                        install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+                    elif pkg_spec[name]['subpackage']:
+                        install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
+                                                                                                                               pkg_spec[name]['subpackage'])
+                    else:
+                        install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+                else:
+                    module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+            else:
+                install_cmd = 'pkg_add -Im'
+
+        if module.params['snapshot'] is True:
+            install_cmd += ' -Dsnap'
+
+        if pkg_spec[name]['installed_state'] is False:
+
+            # Attempt to install the package
+            if build is True and not module.check_mode:
+                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
+            else:
+                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
+
+            # The behaviour of pkg_add is a bit different depending on if a
+            # specific version is supplied or not.
+            #
+            # When a specific version is supplied the return code will be 0 when
+            # a package is found and 1 when it is not. If a version is not
+            # supplied the tool will exit 0 in both cases.
+            #
+            # It is important to note that "version" relates to the
+            # packages-specs(7) notion of a version. If using the branch syntax
+            # (like "python%3.5") even though a branch name may look like a
+            # version string it is not used as one by pkg_add.
+            if pkg_spec[name]['version'] or build is True:
+                # Depend on the return code.
+                module.debug("package_present(): depending on return code for name '%s'" % name)
+                if pkg_spec[name]['rc']:
+                    pkg_spec[name]['changed'] = False
+            else:
+                # Depend on stderr instead.
+                module.debug("package_present(): depending on stderr for name '%s'" % name)
+                if pkg_spec[name]['stderr']:
+                    # There is a corner case where having an empty directory in
+                    # installpath prior to the right location will result in a
+                    # "file:/local/package/directory/ is empty" message on stderr
+                    # while still installing the package, so we need to look
+                    # for a message like "packagename-1.0: ok" just in case.
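+                    # The \W delimiters in the pattern below serve the same
+                    # purpose as in package_latest(): they keep the match from
+                    # triggering on progress meter fragments in the output.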
+ match = re.search(r"\W%s-[^:]+: ok\W" % re.escape(pkg_spec[name]['stem']), pkg_spec[name]['stdout']) + + if match: + # It turns out we were able to install the package. + module.debug("package_present(): we were able to install package for name '%s'" % name) + pkg_spec[name]['changed'] = True + else: + # We really did fail, fake the return code. + module.debug("package_present(): we really did fail for name '%s'" % name) + pkg_spec[name]['rc'] = 1 + pkg_spec[name]['changed'] = False + else: + module.debug("package_present(): stderr was not set for name '%s'" % name) + + if pkg_spec[name]['rc'] == 0: + pkg_spec[name]['changed'] = True + + else: + pkg_spec[name]['rc'] = 0 + pkg_spec[name]['stdout'] = '' + pkg_spec[name]['stderr'] = '' + pkg_spec[name]['changed'] = False + + +# Function used to make sure a package is the latest available version. +def package_latest(names, pkg_spec, module): + if module.params['build'] is True: + module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build']) + + upgrade_cmd = 'pkg_add -um' + + if module.check_mode: + upgrade_cmd += 'n' + + if module.params['clean']: + upgrade_cmd += 'c' + + if module.params['quick']: + upgrade_cmd += 'q' + + if module.params['snapshot']: + upgrade_cmd += ' -Dsnap' + + for name in names: + if pkg_spec[name]['installed_state'] is True: + + # Attempt to upgrade the package. + (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module) + + # Look for output looking something like "nmap-6.01->6.25: ok" to see if + # something changed (or would have changed). Use \W to delimit the match + # from progress meter output. + pkg_spec[name]['changed'] = False + for installed_name in pkg_spec[name]['installed_names']: + module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name) + match = re.search(r"\W%s->.+: ok\W" % re.escape(installed_name), pkg_spec[name]['stdout']) + if match: + module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name) + + pkg_spec[name]['changed'] = True + break + + # FIXME: This part is problematic. Based on the issues mentioned (and + # handled) in package_present() it is not safe to blindly trust stderr + # as an indicator that the command failed, and in the case with + # empty installpath directories this will break. + # + # For now keep this safeguard here, but ignore it if we managed to + # parse out a successful update above. This way we will report a + # successful run when we actually modify something but fail + # otherwise. + if pkg_spec[name]['changed'] is not True: + if pkg_spec[name]['stderr']: + pkg_spec[name]['rc'] = 1 + + else: + # Note packages that need to be handled by package_present + module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name) + pkg_spec['package_latest_leftovers'].append(name) + + # If there were any packages that were not installed we call + # package_present() which will handle those. + if pkg_spec['package_latest_leftovers']: + module.debug("package_latest(): calling package_present() to handle leftovers") + package_present(names, pkg_spec, module) + + +# Function used to make sure a package is not installed. 
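+# The single-letter flags appended below mirror package_latest(): 'n' for a
+# dry-run under check mode, 'c' for clean and 'q' for quick.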
+def package_absent(names, pkg_spec, module):
+    remove_cmd = 'pkg_delete -I'
+
+    if module.check_mode:
+        remove_cmd += 'n'
+
+    if module.params['clean']:
+        remove_cmd += 'c'
+
+    if module.params['quick']:
+        remove_cmd += 'q'
+
+    for name in names:
+        if pkg_spec[name]['installed_state'] is True:
+            # Attempt to remove the package.
+            (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
+
+            if pkg_spec[name]['rc'] == 0:
+                pkg_spec[name]['changed'] = True
+            else:
+                pkg_spec[name]['changed'] = False
+
+        else:
+            pkg_spec[name]['rc'] = 0
+            pkg_spec[name]['stdout'] = ''
+            pkg_spec[name]['stderr'] = ''
+            pkg_spec[name]['changed'] = False
+
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
+def parse_package_name(names, pkg_spec, module):
+
+    # Initialize empty list of package_latest() leftovers.
+    pkg_spec['package_latest_leftovers'] = []
+
+    for name in names:
+        module.debug("parse_package_name(): parsing name: %s" % name)
+        # Do some initial matches so we can base the more advanced regex on that.
+        version_match = re.search("-[0-9]", name)
+        versionless_match = re.search("--", name)
+
+        # Stop if someone is giving us a name that both has a version and is
+        # version-less at the same time.
+        if version_match and versionless_match:
+            module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+        # All information for a given name is kept in the pkg_spec keyed by that name.
+        pkg_spec[name] = {}
+
+        # If name includes a version.
+        if version_match:
+            match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = '-'
+                pkg_spec[name]['version'] = match.group('version')
+                pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
+                pkg_spec[name]['flavor'] = match.group('flavor')
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'version'
+                module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
+                             "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+        # If name includes no version but is version-less ("--").
+        elif versionless_match:
+            match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = '-'
+                pkg_spec[name]['version'] = None
+                pkg_spec[name]['flavor_separator'] = '-'
+                pkg_spec[name]['flavor'] = match.group('flavor')
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'versionless'
+                module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
+        # If name includes no version, and is not version-less, it is all a
+        # stem, possibly with a branch (%branchname) tacked on at the
+        # end.
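+        # Example: "rsync" is a plain stem, while "python%3.5" is a stem
+        # with a branch (the branch form also appears in EXAMPLES above).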
+        else:
+            match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
+            if match:
+                pkg_spec[name]['stem'] = match.group('stem')
+                pkg_spec[name]['version_separator'] = None
+                pkg_spec[name]['version'] = None
+                pkg_spec[name]['flavor_separator'] = None
+                pkg_spec[name]['flavor'] = None
+                pkg_spec[name]['branch'] = match.group('branch')
+                pkg_spec[name]['style'] = 'stem'
+                module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+            else:
+                module.fail_json(msg="unable to parse package name at else: " + name)
+
+        # Verify that the managed host is new enough to support branch syntax.
+        if pkg_spec[name]['branch']:
+            branch_release = "6.0"
+
+            if LooseVersion(platform.release()) < LooseVersion(branch_release):
+                module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+        # Sanity check that there are no trailing dashes in flavor.
+        # Try to stop strange stuff early so we can be strict later.
+        if pkg_spec[name]['flavor']:
+            match = re.search("-$", pkg_spec[name]['flavor'])
+            if match:
+                module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
+
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+    pkg_spec[name]['subpackage'] = None
+    if pkg_spec[name]['stem'] == 'sqlports':
+        return 'databases/sqlports'
+    else:
+        # try for an exact match first
+        sqlports_db_file = '/usr/local/share/sqlports'
+        if not os.path.isfile(sqlports_db_file):
+            module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+        conn = sqlite3.connect(sqlports_db_file)
+        first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+        query = first_part_of_query + ' = ?'
+        module.debug("get_package_source_path(): exact query: %s" % query)
+        cursor = conn.execute(query, (name,))
+        results = cursor.fetchall()
+
+        # next, try for a fuzzier match
+        if len(results) < 1:
+            looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
+            query = first_part_of_query + ' LIKE ?'
+            if pkg_spec[name]['flavor']:
+                looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
+                module.debug("get_package_source_path(): fuzzy flavor query: %s" % query)
+                cursor = conn.execute(query, (looking_for,))
+            elif pkg_spec[name]['style'] == 'versionless':
+                query += ' AND fullpkgname NOT LIKE ?'
+                module.debug("get_package_source_path(): fuzzy versionless query: %s" % query)
+                cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+            else:
+                module.debug("get_package_source_path(): fuzzy query: %s" % query)
+                cursor = conn.execute(query, (looking_for,))
+            results = cursor.fetchall()
+
+        # error if we don't find exactly 1 match
+        conn.close()
+        if len(results) < 1:
+            module.fail_json(msg="could not find a port by the name '%s'" % name)
+        if len(results) > 1:
+            matches = map(lambda x: x[1], results)
+            module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+        # there's exactly 1 match, so figure out the subpackage, if any, then return
+        fullpkgpath = results[0][0]
+        parts = fullpkgpath.split(',')
+        if len(parts) > 1 and parts[1][0] == '-':
+            pkg_spec[name]['subpackage'] = parts[1]
+        return parts[0]
+
+
+# Function used for upgrading all installed packages.
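+# Only reached from main() when the single package name '*' is combined
+# with state=latest.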
+def upgrade_packages(pkg_spec, module):
+    if module.check_mode:
+        upgrade_cmd = 'pkg_add -Imnu'
+    else:
+        upgrade_cmd = 'pkg_add -Imu'
+
+    if module.params['snapshot']:
+        upgrade_cmd += ' -Dsnap'
+
+    # Create a minimal pkg_spec entry for '*' to store return values.
+    pkg_spec['*'] = {}
+
+    # Attempt to upgrade all packages.
+    pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command(upgrade_cmd, module)
+
+    # Try to find any occurrence of a package changing version like:
+    # "bzip2-1.0.6->1.0.6p0: ok".
+    match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+    if match:
+        pkg_spec['*']['changed'] = True
+
+    else:
+        pkg_spec['*']['changed'] = False
+
+    # It seems we can not trust the return value, so depend on the presence of
+    # stderr to know if something failed.
+    if pkg_spec['*']['stderr']:
+        pkg_spec['*']['rc'] = 1
+    else:
+        pkg_spec['*']['rc'] = 0
+
+
+# ===========================================
+# Main control flow.
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='list', elements='str', required=True),
+            state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
+            build=dict(type='bool', default=False),
+            snapshot=dict(type='bool', default=False),
+            ports_dir=dict(type='path', default='/usr/ports'),
+            quick=dict(type='bool', default=False),
+            clean=dict(type='bool', default=False),
+        ),
+        mutually_exclusive=[['snapshot', 'build']],
+        supports_check_mode=True
+    )
+
+    name = module.params['name']
+    state = module.params['state']
+    build = module.params['build']
+    ports_dir = module.params['ports_dir']
+
+    rc = 0
+    stdout = ''
+    stderr = ''
+    result = {}
+    result['name'] = name
+    result['state'] = state
+    result['build'] = build
+
+    # The data structure used to keep track of package information.
+    pkg_spec = {}
+
+    if build is True:
+        if not os.path.isdir(ports_dir):
+            module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+        # build sqlports if it's not installed yet
+        parse_package_name(['sqlports'], pkg_spec, module)
+        get_package_state(['sqlports'], pkg_spec, module)
+        if not pkg_spec['sqlports']['installed_state']:
+            module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+            package_present(['sqlports'], pkg_spec, module)
+
+    asterisk_name = False
+    for n in name:
+        if n == '*':
+            if len(name) != 1:
+                module.fail_json(msg="the package name '*' can not be mixed with other names")
+
+            asterisk_name = True
+
+    if asterisk_name:
+        if state != 'latest':
+            module.fail_json(msg="the package name '*' is only valid when using state=latest")
+        else:
+            # Perform an upgrade of all installed packages.
+            upgrade_packages(pkg_spec, module)
+    else:
+        # Parse package names and put results in the pkg_spec dictionary.
+        parse_package_name(name, pkg_spec, module)
+
+        # Not sure how the branch syntax is supposed to play together
+        # with build mode. Disable it for now.
+        for n in name:
+            if pkg_spec[n]['branch'] and module.params['build'] is True:
+                module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
+
+        # Get state for all package names.
+        get_package_state(name, pkg_spec, module)
+
+        # Perform requested action.
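+        # 'installed' and 'removed' are kept as legacy aliases for 'present'
+        # and 'absent' (see the choices list in the argument spec above).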
+        if state in ['installed', 'present']:
+            package_present(name, pkg_spec, module)
+        elif state in ['absent', 'removed']:
+            package_absent(name, pkg_spec, module)
+        elif state == 'latest':
+            package_latest(name, pkg_spec, module)
+
+    # The combined changed status for all requested packages. If anything
+    # is changed this is set to True.
+    combined_changed = False
+
+    # The combined failed status for all requested packages. If anything
+    # failed this is set to True.
+    combined_failed = False
+
+    # We combine all error messages in this comma separated string, for example:
+    # "msg": "Can't find nmapp\n, Can't find nmappp\n"
+    combined_error_message = ''
+
+    # Loop over all requested package names and check if anything failed or
+    # changed.
+    for n in name:
+        if pkg_spec[n]['rc'] != 0:
+            combined_failed = True
+            if pkg_spec[n]['stderr']:
+                if combined_error_message:
+                    combined_error_message += ", %s" % pkg_spec[n]['stderr']
+                else:
+                    combined_error_message = pkg_spec[n]['stderr']
+            else:
+                if combined_error_message:
+                    combined_error_message += ", %s" % pkg_spec[n]['stdout']
+                else:
+                    combined_error_message = pkg_spec[n]['stdout']
+
+        if pkg_spec[n]['changed'] is True:
+            combined_changed = True
+
+    # If combined_error_message contains anything at least some part of the
+    # list of requested package names failed.
+    if combined_failed:
+        module.fail_json(msg=combined_error_message, **result)
+
+    result['changed'] = combined_changed
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/opendj_backendprop.py b/ansible_collections/community/general/plugins/modules/opendj_backendprop.py
new file mode 100644
index 000000000..fed53532d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/opendj_backendprop.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command
+description:
+  - This module will update settings for OpenDJ with the command set-backend-prop.
+  - It will first check via the get-backend-prop command whether the configuration needs to be applied.
+author:
+  - Werner Dijkerman (@dj-wasabi)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  opendj_bindir:
+    description:
+      - The path to the bin directory of OpenDJ.
+    required: false
+    default: /opt/opendj/bin
+    type: path
+  hostname:
+    description:
+      - The hostname of the OpenDJ server.
+    required: true
+    type: str
+  port:
+    description:
+      - The Admin port on which the OpenDJ instance is available.
+    required: true
+    type: str
+  username:
+    description:
+      - The username to connect to.
+    required: false
+    default: cn=Directory Manager
+    type: str
+  password:
+    description:
+      - The password for the cn=Directory Manager user.
+      - Either password or passwordfile is needed.
+    required: false
+    type: str
+  passwordfile:
+    description:
+      - Location to the password file which holds the password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed. + required: false + type: path + backend: + description: + - The name of the backend on which the property needs to be updated. + required: true + type: str + name: + description: + - The configuration setting to update. + required: true + type: str + value: + description: + - The value for the configuration item. + required: true + type: str + state: + description: + - If configuration needs to be added/updated + required: false + default: "present" + type: str +''' + +EXAMPLES = ''' + - name: Add or update OpenDJ backend properties + action: opendj_backendprop + hostname=localhost + port=4444 + username="cn=Directory Manager" + password=password + backend=userRoot + name=index-entry-limit + value=5000 +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule + + +class BackendProp(object): + + def __init__(self, module): + self._module = module + + def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name): + my_command = [ + opendj_bindir + '/dsconfig', + 'get-backend-prop', + '-h', hostname, + '--port', str(port), + '--bindDN', username, + '--backend-name', backend_name, + '-n', '-X', '-s' + ] + password_method + rc, stdout, stderr = self._module.run_command(my_command) + if rc == 0: + return stdout + else: + self._module.fail_json(msg="Error message: " + str(stderr)) + + def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value): + my_command = [ + opendj_bindir + '/dsconfig', + 'set-backend-prop', + '-h', hostname, + '--port', str(port), + '--bindDN', username, + '--backend-name', backend_name, + '--set', name + ":" + value, + '-n', '-X' + ] + password_method + rc, stdout, stderr = self._module.run_command(my_command) + if rc == 0: + return True + else: + self._module.fail_json(msg="Error message: " + stderr) + + def validate_data(self, data=None, name=None, value=None): + for config_line in data.split('\n'): + if config_line: + split_line = config_line.split() + if split_line[0] == name: + if split_line[1] == value: + return True + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + opendj_bindir=dict(default="/opt/opendj/bin", type="path"), + hostname=dict(required=True), + port=dict(required=True), + username=dict(default="cn=Directory Manager", required=False), + password=dict(required=False, no_log=True), + passwordfile=dict(required=False, type="path"), + backend=dict(required=True), + name=dict(required=True), + value=dict(required=True), + state=dict(default="present"), + ), + supports_check_mode=True, + mutually_exclusive=[['password', 'passwordfile']], + required_one_of=[['password', 'passwordfile']] + ) + + opendj_bindir = module.params['opendj_bindir'] + hostname = module.params['hostname'] + port = module.params['port'] + username = module.params['username'] + password = module.params['password'] + passwordfile = module.params['passwordfile'] + backend_name = module.params['backend'] + name = module.params['name'] + value = module.params['value'] + state = module.params['state'] + + if module.params["password"] is not None: + password_method = ['-w', password] + elif module.params["passwordfile"] is not None: + password_method = ['-j', passwordfile] + + opendj = BackendProp(module) + validate = opendj.get_property(opendj_bindir=opendj_bindir, + hostname=hostname, + port=port, + username=username, + password_method=password_method, + backend_name=backend_name) + + if validate: + if not 
opendj.validate_data(data=validate, name=name, value=value): + if module.check_mode: + module.exit_json(changed=True) + if opendj.set_property(opendj_bindir=opendj_bindir, + hostname=hostname, + port=port, + username=username, + password_method=password_method, + backend_name=backend_name, + name=name, + value=value): + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.exit_json(changed=False) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/openwrt_init.py b/ansible_collections/community/general/plugins/modules/openwrt_init.py new file mode 100644 index 000000000..a0e156b33 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/openwrt_init.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016, Andrew Gaffney +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +module: openwrt_init +author: + - "Andrew Gaffney (@agaffney)" +short_description: Manage services on OpenWrt +description: + - Controls OpenWrt services on remote hosts. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - Name of the service. + required: true + aliases: ['service'] + state: + type: str + description: + - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. + C(restarted) will always bounce the service. C(reloaded) will always reload. + choices: [ 'started', 'stopped', 'restarted', 'reloaded' ] + enabled: + description: + - Whether the service should start on boot. B(At least one of state and enabled are required.) + type: bool + pattern: + type: str + description: + - If the service does not respond to the 'running' command, name a + substring to look for as would be found in the output of the I(ps) + command as a stand-in for a 'running' result. If the string is found, + the service will be assumed to be running. +notes: + - One option other than name is required. 
+requirements:
+    - An OpenWrt system (with python)
+'''
+
+EXAMPLES = '''
+- name: Start service httpd, if not running
+  community.general.openwrt_init:
+    state: started
+    name: httpd
+
+- name: Stop service cron, if running
+  community.general.openwrt_init:
+    name: cron
+    state: stopped
+
+- name: Reload service httpd, in all cases
+  community.general.openwrt_init:
+    name: httpd
+    state: reloaded
+
+- name: Enable service httpd
+  community.general.openwrt_init:
+    name: httpd
+    enabled: true
+'''
+
+RETURN = '''
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+module = None
+init_script = None
+
+
+# ===============================
+# Check if service is enabled
+def is_enabled():
+    rc, dummy, dummy = module.run_command([init_script, 'enabled'])
+    return rc == 0
+
+
+# ===========================================
+# Main control flow
+def main():
+    global module, init_script
+    # init
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True, type='str', aliases=['service']),
+            state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']),
+            enabled=dict(type='bool'),
+            pattern=dict(type='str'),
+        ),
+        supports_check_mode=True,
+        required_one_of=[('state', 'enabled')],
+    )
+
+    # initialize
+    service = module.params['name']
+    init_script = '/etc/init.d/' + service
+    result = {
+        'name': service,
+        'changed': False,
+    }
+    # check if service exists
+    if not os.path.exists(init_script):
+        module.fail_json(msg='service %s does not exist' % service)
+
+    # Enable/disable service startup at boot if requested
+    if module.params['enabled'] is not None:
+        # do we need to enable the service?
+        enabled = is_enabled()
+
+        # default to current state
+        result['enabled'] = enabled
+
+        # Change enable/disable if needed
+        if enabled != module.params['enabled']:
+            result['changed'] = True
+            action = 'enable' if module.params['enabled'] else 'disable'
+
+            if not module.check_mode:
+                rc, dummy, err = module.run_command([init_script, action])
+                # OpenWrt init scripts can return a non-zero exit code on a successful 'enable'
+                # command if the init script doesn't contain a STOP value, so we ignore the exit
+                # code and explicitly check if the service is now in the desired state
+                if is_enabled() != module.params['enabled']:
+                    module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+            result['enabled'] = not enabled
+
+    if module.params['state'] is not None:
+        running = False
+
+        # check if service is currently running
+        if module.params['pattern']:
+            # Find ps binary
+            psbin = module.get_bin_path('ps', True)
+
+            # this should be busybox ps, so we only want/need the 'w' option
+            rc, psout, dummy = module.run_command([psbin, 'w'])
+            # If rc is 0, set running as appropriate
+            if rc == 0:
+                lines = psout.split("\n")
+                running = any((module.params['pattern'] in line and "pattern=" not in line) for line in lines)
+        else:
+            rc, dummy, dummy = module.run_command([init_script, 'running'])
+            if rc == 0:
+                running = True
+
+        # default to desired state
+        result['state'] = module.params['state']
+
+        # determine action, if any
+        action = None
+        if module.params['state'] == 'started':
+            if not running:
+                action = 'start'
+                result['changed'] = True
+        elif module.params['state'] == 'stopped':
+            if running:
+                action = 'stop'
+                result['changed'] = True
+        else:
+            action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
+            result['state'] = 'started'
+            result['changed'] = True
+
+        if action:
+            if not module.check_mode:
+                rc, dummy, err = module.run_command([init_script, action])
+                if rc != 0:
+                    module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/opkg.py b/ansible_collections/community/general/plugins/modules/opkg.py
new file mode 100644
index 000000000..d2ac314d0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/opkg.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Patrick Pelletier
+# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: opkg
+author: "Patrick Pelletier (@skinp)"
+short_description: Package manager for OpenWrt and OpenEmbedded/Yocto based Linux distributions
+description:
+    - Manages ipk packages for OpenWrt and OpenEmbedded/Yocto based Linux distributions.
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: none
+    diff_mode:
+        support: none
+options:
+    name:
+        description:
+            - Name of package(s) to install/remove.
+            - C(NAME=VERSION) syntax is also supported to install a package
+              in a certain version. See the examples. This only works on Yocto based
+              Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is
+              supported since community.general 6.2.0.
+        aliases: [pkg]
+        required: true
+        type: list
+        elements: str
+    state:
+        description:
+            - State of the package.
+        choices: [ 'present', 'absent', 'installed', 'removed' ]
+        default: present
+        type: str
+    force:
+        description:
+            - The C(opkg --force) parameter to use.
+        choices:
+            - ""
+            - "depends"
+            - "maintainer"
+            - "reinstall"
+            - "overwrite"
+            - "downgrade"
+            - "space"
+            - "postinstall"
+            - "remove"
+            - "checksum"
+            - "removal-of-dependent-packages"
+        default: ""
+        type: str
+    update_cache:
+        description:
+            - Update the package DB first.
+ default: false + type: bool +requirements: + - opkg + - python +''' +EXAMPLES = ''' +- name: Install foo + community.general.opkg: + name: foo + state: present + +- name: Install foo in version 1.2 (opkg>=0.3.2 on Yocto based Linux distributions) + community.general.opkg: + name: foo=1.2 + state: present + +- name: Update cache and install foo + community.general.opkg: + name: foo + state: present + update_cache: true + +- name: Remove foo + community.general.opkg: + name: foo + state: absent + +- name: Remove foo and bar + community.general.opkg: + name: + - foo + - bar + state: absent + +- name: Install foo using overwrite option forcibly + community.general.opkg: + name: foo + state: present + force: overwrite +''' + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + +class Opkg(StateModuleHelper): + module = dict( + argument_spec=dict( + name=dict(aliases=["pkg"], required=True, type="list", elements="str"), + state=dict(default="present", choices=["present", "installed", "absent", "removed"]), + force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", + "checksum", "removal-of-dependent-packages"]), + update_cache=dict(default=False, type='bool'), + ), + ) + + def __init_module__(self): + self.vars.set("install_c", 0, output=False, change=True) + self.vars.set("remove_c", 0, output=False, change=True) + + state_map = dict( + query="list-installed", + present="install", + installed="install", + absent="remove", + removed="remove", + ) + + def _force(value): + if value == "": + value = None + return cmd_runner_fmt.as_optval("--force-")(value, ctx_ignore_none=True) + + self.runner = CmdRunner( + self.module, + command="opkg", + arg_formats=dict( + package=cmd_runner_fmt.as_list(), + state=cmd_runner_fmt.as_map(state_map), + force=cmd_runner_fmt.as_func(_force), + update_cache=cmd_runner_fmt.as_bool("update") + ), + ) + + if self.vars.update_cache: + rc, dummy, dummy = self.runner("update_cache").run() + if rc != 0: + self.do_raise("could not update package db") + + @staticmethod + def split_name_and_version(package): + """ Split the name and the version when using the NAME=VERSION syntax """ + splitted = package.split('=', 1) + if len(splitted) == 1: + return splitted[0], None + else: + return splitted[0], splitted[1] + + def _package_in_desired_state(self, name, want_installed, version=None): + dummy, out, dummy = self.runner("state package").run(state="query", package=name) + + has_package = out.startswith(name + " - %s" % ("" if not version else (version + " "))) + return want_installed == has_package + + def state_present(self): + with self.runner("state force package") as ctx: + for package in self.vars.name: + pkg_name, pkg_version = self.split_name_and_version(package) + if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) or self.vars.force == "reinstall": + ctx.run(package=package) + if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version): + self.do_raise("failed to install %s" % package) + self.vars.install_c += 1 + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + if self.vars.install_c > 0: + self.vars.msg = "installed %s package(s)" % (self.vars.install_c) + else: + self.vars.msg = "package(s) already present" + + def state_absent(self): + with 
self.runner("state force package") as ctx:
+            for package in self.vars.name:
+                package, dummy = self.split_name_and_version(package)
+                if not self._package_in_desired_state(package, want_installed=False):
+                    ctx.run(package=package)
+                    if not self._package_in_desired_state(package, want_installed=False):
+                        self.do_raise("failed to remove %s" % package)
+                    self.vars.remove_c += 1
+            if self.verbosity >= 4:
+                self.vars.run_info = ctx.run_info
+        if self.vars.remove_c > 0:
+            self.vars.msg = "removed %s package(s)" % (self.vars.remove_c)
+        else:
+            self.vars.msg = "package(s) already absent"
+
+    state_installed = state_present
+    state_removed = state_absent
+
+
+def main():
+    Opkg.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/osx_defaults.py b/ansible_collections/community/general/plugins/modules/osx_defaults.py
new file mode 100644
index 000000000..161584373
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/osx_defaults.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, GeekChimp - Franck Nijhof (DO NOT CONTACT!)
+# Copyright (c) 2019, Ansible project
+# Copyright (c) 2019, Abhijeet Kasurde
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: osx_defaults
+author:
+# DO NOT RE-ADD GITHUB HANDLE!
+- Franck Nijhof (!UNKNOWN)
+short_description: Manage macOS user defaults
+description:
+  - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
+  - macOS applications and other programs use the defaults system to record user preferences and other
+    information that must be maintained when the applications are not running (such as default font for new
+    documents, or the position of an Info panel).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  domain:
+    description:
+      - The domain is a domain name of the form C(com.companyname.appname).
+    type: str
+    default: NSGlobalDomain
+  host:
+    description:
+      - The host on which the preference should apply.
+      - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
+    type: str
+  key:
+    description:
+      - The key of the user preference.
+    type: str
+  type:
+    description:
+      - The type of value to write.
+    type: str
+    choices: [ array, bool, boolean, date, float, int, integer, string ]
+    default: string
+  array_add:
+    description:
+      - Add new elements to the array for a key which has an array as its value.
+    type: bool
+    default: false
+  value:
+    description:
+      - The value to write.
+      - Only required when I(state=present).
+    type: raw
+  state:
+    description:
+      - The state of the user defaults.
+      - If set to C(list) will query the given parameter specified by C(key). Returns C(null) if nothing is found or the key is mis-spelled.
+      - C(list) added in version 2.8.
+    type: str
+    choices: [ absent, list, present ]
+    default: present
+  path:
+    description:
+      - The path in which to search for C(defaults).
+    type: str
+    default: /usr/bin:/usr/local/bin
+notes:
+  - macOS caches defaults. You may need to log out and log in to apply the changes.
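+  - Use I(state=list) to read the current value of a key without changing it.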
+''' + +EXAMPLES = r''' +- name: Set boolean valued key for application domain + community.general.osx_defaults: + domain: com.apple.Safari + key: IncludeInternalDebugMenu + type: bool + value: true + state: present + +- name: Set string valued key for global domain + community.general.osx_defaults: + domain: NSGlobalDomain + key: AppleMeasurementUnits + type: string + value: Centimeters + state: present + +- name: Set int valued key for arbitrary plist + community.general.osx_defaults: + domain: /Library/Preferences/com.apple.SoftwareUpdate + key: AutomaticCheckEnabled + type: int + value: 1 + become: true + +- name: Set int valued key only for the current host + community.general.osx_defaults: + domain: com.apple.screensaver + host: currentHost + key: showClock + type: int + value: 1 + +- name: Defaults to global domain and setting value + community.general.osx_defaults: + key: AppleMeasurementUnits + type: string + value: Centimeters + +- name: Setting an array valued key + community.general.osx_defaults: + key: AppleLanguages + type: array + value: + - en + - nl + +- name: Removing a key + community.general.osx_defaults: + domain: com.geekchimp.macable + key: ExampleKeyToRemove + state: absent +''' + +from datetime import datetime +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import binary_type, text_type + + +# exceptions --------------------------------------------------------------- {{{ +class OSXDefaultsException(Exception): + def __init__(self, msg): + self.message = msg + + +# /exceptions -------------------------------------------------------------- }}} + +# class MacDefaults -------------------------------------------------------- {{{ +class OSXDefaults(object): + """ Class to manage Mac OS user defaults """ + + # init ---------------------------------------------------------------- {{{ + def __init__(self, module): + """ Initialize this module. 
Finds 'defaults' executable and preps the parameters """
+        # Initial var for storing current defaults value
+        self.current_value = None
+        self.module = module
+        self.domain = module.params['domain']
+        self.host = module.params['host']
+        self.key = module.params['key']
+        self.type = module.params['type']
+        self.array_add = module.params['array_add']
+        self.value = module.params['value']
+        self.state = module.params['state']
+        self.path = module.params['path']
+
+        # Try to find the defaults executable
+        self.executable = self.module.get_bin_path(
+            'defaults',
+            required=False,
+            opt_dirs=self.path.split(':'),
+        )
+
+        if not self.executable:
+            raise OSXDefaultsException("Unable to locate defaults executable.")
+
+        # Ensure the value is the correct type
+        if self.state != 'absent':
+            self.value = self._convert_type(self.type, self.value)
+
+    # /init --------------------------------------------------------------- }}}
+
+    # tools --------------------------------------------------------------- {{{
+    @staticmethod
+    def is_int(value):
+        as_str = str(value)
+        if as_str.startswith("-"):
+            return as_str[1:].isdigit()
+        else:
+            return as_str.isdigit()
+
+    @staticmethod
+    def _convert_type(data_type, value):
+        """ Converts value to given type """
+        if data_type == "string":
+            return str(value)
+        elif data_type in ["bool", "boolean"]:
+            if isinstance(value, (binary_type, text_type)):
+                value = value.lower()
+            if value in [True, 1, "true", "1", "yes"]:
+                return True
+            elif value in [False, 0, "false", "0", "no"]:
+                return False
+            raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
+        elif data_type == "date":
+            try:
+                return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
+            except ValueError:
+                raise OSXDefaultsException(
+                    "Invalid date value: {0}. Required format yyyy-mm-dd hh:mm:ss.".format(repr(value))
+                )
+        elif data_type in ["int", "integer"]:
+            if not OSXDefaults.is_int(value):
+                raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
+            return int(value)
+        elif data_type == "float":
+            try:
+                value = float(value)
+            except ValueError:
+                raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
+            return value
+        elif data_type == "array":
+            if not isinstance(value, list):
+                raise OSXDefaultsException("Invalid value. Expected value to be an array")
+            return value
+
+        raise OSXDefaultsException('Type is not supported: {0}'.format(data_type))
+
+    def _host_args(self):
+        """ Returns a normalized list of commandline arguments based on the "host" attribute """
+        if self.host is None:
+            return []
+        elif self.host == 'currentHost':
+            return ['-currentHost']
+        else:
+            return ['-host', self.host]
+
+    def _base_command(self):
+        """ Returns a list containing the "defaults" executable and any common base arguments """
+        return [self.executable] + self._host_args()
+
+    @staticmethod
+    def _convert_defaults_str_to_list(value):
+        """ Converts array output from defaults to a list """
+        # Split output of defaults. Every line contains a value
+        value = value.splitlines()
+
+        # Remove first and last item, those are not actual values
+        value.pop(0)
+        value.pop(-1)
+
+        # Remove spaces at beginning and comma (,) at the end, unquote and unescape double quotes
+        value = [re.sub('^ *"?|"?,? *$', '', x.replace('\\"', '"')) for x in value]
+
+        return value
+
+    # /tools -------------------------------------------------------------- }}}
+
+    # commands ------------------------------------------------------------ {{{
+    def read(self):
+        """ Reads value of this domain & key from defaults """
+        # First try to find out the type
+        rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
+
+        # If RC is 1, the key does not exist
+        if rc == 1:
+            return None
+
+        # If the RC is not 0, something else went wrong
+        if rc != 0:
+            raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % err)
+
+        # OK, let's parse the type from the output
+        data_type = out.strip().replace('Type is ', '')
+
+        # Now get the current value
+        rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
+
+        # Strip output
+        out = out.strip()
+
+        # A non-zero RC at this point is unexpected
+        if rc != 0:
+            raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % err)
+
+        # Convert string to list when type is array
+        if data_type == "array":
+            out = self._convert_defaults_str_to_list(out)
+
+        # Store the current_value
+        self.current_value = self._convert_type(data_type, out)
+
+    def write(self):
+        """ Writes value to this domain & key to defaults """
+        # We need to convert some values so the defaults commandline understands it
+        if isinstance(self.value, bool):
+            if self.value:
+                value = "TRUE"
+            else:
+                value = "FALSE"
+        elif isinstance(self.value, (int, float)):
+            value = str(self.value)
+        elif self.array_add and self.current_value is not None:
+            value = list(set(self.value) - set(self.current_value))
+        elif isinstance(self.value, datetime):
+            value = self.value.strftime('%Y-%m-%d %H:%M:%S')
+        else:
+            value = self.value
+
+        # When the type is array and array_add is enabled, morph the type :)
+        if self.type == "array" and self.array_add:
+            self.type = "array-add"
+
+        # All values should be a list, for easy passing it to the command
+        if not isinstance(value, list):
+            value = [value]
+
+        rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value,
+                                               expand_user_and_vars=False)
+
+        if rc != 0:
+            raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % err)
+
+    def delete(self):
+        """ Deletes defaults key from domain """
+        rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
+        if rc != 0:
+            raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % err)
+
+    # /commands ----------------------------------------------------------- }}}
+
+    # run ----------------------------------------------------------------- {{{
+    """ Does the magic! :) """
+
+    def run(self):
+
+        # Get the current value from defaults
+        self.read()
+
+        if self.state == 'list':
+            self.module.exit_json(key=self.key, value=self.current_value)
+
+        # Handle absent state
+        if self.state == "absent":
+            if self.current_value is None:
+                return False
+            if self.module.check_mode:
+                return True
+            self.delete()
+            return True
+
+        # There is a type mismatch! Given type does not match the type in defaults
+        value_type = type(self.value)
+        if self.current_value is not None and not isinstance(self.current_value, value_type):
+            raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__)
+
+        # Current value matches the given value. Nothing needs to be done. Arrays need extra care
+        if self.type == "array" and self.current_value is not None and not self.array_add and \
+                set(self.current_value) == set(self.value):
+            return False
+        elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0:
+            return False
+        elif self.current_value == self.value:
+            return False
+
+        if self.module.check_mode:
+            return True
+
+        # Change/Create/Set given key/value for domain in defaults
+        self.write()
+        return True
+
+    # /run ---------------------------------------------------------------- }}}
+
+
+# /class MacDefaults ------------------------------------------------------ }}}
+
+
+# main -------------------------------------------------------------------- {{{
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            domain=dict(type='str', default='NSGlobalDomain'),
+            host=dict(type='str'),
+            key=dict(type='str', no_log=False),
+            type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']),
+            array_add=dict(type='bool', default=False),
+            value=dict(type='raw'),
+            state=dict(type='str', default='present', choices=['absent', 'list', 'present']),
+            path=dict(type='str', default='/usr/bin:/usr/local/bin'),
+        ),
+        supports_check_mode=True,
+        required_if=(
+            ('state', 'present', ['value']),
+        ),
+    )
+
+    try:
+        defaults = OSXDefaults(module=module)
+        module.exit_json(changed=defaults.run())
+    except OSXDefaultsException as e:
+        module.fail_json(msg=e.message)
+
+
+# /main ------------------------------------------------------------------- }}}
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py b/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
new file mode 100644
index 000000000..cd3639a4c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_failover
+short_description: Manage OVH IP failover address
+description:
+    - Manage OVH (French European hosting provider) IP failover addresses. For now, this module can only be used to move
+      an IP failover (or failover block) between services.
+author: "Pascal HERAUD (@pascalheraud)"
+notes:
+    - Uses the Python OVH API U(https://github.com/ovh/python-ovh).
+      You have to create an application (a key and secret) with a consumer
+      key as described at U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+    - ovh >= 0.4.8
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+    name:
+        required: true
+        description:
+            - The IP address to manage (can be a single IP like 1.1.1.1
+              or a block like 1.1.1.1/28)
+        type: str
+    service:
+        required: true
+        description:
+            - The name of the OVH service this IP address should be routed to
+        type: str
+    endpoint:
+        required: true
+        description:
+            - The endpoint to use (for instance ovh-eu)
+        type: str
+    wait_completion:
+        required: false
+        default: true
+        type: bool
+        description:
+            - If true, the module will wait for the IP address to be moved.
+              If false, exit without waiting. The taskId will be returned
+              in module output
+    wait_task_completion:
+        required: false
+        default: 0
+        description:
+            - If not 0, the module will wait for this task id to be
+              completed. Use wait_task_completion if you want to wait for
+              completion of a previously executed task with
+              wait_completion=false. You can execute this module repeatedly on
+              a list of failover IPs using wait_completion=false (see examples)
+        type: int
+    application_key:
+        required: true
+        description:
+            - The applicationKey to use
+        type: str
+    application_secret:
+        required: true
+        description:
+            - The application secret to use
+        type: str
+    consumer_key:
+        required: true
+        description:
+            - The consumer key to use
+        type: str
+    timeout:
+        required: false
+        default: 120
+        description:
+            - The timeout in seconds used to wait for a task to be
+              completed.
+        type: int
+
+'''
+
+EXAMPLES = '''
+# Route an IP address 1.1.1.1 to the service ns666.ovh.net
+- community.general.ovh_ip_failover:
+    name: 1.1.1.1
+    service: ns666.ovh.net
+    endpoint: ovh-eu
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+- community.general.ovh_ip_failover:
+    name: 1.1.1.1
+    service: ns666.ovh.net
+    endpoint: ovh-eu
+    wait_completion: false
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+  register: moved
+- community.general.ovh_ip_failover:
+    name: 1.1.1.1
+    service: ns666.ovh.net
+    endpoint: ovh-eu
+    wait_task_completion: "{{moved.taskId}}"
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+    import ovh
+    import ovh.exceptions
+    from ovh.exceptions import APIError
+    HAS_OVH = True
+except ImportError:
+    HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote_plus
+
+
+def getOvhClient(ansibleModule):
+    endpoint = ansibleModule.params.get('endpoint')
+    application_key = ansibleModule.params.get('application_key')
+    application_secret = ansibleModule.params.get('application_secret')
+    consumer_key = ansibleModule.params.get('consumer_key')
+
+    return ovh.Client(
+        endpoint=endpoint,
+        application_key=application_key,
+        application_secret=application_secret,
+        consumer_key=consumer_key
+    )
+
+
+def waitForNoTask(client, name, timeout):
+    currentTimeout = timeout
+    while client.get('/ip/{0}/task'.format(quote_plus(name)),
+                     function='genericMoveFloatingIp',
+                     status='todo'):
+        time.sleep(1)  # Delay for 1 sec
+        currentTimeout -= 1
+        if currentTimeout < 0:
+            return False
+    return True
+
+
+def waitForTaskDone(client, name, taskId, timeout):
+    currentTimeout = timeout
+    while True:
+        task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
+        if task['status'] == 'done':
+            return True
+        time.sleep(5)  # Poll every 5 seconds; completion can take a while, so do not hammer the API
+        currentTimeout -= 5
+        if currentTimeout < 0:
+            return False
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            service=dict(required=True),
+            endpoint=dict(required=True),
+            wait_completion=dict(default=True, type='bool'),
+            wait_task_completion=dict(default=0, type='int'),
+            application_key=dict(required=True, no_log=True),
+            application_secret=dict(required=True, no_log=True),
+            consumer_key=dict(required=True, no_log=True),
+            timeout=dict(default=120, type='int')
+        ),
+        supports_check_mode=True
+    )
+
+    result = dict(
+        changed=False
+    )
+
+    if not HAS_OVH:
+        module.fail_json(msg='ovh-api python module is required to run this module')
+
+    # Get parameters
+    name = module.params.get('name')
+    service = module.params.get('service')
+    timeout = module.params.get('timeout')
+    wait_completion = module.params.get('wait_completion')
+    wait_task_completion = module.params.get('wait_task_completion')
+
+    # Connect to OVH API
+    client = getOvhClient(module)
+
+    # Check that the IP failover address exists
+    try:
+        ips = client.get('/ip', ip=name, type='failover')
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of ips, '
+                'check application key, secret, consumerkey and parameters. '
+                'Error returned by OVH api was : {0}'.format(apiError))
+
+    if name not in ips and '{0}/32'.format(name) not in ips:
+        module.fail_json(msg='IP {0} does not exist'.format(name))
+
+    # Check that no task is pending before going on
+    try:
+        if not waitForNoTask(client, name, timeout):
+            module.fail_json(
+                msg='Timeout of {0} seconds while waiting for no pending '
+                    'tasks before executing the module '.format(timeout))
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of pending tasks '
+                'of the ip, check application key, secret, consumerkey '
+                'and parameters. Error returned by OVH api was : {0}'
+            .format(apiError))
+
+    try:
+        ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the properties '
+                'of the ip, check application key, secret, consumerkey '
+                'and parameters. Error returned by OVH api was : {0}'
+            .format(apiError))
+
+    if ipproperties['routedTo']['serviceName'] != service:
+        if not module.check_mode:
+            if wait_task_completion == 0:
+                # Move the IP and get the created taskId
+                task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
+                taskId = task['taskId']
+                result['moved'] = True
+            else:
+                # Just wait for the given taskId to be completed
+                taskId = wait_task_completion
+                result['moved'] = False
+            result['taskId'] = taskId
+            if wait_completion or wait_task_completion != 0:
+                if not waitForTaskDone(client, name, taskId, timeout):
+                    module.fail_json(
+                        msg='Timeout of {0} seconds while waiting for completion '
+                            'of move ip to service'.format(timeout))
+                result['waited'] = True
+            else:
+                result['waited'] = False
+        result['changed'] = True
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py b/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 000000000..f70b5804a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+    - Manage OVH (French European hosting provider) LoadBalancing IP backends
+author: Pascal Heraud (@pascalheraud)
+notes:
+    - Uses the Python OVH API U(https://github.com/ovh/python-ovh).
+      You have to create an application (a key and secret) with a consumer
+      key as described at U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+    - ovh > 0.3.5
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: none
+    diff_mode:
+        support: none
+options:
+    name:
+        required: true
+        description:
+            - The internal name of the LoadBalancing IP (of the form ip-X.X.X.X)
+        type: str
+    backend:
+        required: true
+        description:
+            - The IP address of the backend to update / modify / delete
+        type: str
+    state:
+        default: present
+        choices: ['present', 'absent']
+        description:
+            - Determines whether the backend is to be created/modified
+              or deleted
+        type: str
+    probe:
+        default: 'none'
+        choices: ['none', 'http', 'icmp', 'oco']
+        description:
+            - Determines the type of probe to use for this backend
+        type: str
+    weight:
+        default: 8
+        description:
+            - Determines the weight for this backend
+        type: int
+    endpoint:
+        required: true
+        description:
+            - The endpoint to use (for instance ovh-eu)
+        type: str
+    application_key:
+        required: true
+        description:
+            - The applicationKey to use
+        type: str
+    application_secret:
+        required: true
+        description:
+            - The application secret to use
+        type: str
+    consumer_key:
+        required: true
+        description:
+            - The consumer key to use
+        type: str
+    timeout:
+        default: 120
+        description:
+            - The timeout in seconds used to wait for a task to be
+              completed.
+        type: int
+
+'''
+
+EXAMPLES = '''
+- name: Add or modify the backend '212.1.1.1' on the LoadBalancing 'ip-1.1.1.1'
+  community.general.ovh_ip_loadbalancing_backend:
+    name: ip-1.1.1.1
+    backend: 212.1.1.1
+    state: present
+    probe: none
+    weight: 8
+    endpoint: ovh-eu
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+
+- name: Remove the backend '212.1.1.1' from the LoadBalancing 'ip-1.1.1.1'
+  community.general.ovh_ip_loadbalancing_backend:
+    name: ip-1.1.1.1
+    backend: 212.1.1.1
+    state: absent
+    endpoint: ovh-eu
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+    import ovh
+    import ovh.exceptions
+    from ovh.exceptions import APIError
+    HAS_OVH = True
+except ImportError:
+    HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def getOvhClient(ansibleModule):
+    endpoint = ansibleModule.params.get('endpoint')
+    application_key = ansibleModule.params.get('application_key')
+    application_secret = ansibleModule.params.get('application_secret')
+    consumer_key = ansibleModule.params.get('consumer_key')
+
+    return ovh.Client(
+        endpoint=endpoint,
+        application_key=application_key,
+        application_secret=application_secret,
+        consumer_key=consumer_key
+    )
+
+
+def waitForNoTask(client, name, timeout):
+    currentTimeout = timeout
+    while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+        time.sleep(1)  # Delay for 1 sec
+        currentTimeout -= 1
+        if currentTimeout < 0:
+            return False
+    return True
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            backend=dict(required=True),
+            weight=dict(default=8, type='int'),
+            probe=dict(default='none',
+                       choices=['none', 'http', 'icmp', 'oco']),
+            state=dict(default='present', choices=['present', 'absent']),
+            endpoint=dict(required=True),
+            application_key=dict(required=True, no_log=True),
+            application_secret=dict(required=True, no_log=True),
+            consumer_key=dict(required=True, no_log=True),
+            timeout=dict(default=120, type='int')
+        )
+    )
+
+    if not HAS_OVH:
+        module.fail_json(msg='ovh-api python module '
+                             'is required to run this module')
+
+    # Get parameters
+    name = module.params.get('name')
+    state = module.params.get('state')
+    backend = module.params.get('backend')
+    weight = module.params.get('weight')
+    probe = module.params.get('probe')
+    timeout = module.params.get('timeout')
+
+    # Connect to OVH API
+    client = getOvhClient(module)
+
+    # Check that the load balancing exists
+    try:
+        loadBalancings = client.get('/ip/loadBalancing')
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of loadBalancing, '
+                'check application key, secret, consumerkey and parameters. '
+                'Error returned by OVH api was : {0}'.format(apiError))
+
+    if name not in loadBalancings:
+        module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
+
+    # Check that no task is pending before going on
+    try:
+        if not waitForNoTask(client, name, timeout):
+            module.fail_json(
+                msg='Timeout of {0} seconds while waiting for no pending '
+                    'tasks before executing the module '.format(timeout))
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of pending tasks '
+                'of the loadBalancing, check application key, secret, consumerkey '
+                'and parameters. Error returned by OVH api was : {0}'
+            .format(apiError))
+
+    try:
+        backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of backends '
+                'of the loadBalancing, check application key, secret, consumerkey '
+                'and parameters. Error returned by OVH api was : {0}'
+            .format(apiError))
+
+    backendExists = backend in backends
+    moduleChanged = False
+    if state == "absent":
+        if backendExists:
+            # Remove backend
+            try:
+                client.delete(
+                    '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+                if not waitForNoTask(client, name, timeout):
+                    module.fail_json(
+                        msg='Timeout of {0} seconds while waiting for completion '
+                            'of removing backend task'.format(timeout))
+            except APIError as apiError:
+                module.fail_json(
+                    msg='Unable to call OVH api for deleting the backend, '
+                        'check application key, secret, consumerkey and '
+                        'parameters. Error returned by OVH api was : {0}'
+                    .format(apiError))
+            moduleChanged = True
+    else:
+        if backendExists:
+            # Get properties
+            try:
+                backendProperties = client.get(
+                    '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+            except APIError as apiError:
+                module.fail_json(
+                    msg='Unable to call OVH api for getting the backend properties, '
+                        'check application key, secret, consumerkey and '
+                        'parameters. Error returned by OVH api was : {0}'
+                    .format(apiError))
+
+            if backendProperties['weight'] != weight:
+                # Change weight
+                try:
+                    client.post(
+                        '/ip/loadBalancing/{0}/backend/{1}/setWeight'
+                        .format(name, backend), weight=weight)
+                    if not waitForNoTask(client, name, timeout):
+                        module.fail_json(
+                            msg='Timeout of {0} seconds while waiting for completion '
+                                'of setWeight to backend task'
+                            .format(timeout))
+                except APIError as apiError:
+                    module.fail_json(
+                        msg='Unable to call OVH api for updating the weight of the '
+                            'backend, check application key, secret, consumerkey '
+                            'and parameters. Error returned by OVH api was : {0}'
+                        .format(apiError))
+                moduleChanged = True
+
+            if backendProperties['probe'] != probe:
+                # Change probe
+                backendProperties['probe'] = probe
+                try:
+                    client.put(
+                        '/ip/loadBalancing/{0}/backend/{1}'
+                        .format(name, backend), probe=probe)
+                    if not waitForNoTask(client, name, timeout):
+                        module.fail_json(
+                            msg='Timeout of {0} seconds while waiting for completion of '
+                                'setProbe to backend task'
+                            .format(timeout))
+                except APIError as apiError:
+                    module.fail_json(
+                        msg='Unable to call OVH api for updating the probe of '
+                            'the backend, check application key, secret, '
+                            'consumerkey and parameters. Error returned by OVH api '
+                            'was : {0}'
+                        .format(apiError))
+                moduleChanged = True
+
+        else:
+            # Creates backend
+            try:
+                try:
+                    client.post('/ip/loadBalancing/{0}/backend'.format(name),
+                                ipBackend=backend, probe=probe, weight=weight)
+                except APIError as apiError:
+                    module.fail_json(
+                        msg='Unable to call OVH api for creating the backend, check '
+                            'application key, secret, consumerkey and parameters. '
+                            'Error returned by OVH api was : {0}'
+                        .format(apiError))
+
+                if not waitForNoTask(client, name, timeout):
+                    module.fail_json(
+                        msg='Timeout of {0} seconds while waiting for completion of '
+                            'backend creation task'.format(timeout))
+            except APIError as apiError:
+                module.fail_json(
+                    msg='Unable to call OVH api for creating the backend, check '
+                        'application key, secret, consumerkey and parameters. '
+                        'Error returned by OVH api was : {0}'.format(apiError))
+            moduleChanged = True
+
+    module.exit_json(changed=moduleChanged)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py b/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
new file mode 100644
index 000000000..43d64e618
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Francois Lallart (@fraff)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovh_monthly_billing
+author: Francois Lallart (@fraff)
+version_added: '0.2.0'
+short_description: Manage OVH monthly billing
+description:
+    - Enable monthly billing on OVH cloud instances (be aware that OVH does not allow disabling it).
+requirements: [ "ovh" ]
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+    project_id:
+        required: true
+        type: str
+        description:
+            - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET)
+    instance_id:
+        required: true
+        type: str
+        description:
+            - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET)
+    endpoint:
+        type: str
+        description:
+            - The endpoint to use (for instance ovh-eu)
+    application_key:
+        type: str
+        description:
+            - The applicationKey to use
+    application_secret:
+        type: str
+        description:
+            - The application secret to use
+    consumer_key:
+        type: str
+        description:
+            - The consumer key to use
+'''
+
+EXAMPLES = '''
+- name: Basic usage, using auth from /etc/ovh.conf
+  community.general.ovh_monthly_billing:
+    project_id: 0c727a20aa144485b70c44dee9123b46
+    instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948
+
+# Get openstack cloud ID and instance ID, OVH use them in its API
+- name: Get openstack cloud ID and instance ID
+  os_server_info:
+    cloud: myProjectName
+    region_name: myRegionName
+    server: myServerName
+  register: openstack_servers
+
+- name: Use IDs
+  community.general.ovh_monthly_billing:
+    project_id: "{{ openstack_servers.0.tenant_id }}"
+    instance_id: "{{ openstack_servers.0.id }}"
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+try:
+    import ovh
+    import ovh.exceptions
+    from ovh.exceptions import APIError
+    HAS_OVH = True
+except ImportError:
+    HAS_OVH = False
+    OVH_IMPORT_ERROR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            project_id=dict(required=True),
+            instance_id=dict(required=True),
+            endpoint=dict(required=False),
+            application_key=dict(required=False, no_log=True),
+            application_secret=dict(required=False, no_log=True),
+            consumer_key=dict(required=False, no_log=True),
+        ),
+        supports_check_mode=True
+    )
+
+    # Get parameters
+    project_id = module.params.get('project_id')
+    instance_id = module.params.get('instance_id')
+    endpoint = module.params.get('endpoint')
+    application_key = module.params.get('application_key')
+    application_secret = module.params.get('application_secret')
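+    # The endpoint and application_*/consumer_key parameters are optional: any
+    # that are left unset are resolved by the ovh.Client() call below from its
+    # configuration files (for example /etc/ovh.conf), as in the first example
+    # above.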
+    consumer_key = module.params.get('consumer_key')
+    project = ""
+    instance = ""
+    ovh_billing_status = ""
+
+    if not HAS_OVH:
+        module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
+
+    # Connect to OVH API
+    client = ovh.Client(
+        endpoint=endpoint,
+        application_key=application_key,
+        application_secret=application_secret,
+        consumer_key=consumer_key
+    )
+
+    # Check that the project exists
+    try:
+        project = client.get('/cloud/project/{0}'.format(project_id))
+    except ovh.exceptions.ResourceNotFoundError:
+        module.fail_json(msg='project {0} does not exist'.format(project_id))
+
+    # Check that the instance exists
+    try:
+        instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
+    except ovh.exceptions.ResourceNotFoundError:
+        module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
+
+    # Is monthlyBilling already enabled or pending?
+    if instance['monthlyBilling'] is not None:
+        if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
+            module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling'])
+
+    if module.check_mode:
+        module.exit_json(changed=True, msg="Dry Run!")
+
+    try:
+        ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
+        module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling'])
+    except APIError as apiError:
+        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
+
+    # We should never reach here
+    module.fail_json(msg='Internal ovh_monthly_billing module error')
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py b/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
new file mode 100644
index 000000000..47b827908
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Mathieu Bultel
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacemaker_cluster
+short_description: Manage pacemaker clusters
+author:
+  - Mathieu Bultel (@matbu)
+description:
+  - This module can manage a pacemaker cluster and nodes from Ansible using
+    the pacemaker CLI.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+    state:
+        description:
+            - Indicate desired state of the cluster
+        choices: [ cleanup, offline, online, restart ]
+        type: str
+    node:
+        description:
+            - Specify which node of the cluster you want to manage. None == the
+              cluster status itself, 'all' == check the status of all nodes.
+        type: str
+    timeout:
+        description:
+            - Timeout period (in seconds) after which the module considers the action to have failed
+        default: 300
+        type: int
+    force:
+        description:
+            - Force the change of the cluster state
+        type: bool
+        default: true
+'''
+EXAMPLES = '''
+---
+- name: Set cluster Online
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Set cluster state to online
+      community.general.pacemaker_cluster:
+        state: online
+'''
+
+RETURN = '''
+changed:
+    description: true if the cluster state has changed
+    type: bool
+    returned: always
+out:
+    description: The output of the current state of the cluster. It returns a
+        list of the node states.
+    type: str
+    sample: '[[" overcloud-controller-0", " Online"]]'
+    returned: always
+rc:
+    description: exit code of the module
+    type: bool
+    returned: always
+'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
+
+
+def get_cluster_status(module):
+    cmd = "pcs cluster status"
+    rc, out, err = module.run_command(cmd)
+    if _PCS_CLUSTER_DOWN in out:
+        return 'offline'
+    else:
+        return 'online'
+
+
+def get_node_status(module, node='all'):
+    if node == 'all':
+        cmd = "pcs cluster pcsd-status"
+    else:
+        cmd = "pcs cluster pcsd-status %s" % node
+    rc, out, err = module.run_command(cmd)
+    if rc == 1:
+        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+    status = []
+    for o in out.splitlines():
+        status.append(o.split(':'))
+    return status
+
+
+def clean_cluster(module, timeout):
+    cmd = "pcs resource cleanup"
+    rc, out, err = module.run_command(cmd)
+    if rc == 1:
+        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+
+def set_cluster(module, state, timeout, force):
+    if state == 'online':
+        cmd = "pcs cluster start"
+    if state == 'offline':
+        cmd = "pcs cluster stop"
+        if force:
+            cmd = "%s --force" % cmd
+    rc, out, err = module.run_command(cmd)
+    if rc == 1:
+        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+    t = time.time()
+    ready = False
+    while time.time() < t + timeout:
+        cluster_state = get_cluster_status(module)
+        if cluster_state == state:
+            ready = True
+            break
+    if not ready:
+        module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def set_node(module, state, timeout, force, node='all'):
+    # map states
+    if state == 'online':
+        cmd = "pcs cluster start"
+    if state == 'offline':
+        cmd = "pcs cluster stop"
+        if force:
+            cmd = "%s --force" % cmd
+
+    nodes_state = get_node_status(module, node)
+    for node in nodes_state:
+        if node[1].strip().lower() != state:
+            cmd = "%s %s" % (cmd, node[0].strip())
+            rc, out, err = module.run_command(cmd)
+            if rc == 1:
+                module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+    t = time.time()
+    ready = False
+    while time.time() < t + timeout:
+        nodes_state = get_node_status(module)
+        for node in nodes_state:
+            if node[1].strip().lower() == state:
+                ready = True
+                break
+    if not ready:
+        module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def main():
+    argument_spec = dict(
+        state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
+        node=dict(type='str'),
+        timeout=dict(type='int', default=300),
+        force=dict(type='bool', default=True),
+    )
+
+    module = AnsibleModule(
+        argument_spec,
+        supports_check_mode=True,
+    )
+    changed = False
+    state = module.params['state']
+    node =
module.params['node'] + force = module.params['force'] + timeout = module.params['timeout'] + + if state in ['online', 'offline']: + # Get cluster status + if node is None: + cluster_state = get_cluster_status(module) + if cluster_state == state: + module.exit_json(changed=changed, out=cluster_state) + else: + set_cluster(module, state, timeout, force) + cluster_state = get_cluster_status(module) + if cluster_state == state: + module.exit_json(changed=True, out=cluster_state) + else: + module.fail_json(msg="Fail to bring the cluster %s" % state) + else: + cluster_state = get_node_status(module, node) + # Check cluster state + for node_state in cluster_state: + if node_state[1].strip().lower() == state: + module.exit_json(changed=changed, out=cluster_state) + else: + # Set cluster status if needed + set_cluster(module, state, timeout, force) + cluster_state = get_node_status(module, node) + module.exit_json(changed=True, out=cluster_state) + + if state in ['restart']: + set_cluster(module, 'offline', timeout, force) + cluster_state = get_cluster_status(module) + if cluster_state == 'offline': + set_cluster(module, 'online', timeout, force) + cluster_state = get_cluster_status(module) + if cluster_state == 'online': + module.exit_json(changed=True, out=cluster_state) + else: + module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started") + else: + module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped") + + if state in ['cleanup']: + clean_cluster(module, timeout) + cluster_state = get_cluster_status(module) + module.exit_json(changed=True, + out=cluster_state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/packet_device.py b/ansible_collections/community/general/plugins/modules/packet_device.py new file mode 100644 index 000000000..d220c5f8f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/packet_device.py @@ -0,0 +1,682 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2016, Tomas Karasek +# Copyright (c) 2016, Matt Baldwin +# Copyright (c) 2016, Thibaud Morel l'Horset +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: packet_device + +short_description: Manage a bare metal server in the Packet Host + +description: + - Manage a bare metal server in the Packet Host (a "device" in the API terms). + - When the machine is created it can optionally wait for public IP address, or for active state. + - This module has a dependency on packet >= 1.0. + - API is documented at U(https://www.packet.net/developers/api/devices). + + +author: + - Tomas Karasek (@t0mk) + - Matt Baldwin (@baldwinSPC) + - Thibaud Morel l'Horset (@teebes) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + auth_token: + description: + - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + + count: + description: + - The number of devices to create. Count number can be included in hostname via the %d string formatter. + default: 1 + type: int + + count_offset: + description: + - From which number to start the count. 
+ default: 1 + type: int + + device_ids: + description: + - List of device IDs on which to operate. + type: list + elements: str + + tags: + description: + - List of device tags. + - Currently implemented only for device creation. + type: list + elements: str + version_added: '0.2.0' + + facility: + description: + - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/). + type: str + + features: + description: + - Dict with "features" for device creation. See Packet API docs for details. + type: dict + + hostnames: + description: + - A hostname of a device, or a list of hostnames. + - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count). + - If only one hostname, it might be expanded to list if I(count)>1. + aliases: [name] + type: list + elements: str + + locked: + description: + - Whether to lock a created device. + default: false + aliases: [lock] + type: bool + + operating_system: + description: + - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/). + type: str + + plan: + description: + - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/). + type: str + + project_id: + description: + - ID of project of the device. + required: true + type: str + + state: + description: + - Desired state of the device. + - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns. + - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout). + choices: [present, absent, active, inactive, rebooted] + default: present + type: str + + user_data: + description: + - Userdata blob made available to the machine + type: str + + wait_for_public_IPv: + description: + - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. + - If set to 4, it will wait until IPv4 is assigned to the instance. + - If set to 6, wait until public IPv6 is assigned to the instance. + choices: [4,6] + type: int + + wait_timeout: + description: + - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state). + - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice. + default: 900 + type: int + + ipxe_script_url: + description: + - URL of custom iPXE script for provisioning. + - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe). + type: str + default: '' + + always_pxe: + description: + - Persist PXE as the first boot option. + - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE. + default: false + type: bool + + +requirements: + - "packet-python >= 1.35" + +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass it to the auth_token parameter of the module instead. 
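+# A minimal sketch of passing the token explicitly; the project_id below is
+# the same placeholder used throughout these examples, not a real project.
+
+- name: Create a device, passing the API token as a module parameter
+  hosts: localhost
+  tasks:
+    - community.general.packet_device:
+        auth_token: "{{ lookup('env', 'PACKET_API_TOKEN') }}"
+        project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+        hostnames: myserver
+        operating_system: ubuntu_16_04
+        plan: baremetal_0
+        facility: sjc1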
+ +# Creating devices + +- name: Create 1 device + hosts: localhost + tasks: + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + tags: ci-xyz + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + +# Create the same device and wait until it is in state "active", (when it's +# ready for other API operations). Fail if the device is not "active" in +# 10 minutes. + +- name: Create device and wait up to 10 minutes for active state + hosts: localhost + tasks: + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active + wait_timeout: 600 + +- name: Create 3 ubuntu devices called server-01, server-02 and server-03 + hosts: localhost + tasks: + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: server-%02d + count: 3 + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + +- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH + hosts: localhost + tasks: + - name: Create 3 devices and register their facts + community.general.packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_stable + plan: baremetal_0 + facility: ewr1 + locked: true + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + wait_for_public_IPv: 4 + user_data: | + #cloud-config + ssh_authorized_keys: + - {{ lookup('file', 'my_packet_sshkey') }} + coreos: + etcd: + discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd.service + command: start + - name: fleet.service + command: start + register: newhosts + + - name: Wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + with_items: "{{ newhosts.devices }}" + + +# Other states of devices + +- name: Remove 3 devices by uuid + hosts: localhost + tasks: + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + state: absent + device_ids: + - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 + - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 + - 6bb4faf8-a638-4ac7-8f47-86fe514c301f +''' + +RETURN = ''' +changed: + description: True if a device was altered in any way (created, modified or removed) + type: bool + sample: true + returned: success + +devices: + description: Information about each device that was processed + type: list + sample: + - { + "hostname": "my-server.com", + "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", + "public_ipv4": "147.229.15.12", + "private-ipv4": "10.0.15.12", + "tags": [], + "locked": false, + "state": "provisioning", + "public_ipv6": "2604:1380:2:5200::3" + } + returned: success +''' # NOQA + + +import os +import re +import time +import uuid +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') +HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) +MAX_DEVICES = 100 + +PACKET_DEVICE_STATES = ( + 'queued', + 'provisioning', + 'failed', + 'powering_on', + 'active', + 'powering_off', + 'inactive', + 'rebooting', +) + +PACKET_API_TOKEN_ENV_VAR = 
"PACKET_API_TOKEN" + + +ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present'] + + +def serialize_device(device): + """ + Standard representation for a device as returned by various tasks:: + + { + 'id': 'device_id' + 'hostname': 'device_hostname', + 'tags': [], + 'locked': false, + 'state': 'provisioning', + 'ip_addresses': [ + { + "address": "147.75.194.227", + "address_family": 4, + "public": true + }, + { + "address": "2604:1380:2:5200::3", + "address_family": 6, + "public": true + }, + { + "address": "10.100.11.129", + "address_family": 4, + "public": false + } + ], + "private_ipv4": "10.100.11.129", + "public_ipv4": "147.75.194.227", + "public_ipv6": "2604:1380:2:5200::3", + } + + """ + device_data = {} + device_data['id'] = device.id + device_data['hostname'] = device.hostname + device_data['tags'] = device.tags + device_data['locked'] = device.locked + device_data['state'] = device.state + device_data['ip_addresses'] = [ + { + 'address': addr_data['address'], + 'address_family': addr_data['address_family'], + 'public': addr_data['public'], + } + for addr_data in device.ip_addresses + ] + # Also include each IPs as a key for easier lookup in roles. + # Key names: + # - public_ipv4 + # - public_ipv6 + # - private_ipv4 + # - private_ipv6 (if there is one) + for ipdata in device_data['ip_addresses']: + if ipdata['public']: + if ipdata['address_family'] == 6: + device_data['public_ipv6'] = ipdata['address'] + elif ipdata['address_family'] == 4: + device_data['public_ipv4'] = ipdata['address'] + elif not ipdata['public']: + if ipdata['address_family'] == 6: + # Packet doesn't give public ipv6 yet, but maybe one + # day they will + device_data['private_ipv6'] = ipdata['address'] + elif ipdata['address_family'] == 4: + device_data['private_ipv4'] = ipdata['address'] + return device_data + + +def is_valid_hostname(hostname): + return re.match(HOSTNAME_RE, hostname) is not None + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def listify_string_name_or_id(s): + if ',' in s: + return s.split(',') + else: + return [s] + + +def get_hostname_list(module): + # hostname is a list-typed param, so I guess it should return list + # (and it does, in Ansible 2.2.1) but in order to be defensive, + # I keep here the code to convert an eventual string to list + hostnames = module.params.get('hostnames') + count = module.params.get('count') + count_offset = module.params.get('count_offset') + if isinstance(hostnames, str): + hostnames = listify_string_name_or_id(hostnames) + if not isinstance(hostnames, list): + raise Exception("name %s is not convertible to list" % hostnames) + + # at this point, hostnames is a list + hostnames = [h.strip() for h in hostnames] + + if (len(hostnames) > 1) and (count > 1): + _msg = ("If you set count>1, you should only specify one hostname " + "with the %d formatter, not a list of hostnames.") + raise Exception(_msg) + + if (len(hostnames) == 1) and (count > 0): + hostname_spec = hostnames[0] + count_range = range(count_offset, count_offset + count) + if re.search(r"%\d{0,2}d", hostname_spec): + hostnames = [hostname_spec % i for i in count_range] + elif count > 1: + hostname_spec = '%s%%02d' % hostname_spec + hostnames = [hostname_spec % i for i in count_range] + + for hn in hostnames: + if not is_valid_hostname(hn): + raise Exception("Hostname '%s' does not seem to be valid" % hn) + + if len(hostnames) > MAX_DEVICES: + raise Exception("You specified too many 
hostnames, max is %d" % + MAX_DEVICES) + return hostnames + + +def get_device_id_list(module): + device_ids = module.params.get('device_ids') + + if isinstance(device_ids, str): + device_ids = listify_string_name_or_id(device_ids) + + device_ids = [di.strip() for di in device_ids] + + for di in device_ids: + if not is_valid_uuid(di): + raise Exception("Device ID '%s' does not seem to be valid" % di) + + if len(device_ids) > MAX_DEVICES: + raise Exception("You specified too many devices, max is %d" % + MAX_DEVICES) + return device_ids + + +def create_single_device(module, packet_conn, hostname): + + for param in ('hostnames', 'operating_system', 'plan'): + if not module.params.get(param): + raise Exception("%s parameter is required for new device." + % param) + project_id = module.params.get('project_id') + plan = module.params.get('plan') + tags = module.params.get('tags') + user_data = module.params.get('user_data') + facility = module.params.get('facility') + operating_system = module.params.get('operating_system') + locked = module.params.get('locked') + ipxe_script_url = module.params.get('ipxe_script_url') + always_pxe = module.params.get('always_pxe') + if operating_system != 'custom_ipxe': + for param in ('ipxe_script_url', 'always_pxe'): + if module.params.get(param): + raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param) + + device = packet_conn.create_device( + project_id=project_id, + hostname=hostname, + tags=tags, + plan=plan, + facility=facility, + operating_system=operating_system, + userdata=user_data, + locked=locked, + ipxe_script_url=ipxe_script_url, + always_pxe=always_pxe) + return device + + +def refresh_device_list(module, packet_conn, devices): + device_ids = [d.id for d in devices] + new_device_list = get_existing_devices(module, packet_conn) + return [d for d in new_device_list if d.id in device_ids] + + +def wait_for_devices_active(module, packet_conn, watched_devices): + wait_timeout = module.params.get('wait_timeout') + wait_timeout = time.time() + wait_timeout + refreshed = watched_devices + while wait_timeout > time.time(): + refreshed = refresh_device_list(module, packet_conn, watched_devices) + if all(d.state == 'active' for d in refreshed): + return refreshed + time.sleep(5) + raise Exception("Waiting for state \"active\" timed out for devices: %s" + % [d.hostname for d in refreshed if d.state != "active"]) + + +def wait_for_public_IPv(module, packet_conn, created_devices): + + def has_public_ip(addr_list, ip_v): + return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list) + + def all_have_public_ip(ds, ip_v): + return all(has_public_ip(d.ip_addresses, ip_v) for d in ds) + + address_family = module.params.get('wait_for_public_IPv') + + wait_timeout = module.params.get('wait_timeout') + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + refreshed = refresh_device_list(module, packet_conn, created_devices) + if all_have_public_ip(refreshed, address_family): + return refreshed + time.sleep(5) + + raise Exception("Waiting for IPv%d address timed out. 
Hostnames: %s" + % (address_family, [d.hostname for d in created_devices])) + + +def get_existing_devices(module, packet_conn): + project_id = module.params.get('project_id') + return packet_conn.list_devices( + project_id, params={ + 'per_page': MAX_DEVICES}) + + +def get_specified_device_identifiers(module): + if module.params.get('device_ids'): + device_id_list = get_device_id_list(module) + return {'ids': device_id_list, 'hostnames': []} + elif module.params.get('hostnames'): + hostname_list = get_hostname_list(module) + return {'hostnames': hostname_list, 'ids': []} + + +def act_on_devices(module, packet_conn, target_state): + specified_identifiers = get_specified_device_identifiers(module) + existing_devices = get_existing_devices(module, packet_conn) + changed = False + create_hostnames = [] + if target_state in ['present', 'active', 'rebooted']: + # states where we might create non-existing specified devices + existing_devices_names = [ed.hostname for ed in existing_devices] + create_hostnames = [hn for hn in specified_identifiers['hostnames'] + if hn not in existing_devices_names] + + process_devices = [d for d in existing_devices + if (d.id in specified_identifiers['ids']) or + (d.hostname in specified_identifiers['hostnames'])] + + if target_state != 'present': + _absent_state_map = {} + for s in PACKET_DEVICE_STATES: + _absent_state_map[s] = packet.Device.delete + + state_map = { + 'absent': _absent_state_map, + 'active': {'inactive': packet.Device.power_on, + 'provisioning': None, 'rebooting': None + }, + 'inactive': {'active': packet.Device.power_off}, + 'rebooted': {'active': packet.Device.reboot, + 'inactive': packet.Device.power_on, + 'provisioning': None, 'rebooting': None + }, + } + + # First do non-creation actions, it might be faster + for d in process_devices: + if d.state == target_state: + continue + if d.state in state_map[target_state]: + api_operation = state_map[target_state].get(d.state) + if api_operation is not None: + api_operation(d) + changed = True + else: + _msg = ( + "I don't know how to process existing device %s from state %s " + "to state %s" % + (d.hostname, d.state, target_state)) + raise Exception(_msg) + + # At last create missing devices + created_devices = [] + if create_hostnames: + created_devices = [create_single_device(module, packet_conn, n) + for n in create_hostnames] + if module.params.get('wait_for_public_IPv'): + created_devices = wait_for_public_IPv( + module, packet_conn, created_devices) + changed = True + + processed_devices = created_devices + process_devices + if target_state == 'active': + processed_devices = wait_for_devices_active( + module, packet_conn, processed_devices) + + return { + 'changed': changed, + 'devices': [serialize_device(d) for d in processed_devices] + } + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), + no_log=True), + count=dict(type='int', default=1), + count_offset=dict(type='int', default=1), + device_ids=dict(type='list', elements='str'), + facility=dict(), + features=dict(type='dict'), + hostnames=dict(type='list', elements='str', aliases=['name']), + tags=dict(type='list', elements='str'), + locked=dict(type='bool', default=False, aliases=['lock']), + operating_system=dict(), + plan=dict(), + project_id=dict(required=True), + state=dict(choices=ALLOWED_STATES, default='present'), + user_data=dict(), + wait_for_public_IPv=dict(type='int', choices=[4, 6]), + wait_timeout=dict(type='int', default=900), + 
ipxe_script_url=dict(default=''), + always_pxe=dict(type='bool', default=False), + ), + required_one_of=[('device_ids', 'hostnames',)], + mutually_exclusive=[ + ('hostnames', 'device_ids'), + ('count', 'device_ids'), + ('count_offset', 'device_ids'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable %s, " + "the auth_token parameter is required" % + PACKET_API_TOKEN_ENV_VAR) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + try: + module.exit_json(**act_on_devices(module, packet_conn, state)) + except Exception as e: + module.fail_json(msg='failed to set device state %s, error: %s' % + (state, to_native(e)), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py b/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py new file mode 100644 index 000000000..afeb7ea04 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: packet_ip_subnet + +short_description: Assign IP subnet to a bare metal server + +description: + - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. + - IPv4 subnets must come from already reserved block. + - IPv6 subnets must come from publicly routable /56 block from your project. + - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. + +version_added: '0.2.0' + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + auth_token: + description: + - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + + hostname: + description: + - A hostname of a device to/from which to assign/remove a subnet. + required: false + type: str + + device_id: + description: + - UUID of a device to/from which to assign/remove a subnet. + required: false + type: str + + project_id: + description: + - UUID of a project of the device to/from which to assign/remove a subnet. + type: str + + device_count: + description: + - The number of devices to retrieve from the project. The max allowed value is 1000. + - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info. + default: 100 + type: int + + cidr: + description: + - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host. + aliases: [name] + type: str + required: true + + state: + description: + - Desired state of the IP subnet on the specified device. + - With state == C(present), you must specify either hostname or device_id. Subnet with given CIDR will then be assigned to the specified device. 
+ - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from the specified device. + - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to. + choices: ['present', 'absent'] + default: 'present' + type: str + +requirements: + - "packet-python >= 1.35" + - "python >= 2.6" +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass it to the auth_token parameter of the module instead. + +- name: Create 1 device and assign an arbitrary public IPv4 subnet to it + hosts: localhost + tasks: + + - packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active + +# Pick an IPv4 address from a block allocated to your project. + + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostname: myserver + cidr: "147.75.201.78/32" + +# Release IP address 147.75.201.78 + +- name: Unassign IP address from any device in your project + hosts: localhost + tasks: + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + cidr: "147.75.201.78/32" + state: absent +''' + +RETURN = ''' +changed: + description: True if an IP address assignment was altered in any way (created or removed). + type: bool + sample: true + returned: success + +device_id: + type: str + description: UUID of the device associated with the specified IP address. + returned: success + +subnet: + description: Dict with data about the handled IP subnet. + type: dict + sample: + address: 147.75.90.241 + address_family: 4 + assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 } + cidr: 31 + created_at: '2017-08-07T15:15:30Z' + enabled: true + gateway: 147.75.90.240 + href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f + id: 1eda960-0a16-4c0f-b196-f3dc4928529f + manageable: true + management: true + netmask: 255.255.255.254 + network: 147.75.90.240 + public: true + returned: success +''' + + +import uuid +import re + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') +HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) +PROJECT_MAX_DEVICES = 100 + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + + +ALLOWED_STATES = ['absent', 'present'] + + +def is_valid_hostname(hostname): + return re.match(HOSTNAME_RE, hostname) is not None + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def get_existing_devices(module, packet_conn): + project_id = module.params.get('project_id') + if not is_valid_uuid(project_id): + raise Exception("Project ID {0} does not seem to be valid".format(project_id)) + + per_page = module.params.get('device_count') + return packet_conn.list_devices( + project_id, params={'per_page': per_page}) + + +def get_specified_device_identifiers(module): + if module.params.get('device_id'): + _d_id = module.params.get('device_id') + if not is_valid_uuid(_d_id): + raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id)) + return {'device_id': _d_id, 'hostname': None} + elif module.params.get('hostname'): + _hn =
module.params.get('hostname') + if not is_valid_hostname(_hn): + raise Exception("Hostname '{0}' does not seem to be valid".format(_hn)) + return {'hostname': _hn, 'device_id': None} + else: + return {'hostname': None, 'device_id': None} + + +def parse_subnet_cidr(cidr): + if "/" not in cidr: + raise Exception("CIDR expression is in the wrong format; it must be address/prefix_len") + addr, prefixlen = cidr.split("/") + try: + prefixlen = int(prefixlen) + except ValueError: + raise Exception("Wrong prefix length in CIDR expression {0}".format(cidr)) + return addr, prefixlen + + +def act_on_assignment(target_state, module, packet_conn): + return_dict = {'changed': False} + specified_cidr = module.params.get("cidr") + address, prefixlen = parse_subnet_cidr(specified_cidr) + + specified_identifier = get_specified_device_identifiers(module) + + if module.check_mode: + return return_dict + + if (specified_identifier['hostname'] is None) and ( + specified_identifier['device_id'] is None): + if target_state == 'absent': + # The special case to release the IP from any assignment + for d in get_existing_devices(module, packet_conn): + for ia in d.ip_addresses: + if address == ia['address'] and prefixlen == ia['cidr']: + packet_conn.call_api(ia['href'], "DELETE") + return_dict['changed'] = True + return_dict['subnet'] = ia + return_dict['device_id'] = d.id + return return_dict + raise Exception("To assign an address, you must specify either " + "a target device ID or a unique target hostname.") + + if specified_identifier['device_id'] is not None: + device = packet_conn.get_device(specified_identifier['device_id']) + else: + all_devices = get_existing_devices(module, packet_conn) + hn = specified_identifier['hostname'] + matching_devices = [d for d in all_devices if d.hostname == hn] + if len(matching_devices) > 1: + raise Exception("More than one device matches the given hostname {0}".format(hn)) + if len(matching_devices) == 0: + raise Exception("No device matches the given hostname {0}".format(hn)) + device = matching_devices[0] + + return_dict['device_id'] = device.id + assignment_dicts = [i for i in device.ip_addresses + if i['address'] == address and i['cidr'] == prefixlen] + if len(assignment_dicts) > 1: + raise Exception("IP address {0} is assigned more than once to device {1}".format( + specified_cidr, device.hostname)) + + if target_state == "absent": + if len(assignment_dicts) == 1: + packet_conn.call_api(assignment_dicts[0]['href'], "DELETE") + return_dict['subnet'] = assignment_dicts[0] + return_dict['changed'] = True + elif target_state == "present": + if len(assignment_dicts) == 0: + new_assignment = packet_conn.call_api( + "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)}) + return_dict['changed'] = True + return_dict['subnet'] = new_assignment + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + device_id=dict(type='str'), + hostname=dict(type='str'), + project_id=dict(type='str'), + device_count=dict(type='int', default=PROJECT_MAX_DEVICES), + cidr=dict(type='str', required=True, aliases=['name']), + state=dict(choices=ALLOWED_STATES, default='present'), + ), + supports_check_mode=True, + mutually_exclusive=[('hostname', 'device_id')], + required_one_of=[['hostname', 'device_id', 'project_id']], + required_by=dict( + hostname=('project_id',), + ), + ) + + if not HAS_PACKET_SDK:
module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + try: + module.exit_json(**act_on_assignment(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e))) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/packet_project.py b/ansible_collections/community/general/plugins/modules/packet_project.py new file mode 100644 index 000000000..da4a2bb89 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/packet_project.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2019, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: packet_project + +short_description: Create/delete a project in Packet host + +description: + - Create/delete a project in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#projects). + +version_added: '0.2.0' + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + + payment_method: + description: + - Payment method is the name of one of the payment methods available to your user. + - When blank, the API assumes the default payment method. + type: str + + auth_token: + description: + - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + + name: + description: + - Name for/of the project. + type: str + + org_id: + description: + - UUID of the organization to create a project for. + - When blank, the API assumes the default organization. + type: str + + id: + description: + - UUID of the project which you want to remove. + type: str + + custom_data: + description: + - Custom data about the project to create. + type: str + +requirements: + - "python >= 2.6" + - "packet-python >= 1.40" + +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the API token in the module param auth_token.
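+ +# For instance (a sketch; the env lookup assumes the token is exported on the controller): +# +# - name: Create a project, passing the token explicitly +# hosts: localhost +# tasks: +# - community.general.packet_project: +# auth_token: "{{ lookup('env', 'PACKET_API_TOKEN') }}" +# name: "example project"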
+ +- name: Create new project + hosts: localhost + tasks: + - community.general.packet_project: + name: "new project" + +- name: Create new project within non-default organization + hosts: localhost + tasks: + - community.general.packet_project: + name: "my org project" + org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0 + +- name: Remove project by id + hosts: localhost + tasks: + - community.general.packet_project: + state: absent + id: eef49903-7a09-4ca1-af67-4087c29ab5b6 + +- name: Create new project with non-default billing method + hosts: localhost + tasks: + - community.general.packet_project: + name: "newer project" + payment_method: "the other visa" +''' + +RETURN = ''' +changed: + description: True if a project was created or removed. + type: bool + sample: true + returned: success + +name: + description: Name of the addressed project. + type: str + returned: success + +id: + description: UUID of the addressed project. + type: str + returned: success +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + + +def act_on_project(target_state, module, packet_conn): + result_dict = {'changed': False} + given_id = module.params.get('id') + given_name = module.params.get('name') + if given_id: + matching_projects = [ + p for p in packet_conn.list_projects() if given_id == p.id] + else: + matching_projects = [ + p for p in packet_conn.list_projects() if given_name == p.name] + + if target_state == 'present': + if len(matching_projects) == 0: + org_id = module.params.get('org_id') + custom_data = module.params.get('custom_data') + payment_method = module.params.get('payment_method') + + if not org_id: + params = { + "name": given_name, + "payment_method_id": payment_method, + "customdata": custom_data + } + new_project_data = packet_conn.call_api("projects", "POST", params) + new_project = packet.Project(new_project_data, packet_conn) + else: + new_project = packet_conn.create_organization_project( + org_id=org_id, + name=given_name, + payment_method_id=payment_method, + customdata=custom_data + ) + + result_dict['changed'] = True + matching_projects.append(new_project) + + result_dict['name'] = matching_projects[0].name + result_dict['id'] = matching_projects[0].id + else: + if len(matching_projects) > 1: + _msg = ("More than one project matched for module call with state = absent: " + "{0}".format(to_native(matching_projects))) + module.fail_json(msg=_msg) + + if len(matching_projects) == 1: + p = matching_projects[0] + result_dict['name'] = p.name + result_dict['id'] = p.id + result_dict['changed'] = True + try: + p.delete() + except Exception as e: + _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format( + p.name, p.id, to_native(e))) + module.fail_json(msg=_msg) + return result_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=['present', 'absent'], default='present'), + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + name=dict(type='str'), + id=dict(type='str'), + org_id=dict(type='str'), + payment_method=dict(type='str'), + custom_data=dict(type='str'), + ), + supports_check_mode=True, + required_one_of=[("name", "id",)], + mutually_exclusive=[ + ('name', 'id'), + ] + ) + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + +
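+ # The auth_token parameter falls back to the PACKET_API_TOKEN environment + # variable through the env_fallback declared in argument_spec above; if + # neither source supplied a value, fail early with a descriptive message + # rather than letting the API client raise later.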
if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in ['present', 'absent']: + if module.check_mode: + module.exit_json(changed=False) + + try: + module.exit_json(**act_on_project(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set project state {0}: {1}".format(state, to_native(e))) + else: + module.fail_json(msg="{0} is not a valid state for this module".format(state)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/packet_sshkey.py b/ansible_collections/community/general/plugins/modules/packet_sshkey.py new file mode 100644 index 000000000..97f55ba23 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/packet_sshkey.py @@ -0,0 +1,278 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2016 Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: packet_sshkey +short_description: Create/delete an SSH key in Packet host +description: + - Create/delete an SSH key in Packet host. + - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). +author: "Tomas Karasek (@t0mk) " +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + type: str + auth_token: + description: + - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + label: + description: + - Label for the key. If you keep it empty, it will be read from the key string. + type: str + aliases: [name] + id: + description: + - UUID of the key which you want to remove. + type: str + fingerprint: + description: + - Fingerprint of the key which you want to remove. + type: str + key: + description: + - Public Key string ({type} {base64 encoded key} {description}). + type: str + key_file: + description: + - File with the public key. + type: path + +requirements: + - "python >= 2.6" + - packet-python + +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the API token in the module param auth_token. + +- name: Create sshkey from string + hosts: localhost + tasks: + - community.general.packet_sshkey: + key: "{{ lookup('file', 'my_packet_sshkey.pub') }}" + +- name: Create sshkey from file + hosts: localhost + tasks: + - community.general.packet_sshkey: + label: key from file + key_file: ~/ff.pub + +- name: Remove sshkey by id + hosts: localhost + tasks: + - community.general.packet_sshkey: + state: absent + id: eef49903-7a09-4ca1-af67-4087c29ab5b6 +''' + +RETURN = ''' +changed: + description: True if an SSH key was created or removed. + type: bool + sample: true + returned: always +sshkeys: + description: Information about the SSH keys that were created or removed.
+ type: list + sample: [ + { + "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", + "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", + "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2", + "label": "mynewkey33" + } + ] + returned: always +''' # NOQA + +import os +import uuid + +from ansible.module_utils.basic import AnsibleModule + +HAS_PACKET_SDK = True +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + + +def serialize_sshkey(sshkey): + sshkey_data = {} + copy_keys = ['id', 'key', 'label', 'fingerprint'] + for name in copy_keys: + sshkey_data[name] = getattr(sshkey, name) + return sshkey_data + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def load_key_string(key_str): + ret_dict = {} + key_str = key_str.strip() + ret_dict['key'] = key_str + cut_key = key_str.split() + if len(cut_key) in [2, 3]: + if len(cut_key) == 3: + ret_dict['label'] = cut_key[2] + else: + raise Exception("Public key %s is in the wrong format" % key_str) + return ret_dict + + +def get_sshkey_selector(module): + key_id = module.params.get('id') + if key_id: + if not is_valid_uuid(key_id): + raise Exception("sshkey ID %s is not a valid UUID" % key_id) + selecting_fields = ['label', 'fingerprint', 'id', 'key'] + select_dict = {} + for f in selecting_fields: + if module.params.get(f) is not None: + select_dict[f] = module.params.get(f) + + if module.params.get('key_file'): + with open(module.params.get('key_file')) as _file: + loaded_key = load_key_string(_file.read()) + select_dict['key'] = loaded_key['key'] + if module.params.get('label') is None: + if loaded_key.get('label'): + select_dict['label'] = loaded_key['label'] + + def selector(k): + if 'key' in select_dict: + # if the key string is specified, compare only the key strings + return k.key == select_dict['key'] + else: + # if the key string is not specified, all the fields must match + return all(select_dict[f] == getattr(k, f) for f in select_dict) + return selector + + +def act_on_sshkeys(target_state, module, packet_conn): + selector = get_sshkey_selector(module) + existing_sshkeys = packet_conn.list_ssh_keys() + # materialize the filter so the result can be tested for emptiness and + # iterated more than once (a plain filter object cannot, on Python 3) + matching_sshkeys = list(filter(selector, existing_sshkeys)) + changed = False + if target_state == 'present': + if not matching_sshkeys: + # there is no key matching the fields from the module call + # => create the key; a label and key string are required + newkey = {} + if module.params.get('key_file'): + with open(module.params.get('key_file')) as f: + newkey = load_key_string(f.read()) + if module.params.get('key'): + newkey = load_key_string(module.params.get('key')) + if module.params.get('label'): + newkey['label'] = module.params.get('label') + for param in ('label', 'key'): + if param not in newkey: + _msg = ("If you want to ensure a key is present, you must " + "supply both a label and a key string, either in " + "module params, or in a key file. 
%s is missing" + % param) + raise Exception(_msg) + matching_sshkeys = [] + new_key_response = packet_conn.create_ssh_key( + newkey['label'], newkey['key']) + changed = True + + matching_sshkeys.append(new_key_response) + else: + # state is 'absent' => delete matching keys + for k in matching_sshkeys: + try: + k.delete() + changed = True + except Exception as e: + _msg = ("while trying to remove sshkey %s, id %s %s, " + "got error: %s" % + (k.label, k.id, target_state, e)) + raise Exception(_msg) + + return { + 'changed': changed, + 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys] + } + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=['present', 'absent'], default='present'), + auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), + no_log=True), + label=dict(type='str', aliases=['name']), + id=dict(type='str'), + fingerprint=dict(type='str'), + key=dict(type='str', no_log=True), + key_file=dict(type='path'), + ), + mutually_exclusive=[ + ('label', 'id'), + ('label', 'fingerprint'), + ('id', 'fingerprint'), + ('key', 'fingerprint'), + ('key', 'id'), + ('key_file', 'key'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable %s, " + "the auth_token parameter is required" % + PACKET_API_TOKEN_ENV_VAR) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in ['present', 'absent']: + try: + module.exit_json(**act_on_sshkeys(state, module, packet_conn)) + except Exception as e: + module.fail_json(msg='failed to set sshkey state: %s' % str(e)) + else: + module.fail_json(msg='%s is not a valid state for this module' % state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/packet_volume.py b/ansible_collections/community/general/plugins/modules/packet_volume.py new file mode 100644 index 000000000..910d64b55 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/packet_volume.py @@ -0,0 +1,331 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: packet_volume + +short_description: Create/delete a volume in Packet host + +description: + - Create/delete a volume in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#volumes). + +version_added: '0.2.0' + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Desired state of the volume. + default: present + choices: ['present', 'absent'] + type: str + + project_id: + description: + - ID of project of the device. + required: true + type: str + + auth_token: + description: + - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). 
+ type: str + + name: + description: + - Selector for the API-generated name of the volume. + type: str + + description: + description: + - User-defined description attribute for Packet volume. + - "It is used as an idempotent identifier - if a volume with the given + description exists, a new one is not created." + type: str + + id: + description: + - UUID of a volume. + type: str + + plan: + description: + - storage_1 for standard tier, storage_2 for premium (performance) tier. + - Tiers are described at U(https://www.packet.com/cloud/storage/). + choices: ['storage_1', 'storage_2'] + default: 'storage_1' + type: str + + facility: + description: + - Location of the volume. + - Volumes can only be attached to devices in the same location. + type: str + + size: + description: + - Size of the volume in gigabytes. + type: int + + locked: + description: + - Create new volume locked. + type: bool + default: false + + billing_cycle: + description: + - Billing cycle for new volume. + choices: ['hourly', 'monthly'] + default: 'hourly' + type: str + + snapshot_policy: + description: + - Snapshot policy for new volume. + type: dict + + suboptions: + snapshot_count: + description: + - How many snapshots to keep, a positive integer. + required: true + type: int + + snapshot_frequency: + description: + - Frequency of snapshots. + required: true + choices: ["15min", "1hour", "1day", "1week", "1month", "1year"] + type: str + +requirements: + - "python >= 2.6" + - "packet-python >= 1.35" + +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the API token in the module param auth_token. + +- hosts: localhost + vars: + volname: testvol123 + project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b + + tasks: + - name: Create volume + community.general.packet_volume: + description: "{{ volname }}" + project_id: "{{ project_id }}" + facility: 'ewr1' + plan: 'storage_1' + state: present + size: 10 + snapshot_policy: + snapshot_count: 10 + snapshot_frequency: 1day + register: result_create + + - name: Delete volume + community.general.packet_volume: + id: "{{ result_create.id }}" + project_id: "{{ project_id }}" + state: absent +''' + +RETURN = ''' +id: + description: UUID of the specified volume. + type: str + returned: success + sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c +name: + description: The API-generated name of the volume resource. + type: str + returned: if volume is attached/detached to/from some device + sample: "volume-a91dc506" +description: + description: The user-defined description of the volume resource.
+ type: str + returned: success + sample: "Just another volume" +''' + +import uuid + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True + + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + +VOLUME_PLANS = ["storage_1", "storage_2"] +VOLUME_STATES = ["present", "absent"] +BILLING = ["hourly", "monthly"] + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def get_volume_selector(module): + if module.params.get('id'): + i = module.params.get('id') + if not is_valid_uuid(i): + raise Exception("Volume ID '{0}' is not a valid UUID".format(i)) + return lambda v: v['id'] == i + elif module.params.get('name'): + n = module.params.get('name') + return lambda v: v['name'] == n + elif module.params.get('description'): + d = module.params.get('description') + return lambda v: v['description'] == d + + +def get_or_fail(params, key): + item = params.get(key) + if item is None: + raise Exception("{0} must be specified for new volume".format(key)) + return item + + +def act_on_volume(target_state, module, packet_conn): + return_dict = {'changed': False} + s = get_volume_selector(module) + project_id = module.params.get("project_id") + api_method = "projects/{0}/storage".format(project_id) + all_volumes = packet_conn.call_api(api_method, "GET")['volumes'] + matching_volumes = [v for v in all_volumes if s(v)] + + if target_state == "present": + if len(matching_volumes) == 0: + params = { + "description": get_or_fail(module.params, "description"), + "size": get_or_fail(module.params, "size"), + "plan": get_or_fail(module.params, "plan"), + "facility": get_or_fail(module.params, "facility"), + "locked": get_or_fail(module.params, "locked"), + "billing_cycle": get_or_fail(module.params, "billing_cycle"), + "snapshot_policies": module.params.get("snapshot_policy"), + } + + new_volume_data = packet_conn.call_api(api_method, "POST", params) + return_dict['changed'] = True + for k in ['id', 'name', 'description']: + return_dict[k] = new_volume_data[k] + + else: + for k in ['id', 'name', 'description']: + return_dict[k] = matching_volumes[0][k] + + else: + if len(matching_volumes) > 1: + _msg = ("More than one volume matches in module call for absent state: {0}".format( + to_native(matching_volumes))) + module.fail_json(msg=_msg) + + if len(matching_volumes) == 1: + volume = matching_volumes[0] + packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE") + return_dict['changed'] = True + for k in ['id', 'name', 'description']: + return_dict[k] = volume[k] + + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='str'), + description=dict(type="str"), + name=dict(type='str'), + state=dict(choices=VOLUME_STATES, default="present"), + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + project_id=dict(required=True), + plan=dict(choices=VOLUME_PLANS, default="storage_1"), + facility=dict(type="str"), + size=dict(type="int"), + locked=dict(type="bool", default=False), + snapshot_policy=dict(type='dict'), + billing_cycle=dict(type='str', choices=BILLING, default="hourly"), + ), + supports_check_mode=True, + required_one_of=[("name", "id", "description")], + mutually_exclusive=[ + ('name', 'id'), + ('id', 'description'), + ('name', 
'description'), + ] + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in VOLUME_STATES: + if module.check_mode: + module.exit_json(changed=False) + + try: + module.exit_json(**act_on_volume(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set volume state {0}: {1}".format( + state, to_native(e))) + else: + module.fail_json(msg="{0} is not a valid state for this module".format(state)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py b/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py new file mode 100644 index 000000000..7f6c68e05 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py @@ -0,0 +1,308 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: packet_volume_attachment + +short_description: Attach/detach a volume to a device in the Packet host + +description: + - Attach/detach a volume to a device in the Packet host. + - API is documented at U(https://www.packet.com/developers/api/volumes/). + - "This module creates the attachment route in the Packet API. In order to discover + the block devices on the server, you have to run the Attach Scripts, + as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)." + +version_added: '0.2.0' + +author: + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Indicate desired state of the attachment. + default: present + choices: ['present', 'absent'] + type: str + + auth_token: + description: + - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str + + project_id: + description: + - UUID of the project to which the device and volume belong. + type: str + required: true + + volume: + description: + - Selector for the volume. + - It can be a UUID, an API-generated volume name, or user-defined description string. + - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"' + type: str + required: true + + device: + description: + - Selector for the device. + - It can be a UUID of the device, or a hostname. + - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"' + type: str + +requirements: + - "python >= 2.6" + - "packet-python >= 1.35" + +''' + +EXAMPLES = ''' +# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +# You can also pass the api token in module param auth_token. 
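+ +# Volumes and devices may also be addressed by UUID instead of by name, +# description, or hostname, for example (a sketch; the UUIDs are the +# illustrative values from the option documentation above): +# +# - community.general.packet_volume_attachment: +# project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b +# volume: 4a347482-b546-4f67-8300-fb5018ef0c5 +# device: 98a14f7a-3d27-4478-b7cf-35b5670523f3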
+ +- hosts: localhost + + vars: + volname: testvol + devname: testdev + project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b + + tasks: + - name: Create volume + packet_volume: + description: "{{ volname }}" + project_id: "{{ project_id }}" + facility: ewr1 + plan: storage_1 + state: present + size: 10 + snapshot_policy: + snapshot_count: 10 + snapshot_frequency: 1day + + - name: Create a device + packet_device: + project_id: "{{ project_id }}" + hostnames: "{{ devname }}" + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: ewr1 + state: present + + - name: Attach testvol to testdev + community.general.packet_volume_attachment: + project_id: "{{ project_id }}" + volume: "{{ volname }}" + device: "{{ devname }}" + + - name: Detach testvol from testdev + community.general.packet_volume_attachment: + project_id: "{{ project_id }}" + volume: "{{ volname }}" + device: "{{ devname }}" + state: absent +''' + +RETURN = ''' +volume_id: + description: UUID of volume addressed by the module call. + type: str + returned: success + +device_id: + description: UUID of device addressed by the module call. + type: str + returned: success +''' + +import uuid + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.text.converters import to_native + +HAS_PACKET_SDK = True + + +try: + import packet +except ImportError: + HAS_PACKET_SDK = False + + +PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" + +STATES = ["present", "absent"] + + +def is_valid_uuid(myuuid): + try: + val = uuid.UUID(myuuid, version=4) + except ValueError: + return False + return str(val) == myuuid + + +def get_volume_selector(spec): + if is_valid_uuid(spec): + return lambda v: v['id'] == spec + else: + return lambda v: v['name'] == spec or v['description'] == spec + + +def get_device_selector(spec): + if is_valid_uuid(spec): + return lambda v: v['id'] == spec + else: + return lambda v: v['hostname'] == spec + + +def do_attach(packet_conn, vol_id, dev_id): + api_method = "storage/{0}/attachments".format(vol_id) + packet_conn.call_api( + api_method, + params={"device_id": dev_id}, + type="POST") + + +def do_detach(packet_conn, vol, dev_id=None): + def dev_match(a): + return (dev_id is None) or (a['device']['id'] == dev_id) + for a in vol['attachments']: + if dev_match(a): + packet_conn.call_api(a['href'], type="DELETE") + + +def validate_selected(l, resource_type, spec): + if len(l) > 1: + _msg = ("more than one {0} matches specification {1}: {2}".format( + resource_type, spec, l)) + raise Exception(_msg) + if len(l) == 0: + _msg = "no {0} matches specification: {1}".format(resource_type, spec) + raise Exception(_msg) + + +def get_attached_dev_ids(volume_dict): + if len(volume_dict['attachments']) == 0: + return [] + else: + return [a['device']['id'] for a in volume_dict['attachments']] + + +def act_on_volume_attachment(target_state, module, packet_conn): + return_dict = {'changed': False} + volspec = module.params.get("volume") + devspec = module.params.get("device") + if devspec is None and target_state == 'present': + raise Exception("If you want to attach a volume, you must specify a device.") + project_id = module.params.get("project_id") + volumes_api_method = "projects/{0}/storage".format(project_id) + volumes = packet_conn.call_api(volumes_api_method, + params={'include': 'facility,attachments.device'})['volumes'] + v_match = get_volume_selector(volspec) + matching_volumes = [v for v in volumes if v_match(v)] + validate_selected(matching_volumes, "volume", volspec) + volume = 
matching_volumes[0] + return_dict['volume_id'] = volume['id'] + + device = None + if devspec is not None: + devices_api_method = "projects/{0}/devices".format(project_id) + devices = packet_conn.call_api(devices_api_method)['devices'] + d_match = get_device_selector(devspec) + matching_devices = [d for d in devices if d_match(d)] + validate_selected(matching_devices, "device", devspec) + device = matching_devices[0] + return_dict['device_id'] = device['id'] + + attached_device_ids = get_attached_dev_ids(volume) + + if target_state == "present": + if len(attached_device_ids) == 0: + do_attach(packet_conn, volume['id'], device['id']) + return_dict['changed'] = True + elif device['id'] not in attached_device_ids: + # Don't reattach volume which is attached to a different device. + # Rather fail than force remove a device on state == 'present'. + raise Exception("volume {0} is already attached to device {1}".format( + volume, attached_device_ids)) + else: + if device is None: + if len(attached_device_ids) > 0: + do_detach(packet_conn, volume) + return_dict['changed'] = True + elif device['id'] in attached_device_ids: + do_detach(packet_conn, volume, device['id']) + return_dict['changed'] = True + + return return_dict + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(choices=STATES, default="present"), + auth_token=dict( + type='str', + fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]), + no_log=True + ), + volume=dict(type="str", required=True), + project_id=dict(type="str", required=True), + device=dict(type="str"), + ), + supports_check_mode=True, + ) + + if not HAS_PACKET_SDK: + module.fail_json(msg='packet required for this module') + + if not module.params.get('auth_token'): + _fail_msg = ("if Packet API token is not in environment variable {0}, " + "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR)) + module.fail_json(msg=_fail_msg) + + auth_token = module.params.get('auth_token') + + packet_conn = packet.Manager(auth_token=auth_token) + + state = module.params.get('state') + + if state in STATES: + if module.check_mode: + module.exit_json(changed=False) + + try: + module.exit_json( + **act_on_volume_attachment(state, module, packet_conn)) + except Exception as e: + module.fail_json( + msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e))) + else: + module.fail_json(msg="{0} is not a valid state for this module".format(state)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pacman.py b/ansible_collections/community/general/plugins/modules/pacman.py new file mode 100644 index 000000000..66f58155d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pacman.py @@ -0,0 +1,859 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2012, Afterburn +# Copyright (c) 2013, Aaron Bull Schaefer +# Copyright (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2022, Jean Raby +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: pacman +short_description: Manage packages with I(pacman) +description: + - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. 
+author: + - Indrajit Raychaudhuri (@indrajitr) + - Aaron Bull Schaefer (@elasticdog) + - Maxime de Roucy (@tchernomax) + - Jean Raby (@jraby) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + name: + description: + - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. + Can't be used in combination with C(upgrade). + aliases: [ package, pkg ] + type: list + elements: str + + state: + description: + - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. + - C(present) and C(installed) will simply ensure that a desired package is installed. + - C(latest) will update the specified package if it is not of the latest available version. + - C(absent) and C(removed) will remove the specified package. + default: present + choices: [ absent, installed, latest, present, removed ] + type: str + + force: + description: + - When removing packages, forcefully remove them, without any checks. + Same as I(extra_args="--nodeps --nodeps"). + When combined with I(update_cache), force a refresh of all package databases. + Same as I(update_cache_extra_args="--refresh --refresh"). + default: false + type: bool + + remove_nosave: + description: + - When removing packages, do not save modified configuration files as C(.pacsave) files. + (passes C(--nosave) to pacman) + version_added: 4.6.0 + default: false + type: bool + + executable: + description: + - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper. + - Pacman compatibility is unfortunately ill defined, in particular, this modules makes + extensive use of the C(--print-format) directive which is known not to be implemented by + some AUR helpers (notably, C(yay)). + - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. + default: pacman + type: str + version_added: 3.1.0 + + extra_args: + description: + - Additional option to pass to pacman when enforcing C(state). + default: '' + type: str + + update_cache: + description: + - Whether or not to refresh the master package lists. + - This can be run as part of a package installation or as a separate step. + - If not specified, it defaults to C(false). + - Please note that this option only had an influence on the module's C(changed) state + if I(name) and I(upgrade) are not specified before community.general 5.0.0. + See the examples for how to keep the old behavior. + type: bool + + update_cache_extra_args: + description: + - Additional option to pass to pacman when enforcing C(update_cache). + default: '' + type: str + + upgrade: + description: + - Whether or not to upgrade the whole system. + Can't be used in combination with C(name). + - If not specified, it defaults to C(false). + type: bool + + upgrade_extra_args: + description: + - Additional option to pass to pacman when enforcing C(upgrade). + default: '' + type: str + + reason: + description: + - The install reason to set for the packages. + choices: [ dependency, explicit ] + type: str + version_added: 5.4.0 + + reason_for: + description: + - Set the install reason for C(all) packages or only for C(new) packages. + - In case of I(state=latest) already installed packages which will be updated to a newer version are not counted as C(new). 
+ default: new + choices: [ all, new ] + type: str + version_added: 5.4.0 + +notes: + - When used with a C(loop:) each package will be processed individually, + it is much more efficient to pass the list directly to the I(name) option. + - To use an AUR helper (I(executable) option), a few extra setup steps might be required beforehand. + For example, a dedicated build user with permissions to install packages could be necessary. +""" + +RETURN = """ +packages: + description: + - A list of packages that have been changed. + - Before community.general 4.5.0 this was only returned when I(upgrade=true). + In community.general 4.5.0, it was sometimes omitted when the package list is empty, + but since community.general 4.6.0 it is always returned when I(name) is specified or + I(upgrade=true). + returned: success and I(name) is specified or I(upgrade=true) + type: list + elements: str + sample: [ package, other-package ] + +cache_updated: + description: + - The changed status of C(pacman -Sy). + - Useful when I(name) or I(upgrade=true) are specified next to I(update_cache=true). + returned: success, when I(update_cache=true) + type: bool + sample: false + version_added: 4.6.0 + +stdout: + description: + - Output from pacman. + returned: success, when needed + type: str + sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..." + version_added: 4.1.0 + +stderr: + description: + - Error output from pacman. + returned: success, when needed + type: str + sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..." + version_added: 4.1.0 +""" + +EXAMPLES = """ +- name: Install package foo from repo + community.general.pacman: + name: foo + state: present + +- name: Install package bar from file + community.general.pacman: + name: ~/bar-1.0-1-any.pkg.tar.xz + state: present + +- name: Install package foo from repo and bar from file + community.general.pacman: + name: + - foo + - ~/bar-1.0-1-any.pkg.tar.xz + state: present + +- name: Install package from AUR using a Pacman compatible AUR helper + community.general.pacman: + name: foo + state: present + executable: yay + extra_args: --builddir /var/cache/yay + +- name: Upgrade package foo + # The 'changed' state of this call will indicate whether the cache was + # updated *or* whether foo was installed/upgraded. + community.general.pacman: + name: foo + state: latest + update_cache: true + +- name: Remove packages foo and bar + community.general.pacman: + name: + - foo + - bar + state: absent + +- name: Recursively remove package baz + community.general.pacman: + name: baz + state: absent + extra_args: --recursive + +- name: Run the equivalent of "pacman -Sy" as a separate step + community.general.pacman: + update_cache: true + +- name: Run the equivalent of "pacman -Su" as a separate step + community.general.pacman: + upgrade: true + +- name: Run the equivalent of "pacman -Syu" as a separate step + # Since community.general 5.0.0 the 'changed' state of this call + # will be 'true' in case the cache was updated, or when a package + # was updated. + # + # The previous behavior was to only indicate whether something was + # upgraded. 
To keep the old behavior, add the following to the task: + # + # register: result + # changed_when: result.packages | length > 0 + community.general.pacman: + update_cache: true + upgrade: true + +- name: Run the equivalent of "pacman -Rdd", force remove package baz + community.general.pacman: + name: baz + state: absent + force: true + +- name: Install foo as dependency and leave reason untouched if already installed + community.general.pacman: + name: foo + state: present + reason: dependency + reason_for: new + +- name: Run the equivalent of "pacman -S --asexplicit", mark foo as explicit and install it if not present + community.general.pacman: + name: foo + state: present + reason: explicit + reason_for: all +""" + +import shlex +from ansible.module_utils.basic import AnsibleModule +from collections import defaultdict, namedtuple + + +class Package(object): + def __init__(self, name, source, source_is_URL=False): + self.name = name + self.source = source + self.source_is_URL = source_is_URL + + def __eq__(self, o): + return self.name == o.name and self.source == o.source and self.source_is_URL == o.source_is_URL + + def __lt__(self, o): + return self.name < o.name + + def __repr__(self): + return 'Package("%s", "%s", %s)' % (self.name, self.source, self.source_is_URL) + + +VersionTuple = namedtuple("VersionTuple", ["current", "latest"]) + + +class Pacman(object): + def __init__(self, module): + self.m = module + + self.m.run_command_environ_update = dict(LC_ALL="C") + p = self.m.params + + self._msgs = [] + self._stdouts = [] + self._stderrs = [] + self.changed = False + self.exit_params = {} + + self.pacman_path = self.m.get_bin_path(p["executable"], True) + + self._cached_database = None + + # Normalize for old configs + if p["state"] == "installed": + self.target_state = "present" + elif p["state"] == "removed": + self.target_state = "absent" + else: + self.target_state = p["state"] + + def add_exit_infos(self, msg=None, stdout=None, stderr=None): + if msg: + self._msgs.append(msg) + if stdout: + self._stdouts.append(stdout) + if stderr: + self._stderrs.append(stderr) + + def _set_mandatory_exit_params(self): + msg = "\n".join(self._msgs) + stdouts = "\n".join(self._stdouts) + stderrs = "\n".join(self._stderrs) + if stdouts: + self.exit_params["stdout"] = stdouts + if stderrs: + self.exit_params["stderr"] = stderrs + self.exit_params["msg"] = msg # mandatory, but might be empty + + def fail(self, msg=None, stdout=None, stderr=None, **kwargs): + self.add_exit_infos(msg, stdout, stderr) + self._set_mandatory_exit_params() + if kwargs: + self.exit_params.update(**kwargs) + self.m.fail_json(**self.exit_params) + + def success(self): + self._set_mandatory_exit_params() + self.m.exit_json(changed=self.changed, **self.exit_params) + + def run(self): + if self.m.params["update_cache"]: + self.update_package_db() + + if not (self.m.params["name"] or self.m.params["upgrade"]): + self.success() + + self.inventory = self._build_inventory() + if self.m.params["upgrade"]: + self.upgrade() + self.success() + + if self.m.params["name"]: + pkgs = self.package_list() + + if self.target_state == "absent": + self.remove_packages(pkgs) + self.success() + else: + self.install_packages(pkgs) + self.success() + + # This shouldn't happen... 
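+ # Every reachable combination of update_cache/upgrade/name above exits
+ # through self.success(), so falling through to this point can only mean
+ # the dispatch logic has a gap.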
+ self.fail("This is a bug") + + def install_packages(self, pkgs): + pkgs_to_install = [] + pkgs_to_install_from_url = [] + pkgs_to_set_reason = [] + for p in pkgs: + if self.m.params["reason"] and ( + p.name not in self.inventory["pkg_reasons"] + or self.m.params["reason_for"] == "all" + and self.inventory["pkg_reasons"][p.name] != self.m.params["reason"] + ): + pkgs_to_set_reason.append(p.name) + if p.source_is_URL: + # URL packages bypass the latest / upgradable_pkgs test + # They go through the dry-run to let pacman decide if they will be installed + pkgs_to_install_from_url.append(p) + continue + if ( + p.name not in self.inventory["installed_pkgs"] + or self.target_state == "latest" + and p.name in self.inventory["upgradable_pkgs"] + ): + pkgs_to_install.append(p) + + if len(pkgs_to_install) == 0 and len(pkgs_to_install_from_url) == 0 and len(pkgs_to_set_reason) == 0: + self.exit_params["packages"] = [] + self.add_exit_infos("package(s) already installed") + return + + cmd_base = [ + self.pacman_path, + "--noconfirm", + "--noprogressbar", + "--needed", + ] + if self.m.params["extra_args"]: + cmd_base.extend(self.m.params["extra_args"]) + + def _build_install_diff(pacman_verb, pkglist): + # Dry run to build the installation diff + + cmd = cmd_base + [pacman_verb, "--print-format", "%n %v"] + [p.source for p in pkglist] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("Failed to list package(s) to install", cmd=cmd, stdout=stdout, stderr=stderr) + + name_ver = [l.strip() for l in stdout.splitlines()] + before = [] + after = [] + to_be_installed = [] + for p in name_ver: + # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that. + # When installing from URLs, pacman can also output a 'nothing to do' message. strip that too. 
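+ # Lines that are not a plain "name version" pair would corrupt the diff
+ # (or break the unpacking below), so informational output is skipped first.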
+ if "loading packages" in p or "there is nothing to do" in p: + continue + name, version = p.split() + if name in self.inventory["installed_pkgs"]: + before.append("%s-%s-%s" % (name, self.inventory["installed_pkgs"][name], self.inventory["pkg_reasons"][name])) + if name in pkgs_to_set_reason: + after.append("%s-%s-%s" % (name, version, self.m.params["reason"])) + elif name in self.inventory["pkg_reasons"]: + after.append("%s-%s-%s" % (name, version, self.inventory["pkg_reasons"][name])) + else: + after.append("%s-%s" % (name, version)) + to_be_installed.append(name) + + return (to_be_installed, before, after) + + before = [] + after = [] + installed_pkgs = [] + + if pkgs_to_install: + p, b, a = _build_install_diff("--sync", pkgs_to_install) + installed_pkgs.extend(p) + before.extend(b) + after.extend(a) + if pkgs_to_install_from_url: + p, b, a = _build_install_diff("--upgrade", pkgs_to_install_from_url) + installed_pkgs.extend(p) + before.extend(b) + after.extend(a) + + if len(installed_pkgs) == 0 and len(pkgs_to_set_reason) == 0: + # This can happen with URL packages if pacman decides there's nothing to do + self.exit_params["packages"] = [] + self.add_exit_infos("package(s) already installed") + return + + self.changed = True + + self.exit_params["diff"] = { + "before": "\n".join(sorted(before)) + "\n" if before else "", + "after": "\n".join(sorted(after)) + "\n" if after else "", + } + + changed_reason_pkgs = [p for p in pkgs_to_set_reason if p not in installed_pkgs] + + if self.m.check_mode: + self.add_exit_infos("Would have installed %d packages" % (len(installed_pkgs) + len(changed_reason_pkgs))) + self.exit_params["packages"] = sorted(installed_pkgs + changed_reason_pkgs) + return + + # actually do it + def _install_packages_for_real(pacman_verb, pkglist): + cmd = cmd_base + [pacman_verb] + [p.source for p in pkglist] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("Failed to install package(s)", cmd=cmd, stdout=stdout, stderr=stderr) + self.add_exit_infos(stdout=stdout, stderr=stderr) + self._invalidate_database() + + if pkgs_to_install: + _install_packages_for_real("--sync", pkgs_to_install) + if pkgs_to_install_from_url: + _install_packages_for_real("--upgrade", pkgs_to_install_from_url) + + # set reason + if pkgs_to_set_reason: + cmd = [self.pacman_path, "--noconfirm", "--database"] + if self.m.params["reason"] == "dependency": + cmd.append("--asdeps") + else: + cmd.append("--asexplicit") + cmd.extend(pkgs_to_set_reason) + + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("Failed to install package(s)", cmd=cmd, stdout=stdout, stderr=stderr) + self.add_exit_infos(stdout=stdout, stderr=stderr) + + self.exit_params["packages"] = sorted(installed_pkgs + changed_reason_pkgs) + self.add_exit_infos("Installed %d package(s)" % (len(installed_pkgs) + len(changed_reason_pkgs))) + + def remove_packages(self, pkgs): + # filter out pkgs that are already absent + pkg_names_to_remove = [p.name for p in pkgs if p.name in self.inventory["installed_pkgs"]] + + if len(pkg_names_to_remove) == 0: + self.exit_params["packages"] = [] + self.add_exit_infos("package(s) already absent") + return + + # There's something to do, set this in advance + self.changed = True + + cmd_base = [self.pacman_path, "--remove", "--noconfirm", "--noprogressbar"] + cmd_base += self.m.params["extra_args"] + cmd_base += ["--nodeps", "--nodeps"] if self.m.params["force"] else [] + # nosave_args conflicts with --print-format. Added later. 
+ # https://github.com/ansible-collections/community.general/issues/4315 + + # This is a bit of a TOCTOU but it is better than parsing the output of + # pacman -R, which is different depending on the user config (VerbosePkgLists) + # Start by gathering what would be removed + cmd = cmd_base + ["--print-format", "%n-%v"] + pkg_names_to_remove + + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("failed to list package(s) to remove", cmd=cmd, stdout=stdout, stderr=stderr) + + removed_pkgs = stdout.split() + self.exit_params["packages"] = removed_pkgs + self.exit_params["diff"] = { + "before": "\n".join(removed_pkgs) + "\n", # trailing \n to avoid diff complaints + "after": "", + } + + if self.m.check_mode: + self.exit_params["packages"] = removed_pkgs + self.add_exit_infos("Would have removed %d packages" % len(removed_pkgs)) + return + + nosave_args = ["--nosave"] if self.m.params["remove_nosave"] else [] + cmd = cmd_base + nosave_args + pkg_names_to_remove + + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("failed to remove package(s)", cmd=cmd, stdout=stdout, stderr=stderr) + self._invalidate_database() + self.exit_params["packages"] = removed_pkgs + self.add_exit_infos("Removed %d package(s)" % len(removed_pkgs), stdout=stdout, stderr=stderr) + + def upgrade(self): + """Runs pacman --sync --sysupgrade if there are upgradable packages""" + + if len(self.inventory["upgradable_pkgs"]) == 0: + self.add_exit_infos("Nothing to upgrade") + return + + self.changed = True # there are upgrades, so there will be changes + + # Build diff based on inventory first. + diff = {"before": "", "after": ""} + for pkg, versions in self.inventory["upgradable_pkgs"].items(): + diff["before"] += "%s-%s\n" % (pkg, versions.current) + diff["after"] += "%s-%s\n" % (pkg, versions.latest) + self.exit_params["diff"] = diff + self.exit_params["packages"] = self.inventory["upgradable_pkgs"].keys() + + if self.m.check_mode: + self.add_exit_infos( + "%d packages would have been upgraded" % (len(self.inventory["upgradable_pkgs"])) + ) + else: + cmd = [ + self.pacman_path, + "--sync", + "--sysupgrade", + "--quiet", + "--noconfirm", + ] + if self.m.params["upgrade_extra_args"]: + cmd += self.m.params["upgrade_extra_args"] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + self._invalidate_database() + if rc == 0: + self.add_exit_infos("System upgraded", stdout=stdout, stderr=stderr) + else: + self.fail("Could not upgrade", cmd=cmd, stdout=stdout, stderr=stderr) + + def _list_database(self): + """runs pacman --sync --list with some caching""" + if self._cached_database is None: + dummy, packages, dummy = self.m.run_command([self.pacman_path, '--sync', '--list'], check_rc=True) + self._cached_database = packages.splitlines() + return self._cached_database + + def _invalidate_database(self): + """invalidates the pacman --sync --list cache""" + self._cached_database = None + + def update_package_db(self): + """runs pacman --sync --refresh""" + if self.m.check_mode: + self.add_exit_infos("Would have updated the package db") + self.changed = True + self.exit_params["cache_updated"] = True + return + + cmd = [ + self.pacman_path, + "--sync", + "--refresh", + ] + if self.m.params["update_cache_extra_args"]: + cmd += self.m.params["update_cache_extra_args"] + if self.m.params["force"]: + cmd += ["--refresh"] + else: + # Dump package database to get contents before update + pre_state = sorted(self._list_database()) + + rc, stdout, stderr = 
self.m.run_command(cmd, check_rc=False) + self._invalidate_database() + + if self.m.params["force"]: + # Always changed when force=true + self.exit_params["cache_updated"] = True + else: + # Dump package database to get contents after update + post_state = sorted(self._list_database()) + # If contents changed, set changed=true + self.exit_params["cache_updated"] = pre_state != post_state + if self.exit_params["cache_updated"]: + self.changed = True + + if rc == 0: + self.add_exit_infos("Updated package db", stdout=stdout, stderr=stderr) + else: + self.fail("could not update package db", cmd=cmd, stdout=stdout, stderr=stderr) + + def package_list(self): + """Takes the input package list and resolves packages groups to their package list using the inventory, + extracts package names from packages given as files or URLs using calls to pacman + + Returns the expanded/resolved list as a list of Package + """ + pkg_list = [] + for pkg in self.m.params["name"]: + if not pkg: + continue + + is_URL = False + if pkg in self.inventory["available_groups"]: + # Expand group members + for group_member in self.inventory["available_groups"][pkg]: + pkg_list.append(Package(name=group_member, source=group_member)) + elif pkg in self.inventory["available_pkgs"] or pkg in self.inventory["installed_pkgs"]: + # Just a regular pkg, either available in the repositories, + # or locally installed, which we need to know for absent state + pkg_list.append(Package(name=pkg, source=pkg)) + else: + # Last resort, call out to pacman to extract the info, + # pkg is possibly in the / format, or a filename or a URL + + # Start with / case + cmd = [self.pacman_path, "--sync", "--print-format", "%n", pkg] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + # fallback to filename / URL + cmd = [self.pacman_path, "--upgrade", "--print-format", "%n", pkg] + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + if self.target_state == "absent": + continue # Don't bark for unavailable packages when trying to remove them + else: + self.fail( + msg="Failed to list package %s" % (pkg), + cmd=cmd, + stdout=stdout, + stderr=stderr, + rc=rc, + ) + # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs " filename_without_extension downloading..." if the URL is unseen. + # In all cases, pacman outputs "loading packages..." on stdout. strip both + stdout = stdout.splitlines()[-1] + is_URL = True + pkg_name = stdout.strip() + pkg_list.append(Package(name=pkg_name, source=pkg, source_is_URL=is_URL)) + + return pkg_list + + def _build_inventory(self): + """Build a cache datastructure used for all pkg lookups + Returns a dict: + { + "installed_pkgs": {pkgname: version}, + "installed_groups": {groupname: set(pkgnames)}, + "available_pkgs": {pkgname: version}, + "available_groups": {groupname: set(pkgnames)}, + "upgradable_pkgs": {pkgname: (current_version,latest_version)}, + "pkg_reasons": {pkgname: reason}, + } + + Fails the module if a package requested for install cannot be found + """ + + installed_pkgs = {} + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query"], check_rc=True) + # Format of a line: "pacman 6.0.1-2" + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + pkg, ver = l.split() + installed_pkgs[pkg] = ver + + installed_groups = defaultdict(set) + dummy, stdout, dummy = self.m.run_command( + [self.pacman_path, "--query", "--groups"], check_rc=True + ) + # Format of lines: + # base-devel file + # base-devel findutils + # ... 
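+ # One "group member" pair per line; accumulate each group's members into a set.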
+ for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + group, pkgname = l.split() + installed_groups[group].add(pkgname) + + available_pkgs = {} + database = self._list_database() + # Format of a line: "core pacman 6.0.1-2" + for l in database: + l = l.strip() + if not l: + continue + repo, pkg, ver = l.split()[:3] + available_pkgs[pkg] = ver + + available_groups = defaultdict(set) + dummy, stdout, dummy = self.m.run_command( + [self.pacman_path, "--sync", "--groups", "--groups"], check_rc=True + ) + # Format of lines: + # vim-plugins vim-airline + # vim-plugins vim-airline-themes + # vim-plugins vim-ale + # ... + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + group, pkg = l.split() + available_groups[group].add(pkg) + + upgradable_pkgs = {} + rc, stdout, stderr = self.m.run_command( + [self.pacman_path, "--query", "--upgrades"], check_rc=False + ) + + # non-zero exit with nothing in stdout -> nothing to upgrade, all good + # stderr can have warnings, so not checked here + if rc == 1 and stdout == "": + pass # nothing to upgrade + elif rc == 0: + # Format of lines: + # strace 5.14-1 -> 5.15-1 + # systemd 249.7-1 -> 249.7-2 [ignored] + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + if "[ignored]" in l: + continue + s = l.split() + if len(s) != 4: + self.fail(msg="Invalid line: %s" % l) + + pkg = s[0] + current = s[1] + latest = s[3] + upgradable_pkgs[pkg] = VersionTuple(current=current, latest=latest) + else: + # stuff in stdout but rc!=0, abort + self.fail( + "Couldn't get list of packages available for upgrade", + stdout=stdout, + stderr=stderr, + rc=rc, + ) + + pkg_reasons = {} + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--explicit"], check_rc=True) + # Format of a line: "pacman 6.0.1-2" + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + pkg = l.split()[0] + pkg_reasons[pkg] = "explicit" + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--deps"], check_rc=True) + # Format of a line: "pacman 6.0.1-2" + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + pkg = l.split()[0] + pkg_reasons[pkg] = "dependency" + + return dict( + installed_pkgs=installed_pkgs, + installed_groups=installed_groups, + available_pkgs=available_pkgs, + available_groups=available_groups, + upgradable_pkgs=upgradable_pkgs, + pkg_reasons=pkg_reasons, + ) + + +def setup_module(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type="list", elements="str", aliases=["pkg", "package"]), + state=dict( + type="str", + default="present", + choices=["present", "installed", "latest", "absent", "removed"], + ), + force=dict(type="bool", default=False), + remove_nosave=dict(type="bool", default=False), + executable=dict(type="str", default="pacman"), + extra_args=dict(type="str", default=""), + upgrade=dict(type="bool"), + upgrade_extra_args=dict(type="str", default=""), + update_cache=dict(type="bool"), + update_cache_extra_args=dict(type="str", default=""), + reason=dict(type="str", choices=["explicit", "dependency"]), + reason_for=dict(type="str", default="new", choices=["new", "all"]), + ), + required_one_of=[["name", "update_cache", "upgrade"]], + mutually_exclusive=[["name", "upgrade"]], + supports_check_mode=True, + ) + + # Split extra_args as the shell would for easier handling later + for str_args in ["extra_args", "upgrade_extra_args", "update_cache_extra_args"]: + module.params[str_args] = shlex.split(module.params[str_args]) + + return module + + 
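+# A minimal illustration of the shlex-based splitting above (the option value
+# is hypothetical, not one of this module's defaults):
+#
+#   shlex.split('--ignore linux --assume-installed "pkg=1.0"')
+#   == ['--ignore', 'linux', '--assume-installed', 'pkg=1.0']
+#
+# Each token later reaches pacman as its own argv element, so shell-style
+# quoting inside the option string is honored without invoking a shell.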
+def main(): + + Pacman(setup_module()).run() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/pacman_key.py b/ansible_collections/community/general/plugins/modules/pacman_key.py new file mode 100644 index 000000000..4d4c4afac --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pacman_key.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, George Rawlinson +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: pacman_key +author: + - George Rawlinson (@grawlinson) +version_added: "3.2.0" +short_description: Manage pacman's list of trusted keys +description: + - Add or remove gpg keys from the pacman keyring. +notes: + - Use full-length key ID (40 characters). + - Keys will be verified when using I(data), I(file), or I(url) unless I(verify) is overridden. + - Keys will be locally signed after being imported into the keyring. + - If the key ID exists in the keyring, the key will not be added unless I(force_update) is specified. + - I(data), I(file), I(url), and I(keyserver) are mutually exclusive. +requirements: + - gpg + - pacman-key +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - The 40 character identifier of the key. + - Including this allows check mode to correctly report the changed state. + - Do not specify a subkey ID, instead specify the primary key ID. + required: true + type: str + data: + description: + - The keyfile contents to add to the keyring. + - Must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + file: + description: + - The path to a keyfile on the remote server to add to the keyring. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: path + url: + description: + - The URL to retrieve keyfile from. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + keyserver: + description: + - The keyserver used to retrieve key from. + type: str + verify: + description: + - Whether or not to verify the keyfile's key ID against specified key ID. + type: bool + default: true + force_update: + description: + - This forces the key to be updated if it already exists in the keyring. + type: bool + default: false + keyring: + description: + - The full path to the keyring folder on the remote server. + - If not specified, module will use pacman's default (C(/etc/pacman.d/gnupg)). + - Useful if the remote system requires an alternative gnupg directory. + type: path + default: /etc/pacman.d/gnupg + state: + description: + - Ensures that the key is present (added) or absent (revoked). 
+ default: present + choices: [ absent, present ] + type: str +''' + +EXAMPLES = ''' +- name: Import a key via local file + community.general.pacman_key: + data: "{{ lookup('file', 'keyfile.asc') }}" + state: present + +- name: Import a key via remote file + community.general.pacman_key: + file: /tmp/keyfile.asc + state: present + +- name: Import a key via url + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + url: https://domain.tld/keys/keyfile.asc + state: present + +- name: Import a key via keyserver + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + keyserver: keyserver.domain.tld + +- name: Import a key into an alternative keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + file: /tmp/keyfile.asc + keyring: /etc/pacman.d/gnupg-alternative + +- name: Remove a key from the keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + state: absent +''' + +RETURN = r''' # ''' + +import os.path +import tempfile +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_native + + +class PacmanKey(object): + def __init__(self, module): + self.module = module + # obtain binary paths for gpg & pacman-key + self.gpg = module.get_bin_path('gpg', required=True) + self.pacman_key = module.get_bin_path('pacman-key', required=True) + + # obtain module parameters + keyid = module.params['id'] + url = module.params['url'] + data = module.params['data'] + file = module.params['file'] + keyserver = module.params['keyserver'] + verify = module.params['verify'] + force_update = module.params['force_update'] + keyring = module.params['keyring'] + state = module.params['state'] + self.keylength = 40 + + # sanitise key ID & check if key exists in the keyring + keyid = self.sanitise_keyid(keyid) + key_present = self.key_in_keyring(keyring, keyid) + + # check mode + if module.check_mode: + if state == "present": + changed = (key_present and force_update) or not key_present + module.exit_json(changed=changed) + elif state == "absent": + if key_present: + module.exit_json(changed=True) + module.exit_json(changed=False) + + if state == "present": + if key_present and not force_update: + module.exit_json(changed=False) + + if data: + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif file: + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif url: + data = self.fetch_key(url) + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif keyserver: + self.recv_key(keyring, keyid, keyserver) + module.exit_json(changed=True) + elif state == "absent": + if key_present: + self.remove_key(keyring, keyid) + module.exit_json(changed=True) + module.exit_json(changed=False) + + def is_hexadecimal(self, string): + """Check if a given string is valid hexadecimal""" + try: + int(string, 16) + except ValueError: + return False + return True + + def sanitise_keyid(self, keyid): + """Sanitise given key ID. + + Strips whitespace, uppercases all characters, and strips leading `0X`. 
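+ For example, "0x 12ab 34cd" would become "12AB34CD" before the length
+ and hexadecimal checks below run.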
+ """ + sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '') + if len(sanitised_keyid) != self.keylength: + self.module.fail_json(msg="key ID is not full-length: %s" % sanitised_keyid) + if not self.is_hexadecimal(sanitised_keyid): + self.module.fail_json(msg="key ID is not hexadecimal: %s" % sanitised_keyid) + return sanitised_keyid + + def fetch_key(self, url): + """Downloads a key from url""" + response, info = fetch_url(self.module, url) + if info['status'] != 200: + self.module.fail_json(msg="failed to fetch key at %s, error was %s" % (url, info['msg'])) + return to_native(response.read()) + + def recv_key(self, keyring, keyid, keyserver): + """Receives key via keyserver""" + cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid] + self.module.run_command(cmd, check_rc=True) + self.lsign_key(keyring, keyid) + + def lsign_key(self, keyring, keyid): + """Locally sign key""" + cmd = [self.pacman_key, '--gpgdir', keyring] + self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True) + + def save_key(self, data): + "Saves key data to a temporary file" + tmpfd, tmpname = tempfile.mkstemp() + self.module.add_cleanup_file(tmpname) + tmpfile = os.fdopen(tmpfd, "w") + tmpfile.write(data) + tmpfile.close() + return tmpname + + def add_key(self, keyring, keyfile, keyid, verify): + """Add key to pacman's keyring""" + if verify: + self.verify_keyfile(keyfile, keyid) + cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile] + self.module.run_command(cmd, check_rc=True) + self.lsign_key(keyring, keyid) + + def remove_key(self, keyring, keyid): + """Remove key from pacman's keyring""" + cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid] + self.module.run_command(cmd, check_rc=True) + + def verify_keyfile(self, keyfile, keyid): + """Verify that keyfile matches the specified key ID""" + if keyfile is None: + self.module.fail_json(msg="expected a key, got none") + elif keyid is None: + self.module.fail_json(msg="expected a key ID, got none") + + rc, stdout, stderr = self.module.run_command( + [ + self.gpg, + '--with-colons', + '--with-fingerprint', + '--batch', + '--no-tty', + '--show-keys', + keyfile + ], + check_rc=True, + ) + + extracted_keyid = None + for line in stdout.splitlines(): + if line.startswith('fpr:'): + extracted_keyid = line.split(':')[9] + break + + if extracted_keyid != keyid: + self.module.fail_json(msg="key ID does not match. 
expected %s, got %s" % (keyid, extracted_keyid)) + + def key_in_keyring(self, keyring, keyid): + "Check if the key ID is in pacman's keyring" + rc, stdout, stderr = self.module.run_command( + [ + self.gpg, + '--with-colons', + '--batch', + '--no-tty', + '--no-default-keyring', + '--keyring=%s/pubring.gpg' % keyring, + '--list-keys', keyid + ], + check_rc=False, + ) + if rc != 0: + if stderr.find("No public key") >= 0: + return False + else: + self.module.fail_json(msg="gpg returned an error: %s" % stderr) + return True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='str', required=True), + data=dict(type='str'), + file=dict(type='path'), + url=dict(type='str'), + keyserver=dict(type='str'), + verify=dict(type='bool', default=True), + force_update=dict(type='bool', default=False), + keyring=dict(type='path', default='/etc/pacman.d/gnupg'), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + mutually_exclusive=(('data', 'file', 'url', 'keyserver'),), + required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)], + ) + PacmanKey(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pagerduty.py b/ansible_collections/community/general/plugins/modules/pagerduty.py new file mode 100644 index 000000000..bed3629be --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pagerduty.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: pagerduty +short_description: Create PagerDuty maintenance windows +description: + - This module will let you create PagerDuty maintenance windows +author: + - "Andrew Newdigate (@suprememoocow)" + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" + - "Bruce Pennypacker (@bpennypacker)" +requirements: + - PagerDuty API access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Create a maintenance window or get a list of ongoing windows. + required: true + choices: [ "running", "started", "ongoing", "absent" ] + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + user: + type: str + description: + - PagerDuty user ID. Obsolete. Please, use I(token) for authorization. + token: + type: str + description: + - A pagerduty token, generated on the pagerduty site. It is used for authorization. + required: true + requester_id: + type: str + description: + - ID of user making the request. Only needed when creating a maintenance_window. + service: + type: list + elements: str + description: + - A comma separated list of PagerDuty service IDs. + aliases: [ services ] + window_id: + type: str + description: + - ID of maintenance window. Only needed when absent a maintenance_window. + hours: + type: str + description: + - Length of maintenance window in hours. + default: '1' + minutes: + type: str + description: + - Maintenance window in minutes (this is added to the hours). 
+ default: '0' + desc: + type: str + description: + - Short description of maintenance window. + default: Created by Ansible + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: true +''' + +EXAMPLES = ''' +- name: List ongoing maintenance windows using a token + community.general.pagerduty: + name: companyabc + token: xxxxxxxxxxxxxx + state: ongoing + +- name: Create a 1 hour maintenance window for service FOO123 + community.general.pagerduty: + name: companyabc + user: example@example.com + token: yourtoken + state: running + service: FOO123 + +- name: Create a 5 minute maintenance window for service FOO123 + community.general.pagerduty: + name: companyabc + token: xxxxxxxxxxxxxx + hours: 0 + minutes: 5 + state: running + service: FOO123 + + +- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment" + community.general.pagerduty: + name: companyabc + user: example@example.com + state: running + service: FOO123 + hours: 4 + desc: deployment + register: pd_window + +- name: Delete the previous maintenance window + community.general.pagerduty: + name: companyabc + user: example@example.com + state: absent + window_id: '{{ pd_window.result.maintenance_window.id }}' + +# Delete a maintenance window from a separate playbook than its creation, +# and if it is the only existing maintenance window +- name: Check + community.general.pagerduty: + requester_id: XXXXXXX + token: yourtoken + state: ongoing + register: pd_window + +- name: Delete + community.general.pagerduty: + requester_id: XXXXXXX + token: yourtoken + state: absent + window_id: "{{ pd_window.result.maintenance_windows[0].id }}" +''' + +import datetime +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +class PagerDutyRequest(object): + def __init__(self, module, name, user, token): + self.module = module + self.name = name + self.user = user + self.token = token + self.headers = { + 'Content-Type': 'application/json', + "Authorization": self._auth_header(), + 'Accept': 'application/vnd.pagerduty+json;version=2' + } + + def ongoing(self, http_call=fetch_url): + url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing" + headers = dict(self.headers) + + response, info = http_call(self.module, url, headers=headers) + if info['status'] != 200: + self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, False + + def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url): + if not requester_id: + self.module.fail_json(msg="requester_id is required when maintenance window should be created") + + url = 'https://api.pagerduty.com/maintenance_windows' + + headers = dict(self.headers) + headers.update({'From': requester_id}) + + start, end = self._compute_start_end_time(hours, minutes) + services = self._create_services_payload(service) + + request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}} + + data = json.dumps(request_data) + response, info = http_call(self.module, url, data=data, headers=headers, method='POST') + if info['status'] != 201: + self.module.fail_json(msg="failed to create the window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, 
True + + def _create_services_payload(self, service): + if (isinstance(service, list)): + return [{'id': s, 'type': 'service_reference'} for s in service] + else: + return [{'id': service, 'type': 'service_reference'}] + + def _compute_start_end_time(self, hours, minutes): + now = datetime.datetime.utcnow() + later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes)) + start = now.strftime("%Y-%m-%dT%H:%M:%SZ") + end = later.strftime("%Y-%m-%dT%H:%M:%SZ") + return start, end + + def absent(self, window_id, http_call=fetch_url): + url = "https://api.pagerduty.com/maintenance_windows/" + window_id + headers = dict(self.headers) + + response, info = http_call(self.module, url, headers=headers, method='DELETE') + if info['status'] != 204: + self.module.fail_json(msg="failed to delete the window: %s" % info['msg']) + + json_out = self._read_response(response) + + return False, json_out, True + + def _auth_header(self): + return "Token token=%s" % self.token + + def _read_response(self, response): + try: + return json.loads(response.read()) + except Exception: + return "" + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), + name=dict(required=False), + user=dict(required=False), + token=dict(required=True, no_log=True), + service=dict(required=False, type='list', elements='str', aliases=["services"]), + window_id=dict(required=False), + requester_id=dict(required=False), + hours=dict(default='1', required=False), # @TODO change to int? + minutes=dict(default='0', required=False), # @TODO change to int? + desc=dict(default='Created by Ansible', required=False), + validate_certs=dict(default=True, type='bool'), + ) + ) + + state = module.params['state'] + name = module.params['name'] + user = module.params['user'] + service = module.params['service'] + window_id = module.params['window_id'] + hours = module.params['hours'] + minutes = module.params['minutes'] + token = module.params['token'] + desc = module.params['desc'] + requester_id = module.params['requester_id'] + + pd = PagerDutyRequest(module, name, user, token) + + if state == "running" or state == "started": + if not service: + module.fail_json(msg="service not specified") + (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc) + if rc == 0: + changed = True + + if state == "ongoing": + (rc, out, changed) = pd.ongoing() + + if state == "absent": + (rc, out, changed) = pd.absent(window_id) + + if rc != 0: + module.fail_json(msg="failed", result=out) + + module.exit_json(msg="success", result=out, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_alert.py b/ansible_collections/community/general/plugins/modules/pagerduty_alert.py new file mode 100644 index 000000000..45bec92c6 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pagerduty_alert.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: pagerduty_alert +short_description: Trigger, acknowledge or resolve PagerDuty incidents +description: + - This module will let you trigger, acknowledge or resolve a PagerDuty incident by 
sending events +author: + - "Amanpreet Singh (@ApsOps)" +requirements: + - PagerDuty API access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + service_id: + type: str + description: + - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. + required: true + service_key: + type: str + description: + - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key). + integration_key: + type: str + description: + - The GUID of one of your "Generic API" services. + - This is the "integration key" listed on a "Integrations" tab of PagerDuty service. + state: + type: str + description: + - Type of event to be sent. + required: true + choices: + - 'triggered' + - 'acknowledged' + - 'resolved' + api_key: + type: str + description: + - The pagerduty API key (readonly access), generated on the pagerduty site. + required: true + desc: + type: str + description: + - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) + will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. + The maximum length is 1024 characters. + - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event. + required: false + default: Created via Ansible + incident_key: + type: str + description: + - Identifies the incident to which this I(state) should be applied. + - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an + open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" + problem reports. + - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a + trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. + required: false + client: + type: str + description: + - The name of the monitoring client that is triggering this event. + required: false + client_url: + type: str + description: + - The URL of the monitoring client that is triggering this event. 
+ required: false +''' + +EXAMPLES = ''' +- name: Trigger an incident with just the basic options + community.general.pagerduty_alert: + name: companyabc + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: triggered + desc: problem that led to this trigger + +- name: Trigger an incident with more options + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: triggered + desc: problem that led to this trigger + incident_key: somekey + client: Sample Monitoring Service + client_url: http://service.example.com + +- name: Acknowledge an incident based on incident_key + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: acknowledged + incident_key: somekey + desc: "some text for incident's log" + +- name: Resolve an incident based on incident_key + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: resolved + incident_key: somekey + desc: "some text for incident's log" +''' +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse + + +def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url): + url = 'https://api.pagerduty.com/incidents' + headers = { + "Content-type": "application/json", + "Authorization": "Token token=%s" % api_key, + 'Accept': 'application/vnd.pagerduty+json;version=2' + } + + params = { + 'service_ids[]': service_id, + 'sort_by': 'incident_number:desc', + 'time_zone': 'UTC' + } + if incident_key: + params['incident_key'] = incident_key + + url_parts = list(urlparse(url)) + url_parts[4] = urlencode(params, True) + + url = urlunparse(url_parts) + + response, info = http_call(module, url, method='get', headers=headers) + + if info['status'] != 200: + module.fail_json(msg="failed to check current incident status." + "Reason: %s" % info['msg']) + + incidents = json.loads(response.read())["incidents"] + msg = "No corresponding incident" + + if len(incidents) == 0: + if state in ('acknowledged', 'resolved'): + return msg, False + return msg, True + elif state != incidents[0]["status"]: + return incidents[0], True + + return incidents[0], False + + +def send_event(module, service_key, event_type, desc, + incident_key=None, client=None, client_url=None): + url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" + headers = { + "Content-type": "application/json" + } + + data = { + "service_key": service_key, + "event_type": event_type, + "incident_key": incident_key, + "description": desc, + "client": client, + "client_url": client_url + } + + response, info = fetch_url(module, url, method='post', + headers=headers, data=json.dumps(data)) + if info['status'] != 200: + module.fail_json(msg="failed to %s. 
Reason: %s" % + (event_type, info['msg'])) + json_out = json.loads(response.read()) + return json_out + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=False), + service_id=dict(required=True), + service_key=dict(required=False, no_log=True), + integration_key=dict(required=False, no_log=True), + api_key=dict(required=True, no_log=True), + state=dict(required=True, + choices=['triggered', 'acknowledged', 'resolved']), + client=dict(required=False, default=None), + client_url=dict(required=False, default=None), + desc=dict(required=False, default='Created via Ansible'), + incident_key=dict(required=False, default=None, no_log=False) + ), + supports_check_mode=True + ) + + name = module.params['name'] + service_id = module.params['service_id'] + integration_key = module.params['integration_key'] + service_key = module.params['service_key'] + api_key = module.params['api_key'] + state = module.params['state'] + client = module.params['client'] + client_url = module.params['client_url'] + desc = module.params['desc'] + incident_key = module.params['incident_key'] + + if integration_key is None: + if service_key is not None: + integration_key = service_key + module.warn('"service_key" is obsolete parameter and will be removed.' + ' Please, use "integration_key" instead') + else: + module.fail_json(msg="'integration_key' is required parameter") + + state_event_dict = { + 'triggered': 'trigger', + 'acknowledged': 'acknowledge', + 'resolved': 'resolve' + } + + event_type = state_event_dict[state] + + if event_type != 'trigger' and incident_key is None: + module.fail_json(msg="incident_key is required for " + "acknowledge or resolve events") + + out, changed = check(module, name, state, service_id, + integration_key, api_key, incident_key) + + if not module.check_mode and changed is True: + out = send_event(module, integration_key, event_type, desc, + incident_key, client, client_url) + + module.exit_json(result=out, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_change.py b/ansible_collections/community/general/plugins/modules/pagerduty_change.py new file mode 100644 index 000000000..6af5d58ea --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pagerduty_change.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: pagerduty_change +short_description: Track a code or infrastructure change as a PagerDuty change event +version_added: 1.3.0 +description: + - This module will let you create a PagerDuty change event each time the module is run. + - This is not an idempotent action and a new change event will be created each time it is run. +author: + - Adam Vaughan (@adamvaughan) +requirements: + - PagerDuty integration key +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + details: + - Check mode simply does nothing except returning C(changed=true) in case the I(url) seems to be correct. + diff_mode: + support: none +options: + integration_key: + description: + - The integration key that identifies the service the change was made to. 
+ This can be found by adding an integration to a service in PagerDuty. + required: true + type: str + summary: + description: + - A short description of the change that occurred. + required: true + type: str + source: + description: + - The source of the change event. + default: Ansible + type: str + user: + description: + - The name of the user or process that triggered this deployment. + type: str + repo: + description: + - The URL of the project repository. + required: false + type: str + revision: + description: + - An identifier of the revision being deployed, typically a number or SHA from a version control system. + required: false + type: str + environment: + description: + - The environment name, typically C(production), C(staging), etc. + required: false + type: str + link_url: + description: + - A URL where more information about the deployment can be obtained. + required: false + type: str + link_text: + description: + - Descriptive text for a URL where more information about the deployment can be obtained. + required: false + type: str + url: + description: + - URL to submit the change event to. + required: false + default: https://events.pagerduty.com/v2/change/enqueue + type: str + validate_certs: + description: + - If C(false), SSL certificates for the target URL will not be validated. + This should only be used on personally controlled sites using self-signed certificates. + required: false + default: true + type: bool +''' + +EXAMPLES = ''' +- name: Track the deployment as a PagerDuty change event + community.general.pagerduty_change: + integration_key: abc123abc123abc123abc123abc123ab + summary: The application was deployed + +- name: Track the deployment as a PagerDuty change event with more details + community.general.pagerduty_change: + integration_key: abc123abc123abc123abc123abc123ab + summary: The application was deployed + source: Ansible Deploy + user: ansible + repo: github.com/ansible/ansible + revision: '4.2' + environment: production + link_url: https://github.com/ansible-collections/community.general/pull/1269 + link_text: View changes on GitHub +''' + +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.basic import AnsibleModule +from datetime import datetime + + +def main(): + module = AnsibleModule( + argument_spec=dict( + integration_key=dict(required=True, type='str', no_log=True), + summary=dict(required=True, type='str'), + source=dict(required=False, default='Ansible', type='str'), + user=dict(required=False, type='str'), + repo=dict(required=False, type='str'), + revision=dict(required=False, type='str'), + environment=dict(required=False, type='str'), + link_url=dict(required=False, type='str'), + link_text=dict(required=False, type='str'), + url=dict(required=False, + default='https://events.pagerduty.com/v2/change/enqueue', type='str'), + validate_certs=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/ + + url = module.params['url'] + headers = {'Content-Type': 'application/json'} + + if module.check_mode: + _response, info = fetch_url( + module, url, headers=headers, method='POST') + + if info['status'] == 400: + module.exit_json(changed=True) + else: + module.fail_json( + msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status'])) + + custom_details = {} + + if module.params['user']: + custom_details['user'] = module.params['user'] + + if module.params['repo']: + 
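+ # As with 'user' above, optional metadata is forwarded to the change
+ # event's custom_details only when the parameter was supplied.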
custom_details['repo'] = module.params['repo'] + + if module.params['revision']: + custom_details['revision'] = module.params['revision'] + + if module.params['environment']: + custom_details['environment'] = module.params['environment'] + + now = datetime.utcnow() + timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + + payload = { + 'summary': module.params['summary'], + 'source': module.params['source'], + 'timestamp': timestamp, + 'custom_details': custom_details + } + + event = { + 'routing_key': module.params['integration_key'], + 'payload': payload + } + + if module.params['link_url']: + link = { + 'href': module.params['link_url'] + } + + if module.params['link_text']: + link['text'] = module.params['link_text'] + + event['links'] = [link] + + _response, info = fetch_url( + module, url, data=module.jsonify(event), headers=headers, method='POST') + + if info['status'] == 202: + module.exit_json(changed=True) + else: + module.fail_json( + msg='Creating PagerDuty change event failed with %d' % (info['status'])) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_user.py b/ansible_collections/community/general/plugins/modules/pagerduty_user.py new file mode 100644 index 000000000..9c9805bff --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pagerduty_user.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: pagerduty_user +short_description: Manage a user account on PagerDuty +description: + - This module manages the creation/removal of a user account on PagerDuty. +version_added: '1.3.0' +author: Zainab Alsaffar (@zanssa) +requirements: + - pdpyras python module = 4.1.1 + - PagerDuty API Access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + access_token: + description: + - An API access token to authenticate with the PagerDuty REST API. + required: true + type: str + pd_user: + description: + - Name of the user in PagerDuty. + required: true + type: str + pd_email: + description: + - The user's email address. + - I(pd_email) is the unique identifier used and cannot be updated using this module. + required: true + type: str + pd_role: + description: + - The user's role. + choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] + default: 'responder' + type: str + state: + description: + - State of the user. + - On C(present), it creates a user if the user doesn't exist. + - On C(absent), it removes a user if the account exists. + choices: ['present', 'absent'] + default: 'present' + type: str + pd_teams: + description: + - The teams to which the user belongs. + - Required if I(state=present). 
+ type: list + elements: str +''' + +EXAMPLES = r''' +- name: Create a user account on PagerDuty + community.general.pagerduty_user: + access_token: 'Your_Access_token' + pd_user: user_full_name + pd_email: user_email + pd_role: user_pd_role + pd_teams: user_pd_teams + state: "present" + +- name: Remove a user account from PagerDuty + community.general.pagerduty_user: + access_token: 'Your_Access_token' + pd_user: user_full_name + pd_email: user_email + state: "absent" +''' + +RETURN = r''' # ''' + +from os import path +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("pdpyras", url="https://github.com/PagerDuty/pdpyras"): + from pdpyras import APISession, PDClientError + + +class PagerDutyUser(object): + def __init__(self, module, session): + self._module = module + self._apisession = session + + # check if the user exists + def does_user_exist(self, pd_email): + for user in self._apisession.iter_all('users'): + if user['email'] == pd_email: + return user['id'] + + # create a user account on PD + def add_pd_user(self, pd_name, pd_email, pd_role): + try: + user = self._apisession.persist('users', 'email', { + "name": pd_name, + "email": pd_email, + "type": "user", + "role": pd_role, + }) + return user + + except PDClientError as e: + if e.response.status_code == 400: + self._module.fail_json( + msg="Failed to add %s due to invalid argument" % (pd_name)) + if e.response.status_code == 401: + self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name)) + if e.response.status_code == 402: + self._module.fail_json( + msg="Failed to add %s due to inability to perform the action within the API token" % (pd_name)) + if e.response.status_code == 403: + self._module.fail_json( + msg="Failed to add %s due to inability to review the requested resource within the API token" % (pd_name)) + if e.response.status_code == 429: + self._module.fail_json( + msg="Failed to add %s due to reaching the limit of making requests" % (pd_name)) + + # delete a user account from PD + def delete_user(self, pd_user_id, pd_name): + try: + user_path = path.join('/users/', pd_user_id) + self._apisession.rdelete(user_path) + + except PDClientError as e: + if e.response.status_code == 404: + self._module.fail_json( + msg="Failed to remove %s as user was not found" % (pd_name)) + if e.response.status_code == 403: + self._module.fail_json( + msg="Failed to remove %s due to inability to review the requested resource within the API token" % (pd_name)) + if e.response.status_code == 401: + # print out the list of incidents + pd_incidents = self.get_incidents_assigned_to_user(pd_user_id) + self._module.fail_json(msg="Failed to remove %s as user has assigned incidents %s" % (pd_name, pd_incidents)) + if e.response.status_code == 429: + self._module.fail_json( + msg="Failed to remove %s due to reaching the limit of making requests" % (pd_name)) + + # get incidents assigned to a user + def get_incidents_assigned_to_user(self, pd_user_id): + incident_info = {} + incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]}) + + for incident in incidents: + incident_info = { + 'title': incident['title'], + 'key': incident['incident_key'], + 'status': incident['status'] + } + return incident_info + + # add a user to a team/teams + def add_user_to_teams(self, pd_user_id, pd_teams, pd_role): + updated_team = None + for team in pd_teams: + team_info = self._apisession.find('teams', team, 
attribute='name') + if team_info is not None: + try: + updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={ + 'role': pd_role + }) + except PDClientError: + updated_team = None + return updated_team + + +def main(): + module = AnsibleModule( + argument_spec=dict( + access_token=dict(type='str', required=True, no_log=True), + pd_user=dict(type='str', required=True), + pd_email=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + pd_role=dict(type='str', default='responder', + choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']), + pd_teams=dict(type='list', elements='str', required=False)), + required_if=[['state', 'present', ['pd_teams']], ], + supports_check_mode=True, + ) + + deps.validate(module) + + access_token = module.params['access_token'] + pd_user = module.params['pd_user'] + pd_email = module.params['pd_email'] + state = module.params['state'] + pd_role = module.params['pd_role'] + pd_teams = module.params['pd_teams'] + + if pd_role: + pd_role_gui_value = { + 'global_admin': 'admin', + 'manager': 'user', + 'responder': 'limited_user', + 'observer': 'observer', + 'stakeholder': 'read_only_user', + 'limited_stakeholder': 'read_only_limited_user', + 'restricted_access': 'restricted_access' + } + pd_role = pd_role_gui_value[pd_role] + + # authenticate with PD API + try: + session = APISession(access_token) + except PDClientError as e: + module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e) + + user = PagerDutyUser(module, session) + + user_exists = user.does_user_exist(pd_email) + + if user_exists: + if state == "absent": + # remove user + if not module.check_mode: + user.delete_user(user_exists, pd_user) + module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user) + else: + module.exit_json(changed=False, result="User %s already exists." % pd_user) + + # in case that the user does not exist + else: + if state == "absent": + module.exit_json(changed=False, result="User %s was not found." % pd_user) + + else: + # add user, adds user with the default notification rule and contact info (email) + if not module.check_mode: + user.add_pd_user(pd_user, pd_email, pd_role) + # get user's id + pd_user_id = user.does_user_exist(pd_email) + # add a user to the team/s + user.add_user_to_teams(pd_user_id, pd_teams, pd_role) + module.exit_json(changed=True, result="Successfully created & added user %s to team %s" % (pd_user, pd_teams)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/pam_limits.py b/ansible_collections/community/general/plugins/modules/pam_limits.py new file mode 100644 index 000000000..dbb70045d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pam_limits.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Sebastien Rohaut +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: pam_limits +author: +- "Sebastien Rohaut (@usawa)" +short_description: Modify Linux PAM limits +description: + - The C(pam_limits) module modifies PAM limits. + - The default file is C(/etc/security/limits.conf). 
+ - For the full documentation, see C(man 5 limits.conf). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 2.0.0 + diff_mode: + support: full + version_added: 2.0.0 +options: + domain: + type: str + description: + - A username, @groupname, wildcard, UID/GID range. + required: true + limit_type: + type: str + description: + - Limit type, see C(man 5 limits.conf) for an explanation. + required: true + choices: [ "hard", "soft", "-" ] + limit_item: + type: str + description: + - The limit to be set. + required: true + choices: + - "core" + - "data" + - "fsize" + - "memlock" + - "nofile" + - "rss" + - "stack" + - "cpu" + - "nproc" + - "as" + - "maxlogins" + - "maxsyslogins" + - "priority" + - "locks" + - "sigpending" + - "msgqueue" + - "nice" + - "rtprio" + - "chroot" + value: + type: str + description: + - The value of the limit. + - Value must either be C(unlimited), C(infinity) or C(-1), all of which indicate no limit, or a limit of 0 or larger. + - Value must be a number in the range -20 to 19 inclusive, if I(limit_item) is set to C(nice) or C(priority). + - Refer to the C(man 5 limits.conf) manual pages for more details. + required: true + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + required: false + type: bool + default: false + use_min: + description: + - If set to C(true), the minimal value will be used or conserved. + - If the specified value is inferior to the value in the file, + file content is replaced with the new value, else content is not modified. + required: false + type: bool + default: false + use_max: + description: + - If set to C(true), the maximal value will be used or conserved. + - If the specified value is superior to the value in the file, + file content is replaced with the new value, else content is not modified. + required: false + type: bool + default: false + dest: + type: str + description: + - Modify the limits.conf path. + required: false + default: "/etc/security/limits.conf" + comment: + type: str + description: + - Comment associated with the limit. + required: false + default: '' +notes: + - If I(dest) file does not exist, it is created. +''' + +EXAMPLES = r''' +- name: Add or modify nofile soft limit for the user joe + community.general.pam_limits: + domain: joe + limit_type: soft + limit_item: nofile + value: 64000 + +- name: Add or modify fsize hard limit for the user smith. Keep or set the maximal value + community.general.pam_limits: + domain: smith + limit_type: hard + limit_item: fsize + value: 1000000 + use_max: true + +- name: Add or modify memlock, both soft and hard, limit for the user james with a comment + community.general.pam_limits: + domain: james + limit_type: '-' + limit_item: memlock + value: unlimited + comment: unlimited memory lock for james + +- name: Add or modify hard nofile limits for wildcard domain + community.general.pam_limits: + domain: '*' + limit_type: hard + limit_item: nofile + value: 39693561 +''' + +import os +import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def _assert_is_valid_value(module, item, value, prefix=''): + if item in ['nice', 'priority']: + try: + valid = -20 <= int(value) <= 19 + except ValueError: + valid = False + if not valid: + module.fail_json(msg="%s Value of %r for item %r is invalid. 
Value must be a number in the range -20 to 19 inclusive. " + "Refer to the limits.conf(5) manual pages for more details." % (prefix, value, item)) + elif not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()): + module.fail_json(msg="%s Value of %r for item %r is invalid. Value must either be 'unlimited', 'infinity' or -1, all of " + "which indicate no limit, or a limit of 0 or larger. Refer to the limits.conf(5) manual pages for " + "more details." % (prefix, value, item)) + + +def main(): + pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', + 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot'] + + pam_types = ['soft', 'hard', '-'] + + limits_conf = '/etc/security/limits.conf' + + module = AnsibleModule( + # not checking because of daisy chain to file module + argument_spec=dict( + domain=dict(required=True, type='str'), + limit_type=dict(required=True, type='str', choices=pam_types), + limit_item=dict(required=True, type='str', choices=pam_items), + value=dict(required=True, type='str'), + use_max=dict(default=False, type='bool'), + use_min=dict(default=False, type='bool'), + backup=dict(default=False, type='bool'), + dest=dict(default=limits_conf, type='str'), + comment=dict(required=False, default='', type='str') + ), + supports_check_mode=True, + ) + + domain = module.params['domain'] + limit_type = module.params['limit_type'] + limit_item = module.params['limit_item'] + value = module.params['value'] + use_max = module.params['use_max'] + use_min = module.params['use_min'] + backup = module.params['backup'] + limits_conf = module.params['dest'] + new_comment = module.params['comment'] + + changed = False + + if os.path.isfile(limits_conf): + if not os.access(limits_conf, os.W_OK): + module.fail_json(msg="%s is not writable. Use sudo" % limits_conf) + else: + limits_conf_dir = os.path.dirname(limits_conf) + if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK): + open(limits_conf, 'a').close() + changed = True + else: + module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir) + + if use_max and use_min: + module.fail_json(msg="Cannot use use_min and use_max at the same time.") + + _assert_is_valid_value(module, limit_item, value) + + # Backup + if backup: + backup_file = module.backup_local(limits_conf) + + space_pattern = re.compile(r'\s+') + + message = '' + f = open(limits_conf, 'rb') + # Tempfile + nf = tempfile.NamedTemporaryFile(mode='w+') + + found = False + new_value = value + + for line in f: + line = to_native(line, errors='surrogate_or_strict') + if line.startswith('#'): + nf.write(line) + continue + + newline = re.sub(space_pattern, ' ', line).strip() + if not newline: + nf.write(line) + continue + + # Remove comment in line + newline = newline.split('#', 1)[0] + try: + old_comment = line.split('#', 1)[1] + except Exception: + old_comment = '' + + newline = newline.rstrip() + + if not new_comment: + new_comment = old_comment + + line_fields = newline.split(' ') + + if len(line_fields) != 4: + nf.write(line) + continue + + line_domain = line_fields[0] + line_type = line_fields[1] + line_item = line_fields[2] + actual_value = line_fields[3] + + _assert_is_valid_value(module, line_item, actual_value, + prefix="Invalid configuration found in '%s'." 
% limits_conf) + + # Found the line + if line_domain == domain and line_type == limit_type and line_item == limit_item: + found = True + if value == actual_value: + message = line + nf.write(line) + continue + + if line_item not in ['nice', 'priority']: + actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1'] + value_unlimited = value in ['unlimited', 'infinity', '-1'] + else: + actual_value_unlimited = value_unlimited = False + + if use_max: + if actual_value_unlimited: + new_value = actual_value + elif value_unlimited: + new_value = value + else: + new_value = str(max(int(value), int(actual_value))) + + if use_min: + if actual_value_unlimited and value_unlimited: + new_value = actual_value + elif actual_value_unlimited: + new_value = value + elif value_unlimited: + new_value = actual_value + else: + new_value = str(min(int(value), int(actual_value))) + + # Change line only if value has changed + if new_value != actual_value: + changed = True + if new_comment: + new_comment = "\t#" + new_comment + new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n" + message = new_limit + nf.write(new_limit) + else: + message = line + nf.write(line) + else: + nf.write(line) + + if not found: + changed = True + if new_comment: + new_comment = "\t#" + new_comment + new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n" + message = new_limit + nf.write(new_limit) + + f.close() + nf.flush() + + with open(limits_conf, 'r') as content: + content_current = content.read() + + with open(nf.name, 'r') as content: + content_new = content.read() + + if not module.check_mode: + # Copy tempfile to newfile + module.atomic_move(nf.name, limits_conf) + + try: + nf.close() + except Exception: + pass + + res_args = dict( + changed=changed, + msg=message, + diff=dict(before=content_current, after=content_new), + ) + + if backup: + res_args['backup_file'] = backup_file + + module.exit_json(**res_args) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pamd.py b/ansible_collections/community/general/plugins/modules/pamd.py new file mode 100644 index 000000000..6ffc8624e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pamd.py @@ -0,0 +1,853 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Kenneth D. Evensen +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +module: pamd +author: + - Kenneth D. Evensen (@kevensen) +short_description: Manage PAM Modules +description: + - Edit PAM service's type, control, module path and module arguments. + - In order for a PAM rule to be modified, the type, control and + module_path must match an existing rule. See man(5) pam.d for details. +notes: + - This module does not handle authselect profiles. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name generally refers to the PAM service file to + change, for example system-auth. + type: str + required: true + type: + description: + - The type of the PAM rule being modified. + - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str + required: true + choices: [ account, -account, auth, -auth, password, -password, session, -session ] + control: + description: + - The control of the PAM rule being modified. + - This may be a complicated control with brackets. If this is the case, be + sure to put "[bracketed controls]" in quotes. + - The C(type), C(control) and C(module_path) all must match a rule to be modified. + type: str + required: true + module_path: + description: + - The module path of the PAM rule being modified. + - The C(type), C(control) and C(module_path) all must match a rule to be modified. + type: str + required: true + new_type: + description: + - The new type to assign to the new rule. + type: str + choices: [ account, -account, auth, -auth, password, -password, session, -session ] + new_control: + description: + - The new control to assign to the new rule. + type: str + new_module_path: + description: + - The new module path to be assigned to the new rule. + type: str + module_arguments: + description: + - When state is C(updated), the module_arguments will replace existing module_arguments. + - When state is C(args_absent), args matching those listed in module_arguments will be removed. + - When state is C(args_present), any args listed in module_arguments are added if + missing from the existing rule. + - Furthermore, if the module argument takes a value denoted by C(=), + the value will be changed to that specified in module_arguments. + type: list + elements: str + state: + description: + - The default of C(updated) will modify an existing rule if type, + control and module_path all match an existing rule. + - With C(before), the new rule will be inserted before a rule matching type, + control and module_path. + - Similarly, with C(after), the new rule will be inserted after an existing rule matching type, + control and module_path. + - With either C(before) or C(after), new_type, new_control, and new_module_path must all be specified. + - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored. + - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4. + type: str + choices: [ absent, before, after, args_absent, args_present, updated ] + default: updated + path: + description: + - This is the path to the PAM service files. + type: path + default: /etc/pam.d + backup: + description: + - Create a backup file including the timestamp information so you can + get the original file back if you somehow clobbered it incorrectly.
+ type: bool + default: false +''' + +EXAMPLES = r''' +- name: Update pamd rule's control in /etc/pam.d/system-auth + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + new_control: sufficient + +- name: Update pamd rule's complex control in /etc/pam.d/system-auth + community.general.pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + new_control: '[success=2 default=ignore]' + +- name: Insert a new rule before an existing rule + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + new_type: auth + new_control: sufficient + new_module_path: pam_faillock.so + state: before + +- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \ + existing rule pam_rootok.so + community.general.pamd: + name: su + type: auth + control: sufficient + module_path: pam_rootok.so + new_type: auth + new_control: required + new_module_path: pam_wheel.so + module_arguments: 'use_uid' + state: after + +- name: Remove module arguments from an existing rule + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: '' + state: updated + +- name: Replace all module arguments in an existing rule + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: 'preauth + silent + deny=3 + unlock_time=604800 + fail_interval=900' + state: updated + +- name: Remove specific arguments from a rule + community.general.pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: crond,quiet + state: args_absent + +- name: Ensure specific arguments are present in a rule + community.general.pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: crond,quiet + state: args_present + +- name: Ensure specific arguments are present in a rule (alternative) + community.general.pamd: + name: system-auth + type: session + control: '[success=1 default=ignore]' + module_path: pam_succeed_if.so + module_arguments: + - crond + - quiet + state: args_present + +- name: Module arguments requiring commas must be listed as a Yaml list + community.general.pamd: + name: special-module + type: account + control: required + module_path: pam_access.so + module_arguments: + - listsep=, + state: args_present + +- name: Update specific argument value in a rule + community.general.pamd: + name: system-auth + type: auth + control: required + module_path: pam_faillock.so + module_arguments: 'fail_interval=300' + state: args_present + +- name: Add pam common-auth rule for duo + community.general.pamd: + name: common-auth + new_type: auth + new_control: '[success=1 default=ignore]' + new_module_path: '/lib64/security/pam_duo.so' + state: after + type: auth + module_path: pam_sss.so + control: 'requisite' +''' + +RETURN = r''' +change_count: + description: How many rules were changed. + type: int + sample: 1 + returned: success +backupdest: + description: + - "The file name of the backup file, if created." + returned: success + type: str +... 
+''' + + +from ansible.module_utils.basic import AnsibleModule +import os +import re +from tempfile import NamedTemporaryFile +from datetime import datetime + + +RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+ + (?P<control>\[.*\]|\S*)\s+ + (?P<path>\S*)\s* + (?P<args>.*)\s*""", re.X) +RULE_ARG_REGEX = re.compile(r"(\[.*\]|\S*)") + +VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session'] + + +class PamdLine(object): + + def __init__(self, line): + self.line = line + self.prev = None + self.next = None + + @property + def is_valid(self): + if self.line.strip() == '': + return True + return False + + def validate(self): + if not self.is_valid: + return False, "Rule is not valid " + self.line + return True, "Rule is valid " + self.line + + # Method to check if a rule matches the type, control and path. + def matches(self, rule_type, rule_control, rule_path, rule_args=None): + return False + + def __str__(self): + return str(self.line) + + +class PamdEmptyLine(PamdLine): + pass + + +class PamdComment(PamdLine): + + def __init__(self, line): + super(PamdComment, self).__init__(line) + + @property + def is_valid(self): + if self.line.startswith('#'): + return True + return False + + +class PamdInclude(PamdLine): + def __init__(self, line): + super(PamdInclude, self).__init__(line) + + @property + def is_valid(self): + if self.line.startswith('@include'): + return True + return False + + +class PamdRule(PamdLine): + + valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive'] + valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err', + 'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown', + 'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail', + 'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err', + 'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again', + 'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again', + 'incomplete', 'default'] + valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset'] + + def __init__(self, rule_type, rule_control, rule_path, rule_args=None): + self.prev = None + self.next = None + self._control = None + self._args = None + self.rule_type = rule_type + self.rule_control = rule_control + + self.rule_path = rule_path + self.rule_args = rule_args + + # Method to check if a rule matches the type, control and path.
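+ # Editorial note: rule_args is accepted for interface compatibility but is + # deliberately ignored below; two rules count as the same rule when their + # type, control and module path coincide, regardless of module arguments.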
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None): + return (rule_type == self.rule_type and + rule_control == self.rule_control and + rule_path == self.rule_path) + + @classmethod + def rule_from_string(cls, line): + rule_match = RULE_REGEX.search(line) + rule_args = parse_module_arguments(rule_match.group('args')) + return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args) + + def __str__(self): + if self.rule_args: + return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args)) + return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path) + + @property + def rule_control(self): + if isinstance(self._control, list): + return '[' + ' '.join(self._control) + ']' + return self._control + + @rule_control.setter + def rule_control(self, control): + if control.startswith('['): + control = control.replace(' = ', '=').replace('[', '').replace(']', '') + self._control = control.split(' ') + else: + self._control = control + + @property + def rule_args(self): + if not self._args: + return [] + return self._args + + @rule_args.setter + def rule_args(self, args): + self._args = parse_module_arguments(args) + + @property + def line(self): + return str(self) + + @classmethod + def is_action_unsigned_int(cls, string_num): + number = 0 + try: + number = int(string_num) + except ValueError: + return False + + if number >= 0: + return True + return False + + @property + def is_valid(self): + return self.validate()[0] + + def validate(self): + # Validate the rule type + if self.rule_type not in VALID_TYPES: + return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line + # Validate the rule control + if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls: + return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line + elif isinstance(self._control, list): + for control in self._control: + value, action = control.split("=") + if value not in PamdRule.valid_control_values: + return False, "Rule control value, " + value + ", is not valid in rule " + self.line + if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action): + return False, "Rule control action, " + action + ", is not valid in rule " + self.line + + # TODO: Validate path + + return True, "Rule is valid " + self.line + + +# PamdService encapsulates an entire service and contains one or more rules. It seems the best way is to do this +# as a doubly linked list. 
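+# A minimal usage sketch (editorial illustration only, using the methods defined below): +# service = PamdService(open('/etc/pam.d/system-auth').read()) +# service.update_rule('auth', 'required', 'pam_faillock.so', new_control='sufficient') +# print(str(service)) # renders the updated file content, ready to be written back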
+class PamdService(object): + + def __init__(self, content): + self._head = None + self._tail = None + for line in content.splitlines(): + if line.lstrip().startswith('#'): + pamd_line = PamdComment(line) + elif line.lstrip().startswith('@include'): + pamd_line = PamdInclude(line) + elif line.strip() == '': + pamd_line = PamdEmptyLine(line) + else: + pamd_line = PamdRule.rule_from_string(line) + + self.append(pamd_line) + + def append(self, pamd_line): + if self._head is None: + self._head = self._tail = pamd_line + else: + pamd_line.prev = self._tail + pamd_line.next = None + self._tail.next = pamd_line + self._tail = pamd_line + + def remove(self, rule_type, rule_control, rule_path): + current_line = self._head + changed = 0 + + while current_line is not None: + if current_line.matches(rule_type, rule_control, rule_path): + if current_line.prev is not None: + current_line.prev.next = current_line.next + if current_line.next is not None: + current_line.next.prev = current_line.prev + else: + self._head = current_line.next + current_line.next.prev = None + changed += 1 + + current_line = current_line.next + return changed + + def get(self, rule_type, rule_control, rule_path): + lines = [] + current_line = self._head + while current_line is not None: + + if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path): + lines.append(current_line) + + current_line = current_line.next + + return lines + + def has_rule(self, rule_type, rule_control, rule_path): + if self.get(rule_type, rule_control, rule_path): + return True + return False + + def update_rule(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + new_args = parse_module_arguments(new_args, return_none=True) + + changes = 0 + for current_rule in rules_to_find: + rule_changed = False + if new_type: + if current_rule.rule_type != new_type: + rule_changed = True + current_rule.rule_type = new_type + if new_control: + if current_rule.rule_control != new_control: + rule_changed = True + current_rule.rule_control = new_control + if new_path: + if current_rule.rule_path != new_path: + rule_changed = True + current_rule.rule_path = new_path + if new_args is not None: + if current_rule.rule_args != new_args: + rule_changed = True + current_rule.rule_args = new_args + + if rule_changed: + changes += 1 + + return changes + + def insert_before(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + changes = 0 + # There are two cases to consider. + # 1. The new rule doesn't exist before the existing rule + # 2. The new rule exists + + for current_rule in rules_to_find: + # Create a new rule + new_rule = PamdRule(new_type, new_control, new_path, new_args) + # First we'll get the previous rule. + previous_rule = current_rule.prev + + # Next we may have to loop backwards if the previous line is a comment. If it + # is, we'll get the previous "rule's" previous. + while previous_rule is not None and isinstance(previous_rule, (PamdComment, PamdEmptyLine)): + previous_rule = previous_rule.prev + # Next we'll see if the previous rule matches what we are trying to insert. 
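+ # (If it does already match, the desired rule is in place, so we fall + # through without linking a duplicate or counting a change.)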
+ if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path): + # First, set the original previous rule's next to the new_rule + previous_rule.next = new_rule + # Second, set the new_rule's previous to the original previous + new_rule.prev = previous_rule + # Third, set the new rule's next to the current rule + new_rule.next = current_rule + # Fourth, set the current rule's previous to the new_rule + current_rule.prev = new_rule + + changes += 1 + + # Handle the case where it is the first rule in the list. + elif previous_rule is None: + # This is the case where the current rule is not only the first rule + # but the first line as well. So we set the head to the new rule + if current_rule.prev is None: + self._head = new_rule + # This case would occur if the previous line was a comment. + else: + current_rule.prev.next = new_rule + new_rule.prev = current_rule.prev + new_rule.next = current_rule + current_rule.prev = new_rule + changes += 1 + + return changes + + def insert_after(self, rule_type, rule_control, rule_path, + new_type=None, new_control=None, new_path=None, new_args=None): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + changes = 0 + # There are two cases to consider. + # 1. The new rule doesn't exist after the existing rule + # 2. The new rule exists + for current_rule in rules_to_find: + # First we'll get the next rule. + next_rule = current_rule.next + # Next we may have to loop forwards if the next line is a comment. If it + # is, we'll get the next "rule's" next. + while next_rule is not None and isinstance(next_rule, (PamdComment, PamdEmptyLine)): + next_rule = next_rule.next + + # First we create a new rule + new_rule = PamdRule(new_type, new_control, new_path, new_args) + if next_rule is not None and not next_rule.matches(new_type, new_control, new_path): + # If the next rule doesn't match, we'll insert our new rule.
+ + # Second set the original next rule's previous to the new_rule + next_rule.prev = new_rule + # Third, set the new_rule's next to the original next rule + new_rule.next = next_rule + # Fourth, set the new rule's previous to the current rule + new_rule.prev = current_rule + # Fifth, set the current rule's next to the new_rule + current_rule.next = new_rule + + changes += 1 + + # This is the case where the current_rule is the last in the list + elif next_rule is None: + new_rule.prev = self._tail + new_rule.next = None + self._tail.next = new_rule + self._tail = new_rule + + current_rule.next = new_rule + changes += 1 + + return changes + + def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + args_to_add = parse_module_arguments(args_to_add) + + changes = 0 + + for current_rule in rules_to_find: + rule_changed = False + + # create some structures to evaluate the situation + simple_new_args = set() + key_value_new_args = dict() + + for arg in args_to_add: + if arg.startswith("["): + continue + elif "=" in arg: + key, value = arg.split("=") + key_value_new_args[key] = value + else: + simple_new_args.add(arg) + + key_value_new_args_set = set(key_value_new_args) + + simple_current_args = set() + key_value_current_args = dict() + + for arg in current_rule.rule_args: + if arg.startswith("["): + continue + elif "=" in arg: + key, value = arg.split("=") + key_value_current_args[key] = value + else: + simple_current_args.add(arg) + + key_value_current_args_set = set(key_value_current_args) + + new_args_to_add = list() + + # Handle new simple arguments + if simple_new_args.difference(simple_current_args): + for arg in simple_new_args.difference(simple_current_args): + new_args_to_add.append(arg) + + # Handle new key value arguments + if key_value_new_args_set.difference(key_value_current_args_set): + for key in key_value_new_args_set.difference(key_value_current_args_set): + new_args_to_add.append(key + '=' + key_value_new_args[key]) + + if new_args_to_add: + current_rule.rule_args += new_args_to_add + rule_changed = True + + # Handle existing key value arguments when value is not equal + if key_value_new_args_set.intersection(key_value_current_args_set): + for key in key_value_new_args_set.intersection(key_value_current_args_set): + if key_value_current_args[key] != key_value_new_args[key]: + arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key]) + current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key]) + rule_changed = True + + if rule_changed: + changes += 1 + + return changes + + def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove): + # Get a list of rules we want to change + rules_to_find = self.get(rule_type, rule_control, rule_path) + + args_to_remove = parse_module_arguments(args_to_remove) + + changes = 0 + + for current_rule in rules_to_find: + if not args_to_remove: + args_to_remove = [] + + # Let's check to see if there are any args to remove by finding the intersection + # of the rule's current args and the args_to_remove lists + if not list(set(current_rule.rule_args) & set(args_to_remove)): + continue + + # There are args to remove, so we create a list of new_args absent the args + # to remove. 
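+ # e.g. rule args ['preauth', 'deny=3', 'unlock_time=604800'] with + # args_to_remove ['deny=3'] are reduced to ['preauth', 'unlock_time=604800'].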
+ current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove] + + changes += 1 + + return changes + + def validate(self): + current_line = self._head + + while current_line is not None: + curr_validate = current_line.validate() + if not curr_validate[0]: + return curr_validate + current_line = current_line.next + return True, "Module is valid" + + def __str__(self): + lines = [] + current_line = self._head + + mark = "# Updated by Ansible - %s" % datetime.now().isoformat() + while current_line is not None: + lines.append(str(current_line)) + current_line = current_line.next + + if len(lines) <= 1: + lines.insert(0, "") + lines.insert(1, mark) + else: + if lines[1].startswith("# Updated by Ansible"): + lines[1] = mark + else: + lines.insert(1, mark) + + return '\n'.join(lines) + '\n' + + +def parse_module_arguments(module_arguments, return_none=False): + # If args is None, return empty list by default. + # But if return_none is True, then return None + if module_arguments is None: + return None if return_none else [] + if isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]: + return [] + + if not isinstance(module_arguments, list): + module_arguments = [module_arguments] + + # From this point on, module_arguments is guaranteed to be a list, empty or not + parsed_args = [] + + re_clear_spaces = re.compile(r"\s*=\s*") + for arg in module_arguments: + for item in filter(None, RULE_ARG_REGEX.findall(arg)): + if not item.startswith("["): + re_clear_spaces.sub("=", item) + parsed_args.append(item) + + return parsed_args + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + type=dict(type='str', required=True, choices=VALID_TYPES), + control=dict(type='str', required=True), + module_path=dict(type='str', required=True), + new_type=dict(type='str', choices=VALID_TYPES), + new_control=dict(type='str'), + new_module_path=dict(type='str'), + module_arguments=dict(type='list', elements='str'), + state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']), + path=dict(type='path', default='/etc/pam.d'), + backup=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_if=[ + ("state", "args_present", ["module_arguments"]), + ("state", "args_absent", ["module_arguments"]), + ("state", "before", ["new_control", "new_type", "new_module_path"]), + ("state", "after", ["new_control", "new_type", "new_module_path"]), + ], + ) + content = str() + fname = os.path.join(module.params["path"], module.params["name"]) + + # Open the file and read the content or fail + try: + with open(fname, 'r') as service_file_obj: + content = service_file_obj.read() + except IOError as e: + # If unable to read the file, fail out + module.fail_json(msg='Unable to open/read PAM module file %s with error %s.' 
% (fname, str(e))) + + # Assuming we didn't fail, create the service + service = PamdService(content) + # Set the action + action = module.params['state'] + + changes = 0 + + # Take action + if action == 'updated': + changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'before': + changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'after': + changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'], + module.params['new_type'], module.params['new_control'], module.params['new_module_path'], + module.params['module_arguments']) + elif action == 'args_absent': + changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'], + module.params['module_arguments']) + elif action == 'args_present': + if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]: + module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.") + + changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'], + module.params['module_arguments']) + elif action == 'absent': + changes = service.remove(module.params['type'], module.params['control'], module.params['module_path']) + + valid, msg = service.validate() + + # If the module is not valid (meaning one of the rules is invalid), we will fail + if not valid: + module.fail_json(msg=msg) + + result = dict( + changed=(changes > 0), + change_count=changes, + backupdest='', + ) + + # If not check mode and something changed, backup the original if necessary then write out the file or fail + if not module.check_mode and result['changed']: + # First, create a backup if desired. + if module.params['backup']: + result['backupdest'] = module.backup_local(fname) + try: + temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False) + with open(temp_file.name, 'w') as fd: + fd.write(str(service)) + + except IOError: + module.fail_json(msg='Unable to create temporary file %s' % temp_file) + + module.atomic_move(temp_file.name, os.path.realpath(fname)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/parted.py b/ansible_collections/community/general/plugins/modules/parted.py new file mode 100644 index 000000000..8e6038180 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/parted.py @@ -0,0 +1,810 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Fabrizio Colonna +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +author: + - Fabrizio Colonna (@ColOfAbRiX) +module: parted +short_description: Configure block device partitions +description: + - This module allows configuring block device partitions using the C(parted) + command line tool.
For a full description of the fields and the options, + check the GNU parted manual. +requirements: + - This module requires C(parted) version 1.8.3 and above. + - Option I(align) (except C(undefined)) requires C(parted) 2.1 or above. + - If the version of C(parted) is below 3.1, it requires a Linux version running + the C(sysfs) file system C(/sys/). + - Requires the C(resizepart) command when using the I(resize) parameter. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + device: + description: + - The block device (disk) where to operate. + - Regular files can also be partitioned, but it is recommended to create a + loopback device using C(losetup) to easily access its partitions. + type: str + required: true + align: + description: + - Set alignment for newly created partitions. Use C(undefined) for parted default alignment. + type: str + choices: [ cylinder, minimal, none, optimal, undefined ] + default: optimal + number: + description: + - The partition number being affected. + - Required when performing any action on the disk, except fetching information. + type: int + unit: + description: + - Selects the current default unit that Parted will use to display + locations and capacities on the disk and to interpret those given by the + user if they are not suffixed by a unit. + - When fetching information about a disk, it is recommended to always specify a unit. + type: str + choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ] + default: KiB + label: + description: + - Disk label type or partition table to use. + - If I(device) already contains a different label, it will be changed to I(label) + and any previous partitions will be lost. + - A I(name) must be specified for a C(gpt) partition table. + type: str + choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ] + default: msdos + part_type: + description: + - May be specified only with I(label=msdos) or I(label=dvh). + - Neither I(part_type) nor I(name) may be used with I(label=sun). + type: str + choices: [ extended, logical, primary ] + default: primary + part_start: + description: + - Where the partition will start as offset from the beginning of the disk, + that is, the "distance" from the start of the disk. Negative numbers + specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted + (except compat) and it is case sensitive, e.g. C(10GiB), C(15%). + - Using negative values may require setting of I(fs_type) (see notes). + type: str + default: 0% + part_end: + description: + - Where the partition will end as offset from the beginning of the disk, + that is, the "distance" from the start of the disk. Negative numbers + specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted + (except compat) and it is case sensitive, e.g. C(10GiB), C(15%). + type: str + default: 100% + name: + description: + - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only). + type: str + flags: + description: A list of the flags that have to be set on the partition. + type: list + elements: str + state: + description: + - Whether to create or delete a partition. + - If set to C(info) the module will only return the device information.
+ type: str + choices: [ absent, present, info ] + default: info + fs_type: + description: + - If specified and the partition does not exist, will set filesystem type to given partition. + - Parameter optional, but see notes below about negative I(part_start) values. + type: str + version_added: '0.2.0' + resize: + description: + - Call C(resizepart) on existing partitions to match the size specified by I(part_end). + type: bool + default: false + version_added: '1.3.0' + +notes: + - When fetching information about a new disk and when the version of parted + installed on the system is before version 3.1, the module queries the kernel + through C(/sys/) to obtain disk information. In this case the units CHS and + CYL are not supported. + - Negative I(part_start) start values were rejected if I(fs_type) was not given. + This bug was fixed in parted 3.2.153. If you want to use negative I(part_start), + specify I(fs_type) as well or make sure your system contains newer parted. +''' + +RETURN = r''' +partition_info: + description: Current partition information + returned: success + type: complex + contains: + disk: + description: Generic device information. + type: dict + partitions: + description: List of device partitions. + type: list + script: + description: parted script executed by module + type: str + sample: { + "disk": { + "dev": "/dev/sdb", + "logical_block": 512, + "model": "VMware Virtual disk", + "physical_block": 512, + "size": 5.0, + "table": "msdos", + "unit": "gib" + }, + "partitions": [{ + "begin": 0.0, + "end": 1.0, + "flags": ["boot", "lvm"], + "fstype": "", + "name": "", + "num": 1, + "size": 1.0 + }, { + "begin": 1.0, + "end": 5.0, + "flags": [], + "fstype": "", + "name": "", + "num": 2, + "size": 4.0 + }], + "script": "unit KiB print " + } +''' + +EXAMPLES = r''' +- name: Create a new ext4 primary partition + community.general.parted: + device: /dev/sdb + number: 1 + state: present + fs_type: ext4 + +- name: Remove partition number 1 + community.general.parted: + device: /dev/sdb + number: 1 + state: absent + +- name: Create a new primary partition with a size of 1GiB + community.general.parted: + device: /dev/sdb + number: 1 + state: present + part_end: 1GiB + +- name: Create a new primary partition for LVM + community.general.parted: + device: /dev/sdb + number: 2 + flags: [ lvm ] + state: present + part_start: 1GiB + +- name: Create a new primary partition with a size of 1GiB at disk's end + community.general.parted: + device: /dev/sdb + number: 3 + state: present + fs_type: ext3 + part_start: -1GiB + +# Example on how to read info and reuse it in subsequent task +- name: Read device information (always use unit when probing) + community.general.parted: device=/dev/sdb unit=MiB + register: sdb_info + +- name: Remove all partitions from disk + community.general.parted: + device: /dev/sdb + number: '{{ item.num }}' + state: absent + loop: '{{ sdb_info.partitions }}' + +- name: Extend an existing partition to fill all available space + community.general.parted: + device: /dev/sdb + number: "{{ sdb_info.partitions | length }}" + part_end: "100%" + resize: true + state: present +''' + + +from ansible.module_utils.basic import AnsibleModule +import math +import re +import os + + +# Reference prefixes (International System of Units and IEC) +units_si = ['B', 'KB', 'MB', 'GB', 'TB'] +units_iec = ['KiB', 'MiB', 'GiB', 'TiB'] +parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact'] + + +def parse_unit(size_str, unit=''): + """ + Parses a string containing a 
size or boundary information + """ + matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str) + if matches is None: + # "<cylinder>,<head>,<sector>" format + matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str) + if matches is None: + module.fail_json( + msg="Error interpreting parted size output: '%s'" % size_str + ) + + size = { + 'cylinder': int(matches.group(1)), + 'head': int(matches.group(2)), + 'sector': int(matches.group(3)) + } + unit = 'chs' + + else: + # Normal format: "<number>[<unit>]" + if matches.group(2) is not None: + unit = matches.group(2) + + size = float(matches.group(1)) + + return size, unit + + +def parse_partition_info(parted_output, unit): + """ + Parses the output of parted and transforms the data into + a dictionary. + + Parted Machine Parseable Output: + See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/00 + 0573.html + - All lines end with a semicolon (;) + - The first line indicates the units in which the output is expressed. + CHS, CYL and BYT stand for CHS, Cylinder and Bytes respectively. + - The second line is made of disk information in the following format: + "path":"size":"transport-type":"logical-sector-size":"physical-sector-siz + e":"partition-table-type":"model-name"; + - If the first line was either CYL or CHS, the next line will contain + information on no. of cylinders, heads, sectors and cylinder size. + - Partition information begins from the next line. This is of the format: + (for BYT) + "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s + et"; + (for CHS/CYL) + "number":"begin":"end":"filesystem-type":"partition-name":"flags-set"; + """ + lines = [x for x in parted_output.split('\n') if x.strip() != ''] + + # Generic device info + generic_params = lines[1].rstrip(';').split(':') + + # The unit is read once, because parted always returns the same unit + size, unit = parse_unit(generic_params[1], unit) + + generic = { + 'dev': generic_params[0], + 'size': size, + 'unit': unit.lower(), + 'table': generic_params[5], + 'model': generic_params[6], + 'logical_block': int(generic_params[3]), + 'physical_block': int(generic_params[4]) + } + + # CYL and CHS have an additional line in the output + if unit in ['cyl', 'chs']: + chs_info = lines[2].rstrip(';').split(':') + cyl_size, cyl_unit = parse_unit(chs_info[3]) + generic['chs_info'] = { + 'cylinders': int(chs_info[0]), + 'heads': int(chs_info[1]), + 'sectors': int(chs_info[2]), + 'cyl_size': cyl_size, + 'cyl_size_unit': cyl_unit.lower() + } + lines = lines[1:] + + parts = [] + for line in lines[2:]: + part_params = line.rstrip(';').split(':') + + # CHS uses a different format than BYT, but contrary to what is stated by + # the author, CYL is the same as BYT. I've tested this undocumented + # behaviour down to parted version 1.8.3, which is the first version + # that supports the machine parseable output. + if unit != 'chs': + size = parse_unit(part_params[3])[0] + fstype = part_params[4] + name = part_params[5] + flags = part_params[6] + + else: + size = "" + fstype = part_params[3] + name = part_params[4] + flags = part_params[5] + + parts.append({ + 'num': int(part_params[0]), + 'begin': parse_unit(part_params[1])[0], + 'end': parse_unit(part_params[2])[0], + 'size': size, + 'fstype': fstype, + 'name': name, + 'flags': [f.strip() for f in flags.split(', ') if f != ''], + 'unit': unit.lower(), + }) + + return {'generic': generic, 'partitions': parts} + + +def format_disk_size(size_bytes, unit): + """ + Formats a size in bytes into a different unit, like parted does.
+def format_disk_size(size_bytes, unit):
+    """
+    Formats a size in bytes into a different unit, like parted does. It doesn't
+    manage CYL and CHS formats, though.
+    This function has been adapted from https://github.com/Distrotech/parted/blo
+    b/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c
+    """
+    global units_si, units_iec  # pylint: disable=global-variable-not-assigned
+
+    unit = unit.lower()
+
+    # Shortcut
+    if size_bytes == 0:
+        return 0.0, 'b'
+
+    # Cases where we default to 'compact'
+    if unit in ['', 'compact', 'cyl', 'chs']:
+        index = max(0, int(
+            (math.log10(size_bytes) - 1.0) / 3.0
+        ))
+        unit = 'b'
+        if index < len(units_si):
+            unit = units_si[index]
+
+    # Find the appropriate multiplier
+    multiplier = 1.0
+    if unit in units_si:
+        multiplier = 1000.0 ** units_si.index(unit)
+    elif unit in units_iec:
+        multiplier = 1024.0 ** units_iec.index(unit)
+
+    output = size_bytes // multiplier * (1 + 1E-16)
+
+    # Corrections to round up as per IEEE754 standard
+    if output < 10:
+        w = output + 0.005
+    elif output < 100:
+        w = output + 0.05
+    else:
+        w = output + 0.5
+
+    if w < 10:
+        precision = 2
+    elif w < 100:
+        precision = 1
+    else:
+        precision = 0
+
+    # Round and return
+    return round(output, precision), unit
+
+
+def convert_to_bytes(size_str, unit):
+    size = float(size_str)
+    multiplier = 1.0
+    if unit in units_si:
+        multiplier = 1000.0 ** units_si.index(unit)
+    elif unit in units_iec:
+        multiplier = 1024.0 ** (units_iec.index(unit) + 1)
+    elif unit in ['', 'compact', 'cyl', 'chs']:
+        # As per format_disk_size, default to compact, which defaults to megabytes
+        multiplier = 1000.0 ** units_si.index("MB")
+
+    output = size * multiplier
+    return int(output)
+
+
+def get_unlabeled_device_info(device, unit):
+    """
+    Fetches device information directly from the kernel; it is used when
+    parted cannot work because of a missing label. It always returns an
+    'unknown' label.
+    """
+    device_name = os.path.basename(device)
+    base = "/sys/block/%s" % device_name
+
+    vendor = read_record(base + "/device/vendor", "Unknown")
+    model = read_record(base + "/device/model", "model")
+    logic_block = int(read_record(base + "/queue/logical_block_size", 0))
+    phys_block = int(read_record(base + "/queue/physical_block_size", 0))
+    size_bytes = int(read_record(base + "/size", 0)) * logic_block
+
+    size, unit = format_disk_size(size_bytes, unit)
+
+    return {
+        'generic': {
+            'dev': device,
+            'table': "unknown",
+            'size': size,
+            'unit': unit,
+            'logical_block': logic_block,
+            'physical_block': phys_block,
+            'model': "%s %s" % (vendor, model),
+        },
+        'partitions': []
+    }
+
+
+def get_device_info(device, unit):
+    """
+    Fetches information about a disk and its partitions and returns a
+    dictionary.
+    """
+    global module, parted_exec  # pylint: disable=global-variable-not-assigned
+
+    # If parted complains about missing labels, it means there are no partitions.
+    # In this case only, use a custom function to fetch information and emulate
+    # parted formats for the unit.
+    label_needed = check_parted_label(device)
+    if label_needed:
+        return get_unlabeled_device_info(device, unit)
+
+    command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
+    rc, out, err = module.run_command(command)
+    if rc != 0 and 'unrecognised disk label' not in err:
+        module.fail_json(msg=(
+            "Error while getting device information with parted "
+            "script: '%s'" % command),
+            rc=rc, out=out, err=err
+        )
+
+    return parse_partition_info(out, unit)
+
+
+def check_parted_label(device):
+    """
+    Determines if parted needs a label to complete its duties. Versions prior
+    to 3.1 don't return data when there is no label.
For more information see: + http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html + """ + global parted_exec # pylint: disable=global-variable-not-assigned + + # Check the version + parted_major, parted_minor, dummy = parted_version() + if (parted_major == 3 and parted_minor >= 1) or parted_major > 3: + return False + + # Older parted versions return a message in the stdout and RC > 0. + rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device)) + if rc != 0 and 'unrecognised disk label' in out.lower(): + return True + + return False + + +def parse_parted_version(out): + """ + Returns version tuple from the output of "parted --version" command + """ + lines = [x for x in out.split('\n') if x.strip() != ''] + if len(lines) == 0: + return None, None, None + + # Sample parted versions (see as well test unit): + # parted (GNU parted) 3.3 + # parted (GNU parted) 3.4.5 + # parted (GNU parted) 3.3.14-dfc61 + matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip()) + + if matches is None: + return None, None, None + + # Convert version to numbers + major = int(matches.group(1)) + minor = int(matches.group(2)) + rev = 0 + if matches.group(3) is not None: + rev = int(matches.group(3)) + + return major, minor, rev + + +def parted_version(): + """ + Returns the major and minor version of parted installed on the system. + """ + global module, parted_exec # pylint: disable=global-variable-not-assigned + + rc, out, err = module.run_command("%s --version" % parted_exec) + if rc != 0: + module.fail_json( + msg="Failed to get parted version.", rc=rc, out=out, err=err + ) + + (major, minor, rev) = parse_parted_version(out) + if major is None: + module.fail_json(msg="Failed to get parted version.", rc=0, out=out) + + return major, minor, rev + + +def parted(script, device, align): + """ + Runs a parted script. + """ + global module, parted_exec # pylint: disable=global-variable-not-assigned + + align_option = '-a %s' % align + if align == 'undefined': + align_option = '' + + if script and not module.check_mode: + command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script) + rc, out, err = module.run_command(command) + + if rc != 0: + module.fail_json( + msg="Error while running parted script: %s" % command.strip(), + rc=rc, out=out, err=err + ) + + +def read_record(file_path, default=None): + """ + Reads the first line of a file and returns it. + """ + try: + f = open(file_path, 'r') + try: + return f.readline().strip() + finally: + f.close() + except IOError: + return default + + +def part_exists(partitions, attribute, number): + """ + Looks if a partition that has a specific value for a specific attribute + actually exists. 
+ """ + return any( + part[attribute] and + part[attribute] == number for part in partitions + ) + + +def check_size_format(size_str): + """ + Checks if the input string is an allowed size + """ + size, unit = parse_unit(size_str) + return unit in parted_units + + +def main(): + global module, units_si, units_iec, parted_exec # pylint: disable=global-variable-not-assigned + + changed = False + output_script = "" + script = "" + module = AnsibleModule( + argument_spec=dict( + device=dict(type='str', required=True), + align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal', 'undefined']), + number=dict(type='int'), + + # unit command + unit=dict(type='str', default='KiB', choices=parted_units), + + # mklabel command + label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']), + + # mkpart [] command + part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']), + part_start=dict(type='str', default='0%'), + part_end=dict(type='str', default='100%'), + fs_type=dict(type='str'), + + # name command + name=dict(type='str'), + + # set command + flags=dict(type='list', elements='str'), + + # rm/mkpart command + state=dict(type='str', default='info', choices=['absent', 'info', 'present']), + + # resize part + resize=dict(type='bool', default=False), + ), + required_if=[ + ['state', 'present', ['number']], + ['state', 'absent', ['number']], + ], + supports_check_mode=True, + ) + module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'} + + # Data extraction + device = module.params['device'] + align = module.params['align'] + number = module.params['number'] + unit = module.params['unit'] + label = module.params['label'] + part_type = module.params['part_type'] + part_start = module.params['part_start'] + part_end = module.params['part_end'] + name = module.params['name'] + state = module.params['state'] + flags = module.params['flags'] + fs_type = module.params['fs_type'] + resize = module.params['resize'] + + # Parted executable + parted_exec = module.get_bin_path('parted', True) + + # Conditioning + if number is not None and number < 1: + module.fail_json(msg="The partition number must be greater then 0.") + if not check_size_format(part_start): + module.fail_json( + msg="The argument 'part_start' doesn't respect required format." + "The size unit is case sensitive.", + err=parse_unit(part_start) + ) + if not check_size_format(part_end): + module.fail_json( + msg="The argument 'part_end' doesn't respect required format." 
+ "The size unit is case sensitive.", + err=parse_unit(part_end) + ) + + # Read the current disk information + current_device = get_device_info(device, unit) + current_parts = current_device['partitions'] + + if state == 'present': + + # Assign label if required + mklabel_needed = current_device['generic'].get('table', None) != label + if mklabel_needed: + script += "mklabel %s " % label + + # Create partition if required + if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)): + script += "mkpart %s %s%s %s " % ( + part_type, + '%s ' % fs_type if fs_type is not None else '', + part_start, + part_end + ) + + # Set the unit of the run + if unit and script: + script = "unit %s %s" % (unit, script) + + # If partition exists, try to resize + if resize and part_exists(current_parts, 'num', number): + # Ensure new end is different to current + partition = [p for p in current_parts if p['num'] == number][0] + current_part_end = convert_to_bytes(partition['end'], unit) + + size, parsed_unit = parse_unit(part_end, unit) + if parsed_unit == "%": + size = int((int(current_device['generic']['size']) * size) / 100) + parsed_unit = unit + + desired_part_end = convert_to_bytes(size, parsed_unit) + + if current_part_end != desired_part_end: + script += "resizepart %s %s " % ( + number, + part_end + ) + + # Execute the script and update the data structure. + # This will create the partition for the next steps + if script: + output_script += script + parted(script, device, align) + changed = True + script = "" + + if not module.check_mode: + current_parts = get_device_info(device, unit)['partitions'] + + if part_exists(current_parts, 'num', number) or module.check_mode: + if changed and module.check_mode: + partition = {'flags': []} # Empty structure for the check-mode + else: + partition = [p for p in current_parts if p['num'] == number][0] + + # Assign name to the partition + if name is not None and partition.get('name', None) != name: + # Wrap double quotes in single quotes so the shell doesn't strip + # the double quotes as those need to be included in the arg + # passed to parted + script += 'name %s \'"%s"\' ' % (number, name) + + # Manage flags + if flags: + # Parted infers boot with esp, if you assign esp, boot is set + # and if boot is unset, esp is also unset. 
+                if 'esp' in flags and 'boot' not in flags:
+                    flags.append('boot')
+
+                # Compute only the changes in flags status
+                flags_off = list(set(partition['flags']) - set(flags))
+                flags_on = list(set(flags) - set(partition['flags']))
+
+                for f in flags_on:
+                    script += "set %s %s on " % (number, f)
+
+                for f in flags_off:
+                    script += "set %s %s off " % (number, f)
+
+            # Set the unit of the run
+            if unit and script:
+                script = "unit %s %s" % (unit, script)
+
+            # Execute the script
+            if script:
+                output_script += script
+                changed = True
+                parted(script, device, align)
+
+    elif state == 'absent':
+        # Remove the partition
+        if part_exists(current_parts, 'num', number) or module.check_mode:
+            script = "rm %s " % number
+            output_script += script
+            changed = True
+            parted(script, device, align)
+
+    elif state == 'info':
+        output_script = "unit '%s' print " % unit
+
+    # Final status of the device
+    final_device_status = get_device_info(device, unit)
+    module.exit_json(
+        changed=changed,
+        disk=final_device_status['generic'],
+        partitions=final_device_status['partitions'],
+        script=output_script.strip()
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/pear.py b/ansible_collections/community/general/plugins/modules/pear.py
new file mode 100644
index 000000000..d7cb01b92
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pear.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Afterburn
+# Copyright (c) 2013, Aaron Bull Schaefer
+# Copyright (c) 2015, Jonathan Lestrelin
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pear
+short_description: Manage pear/pecl packages
+description:
+  - Manage PHP packages with the pear package manager.
+author:
+  - Jonathan Lestrelin (@jle64)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    type: str
+    description:
+      - Name of the package to install, upgrade, or remove.
+    required: true
+    aliases: [pkg]
+  state:
+    type: str
+    description:
+      - Desired state of the package.
+    default: "present"
+    choices: ["present", "installed", "latest", "absent", "removed"]
+  executable:
+    type: path
+    description:
+      - Path to the pear executable.
+  prompts:
+    description:
+      - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question.
+      - Prompts will be processed in the same order as the packages list.
+      - You can optionally specify an answer to any question in the list.
+      - If no answer is provided, the list item will only contain the regular expression.
+      - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
+      - You can provide a list containing items with or without answer.
+      - A prompt list can be shorter or longer than the packages list, but a warning will be issued.
+      - If you want to specify that a package in the middle of a list will not need prompts, use C(null) for its entry.
+    type: list
+    elements: raw
+    version_added: 0.2.0
+'''
+
+EXAMPLES = r'''
+- name: Install pear package
+  community.general.pear:
+    name: Net_URL2
+    state: present
+
+- name: Install pecl package
+  community.general.pear:
+    name: pecl/json_post
+    state: present
+
+- name: Install pecl package with expected prompt
+  community.general.pear:
+    name: pecl/apcu
+    state: present
+    prompts:
+      - (.*)Enable internal debugging in APCu \[no\]
+
+- name: Install pecl package with expected prompt and an answer
+  community.general.pear:
+    name: pecl/apcu
+    state: present
+    prompts:
+      - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once with prompts.
+    Prompts will be processed in the same order as the packages.
+    If there are more prompts than packages, packages without prompts will be installed without any prompt expected.
+    If there are more packages than prompts, additional prompts will be ignored.
+  community.general.pear:
+    name: pecl/gnupg, pecl/apcu
+    state: present
+    prompts:
+      - I am a test prompt because gnupg doesn't ask anything
+      - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once skipping the first prompt.
+    Prompts will be processed in the same order as the packages.
+    If there are more prompts than packages, packages without prompts will be installed without any prompt expected.
+    If there are more packages than prompts, additional prompts will be ignored.
+  community.general.pear:
+    name: pecl/gnupg, pecl/apcu
+    state: present
+    prompts:
+      - null
+      - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Upgrade package
+  community.general.pear:
+    name: Net_URL2
+    state: latest
+
+- name: Remove packages
+  community.general.pear:
+    name: Net_URL2,pecl/json_post
+    state: absent
+'''
+
+import os
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_local_version(pear_output):
+    """Take pear remoteinfo output and get the installed version"""
+    lines = pear_output.split('\n')
+    for line in lines:
+        if 'Installed ' in line:
+            installed = line.rsplit(None, 1)[-1].strip()
+            if installed == '-':
+                continue
+            return installed
+    return None
+
+
+def _get_pear_path(module):
+    if module.params['executable'] and os.path.isfile(module.params['executable']):
+        result = module.params['executable']
+    else:
+        result = module.get_bin_path('pear', True, [module.params['executable']])
+    return result
+
+
+def get_repository_version(pear_output):
+    """Take pear remote-info output and get the latest version"""
+    lines = pear_output.split('\n')
+    for line in lines:
+        if 'Latest ' in line:
+            return line.rsplit(None, 1)[-1].strip()
+    return None
+
+
+def query_package(module, name, state="present"):
+    """Query the package status in both the local system and the repository.
+ Returns a boolean to indicate if the package is installed, + and a second boolean to indicate if the package is up-to-date.""" + if state == "present": + lcmd = "%s info %s" % (_get_pear_path(module), name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally + return False, False + + rcmd = "%s remote-info %s" % (_get_pear_path(module), name) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + + # get the version installed locally (if any) + lversion = get_local_version(rstdout) + + # get the version in the repository + rversion = get_repository_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, + # and the result of the version number comparison + # to determine if the package is up-to-date. + return True, (lversion == rversion) + + return False, False + + +def remove_packages(module, packages): + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + installed, updated = query_package(module, package) + if not installed: + continue + + cmd = "%s uninstall %s" % (_get_pear_path(module), package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr))) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, state, packages, prompts): + install_c = 0 + has_prompt = bool(prompts) + default_stdin = "\n" + + if has_prompt: + nb_prompts = len(prompts) + nb_packages = len(packages) + + if nb_prompts > 0 and (nb_prompts != nb_packages): + if nb_prompts > nb_packages: + diff = nb_prompts - nb_packages + msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (to_text(nb_packages), to_text(nb_prompts), to_text(diff)) + else: + diff = nb_packages - nb_prompts + msg = "%s packages to install but only %s prompts to expect. 
%s packages won't be expected to have a prompt" \
+                    % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+            module.warn(msg)
+
+        # Preparing prompts answer according to item type
+        tmp_prompts = []
+        for _item in prompts:
+            # If the current item is a dict then we expect its key to be the prompt regex and its value to be the answer
+            # We also expect here that the dict only has ONE key and the first key will be taken
+            if isinstance(_item, dict):
+                key = list(_item.keys())[0]
+                answer = _item[key] + "\n"
+
+                tmp_prompts.append((key, answer))
+            elif not _item:
+                tmp_prompts.append((None, default_stdin))
+            else:
+                tmp_prompts.append((_item, default_stdin))
+        prompts = tmp_prompts
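+
+    # For example (illustrative), prompts given as
+    #   [null, {'(.*)\[no\]': 'yes'}]
+    # are normalized above to
+    #   [(None, '\n'), ('(.*)\[no\]', 'yes\n')]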
+    for i, package in enumerate(packages):
+        # if the package is installed and state == present
+        # or state == latest and is up-to-date then skip
+        installed, updated = query_package(module, package)
+        if installed and (state == 'present' or (state == 'latest' and updated)):
+            continue
+
+        if state == 'present':
+            command = 'install'
+
+        if state == 'latest':
+            command = 'upgrade'
+
+        if has_prompt and i < len(prompts):
+            prompt_regex = prompts[i][0]
+            data = prompts[i][1]
+        else:
+            prompt_regex = None
+            data = default_stdin
+
+        cmd = "%s %s %s" % (_get_pear_path(module), command, package)
+        rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True)
+        if rc != 0:
+            module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))
+
+        install_c += 1
+
+    if install_c > 0:
+        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+    module.exit_json(changed=False, msg="package(s) already installed")
+
+
+def check_packages(module, packages, state):
+    would_be_changed = []
+    for package in packages:
+        installed, updated = query_package(module, package)
+        if ((state in ["present", "latest"] and not installed) or
+                (state == "absent" and installed) or
+                (state == "latest" and not updated)):
+            would_be_changed.append(package)
+    if would_be_changed:
+        if state == "absent":
+            state = "removed"
+        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+            len(would_be_changed), state))
+    else:
+        module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(aliases=['pkg'], required=True),
+            state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+            executable=dict(default=None, required=False, type='path'),
+            prompts=dict(default=None, required=False, type='list', elements='raw'),
+        ),
+        supports_check_mode=True)
+
+    p = module.params
+
+    # normalize the state parameter
+    if p['state'] in ['present', 'installed']:
+        p['state'] = 'present'
+    elif p['state'] in ['absent', 'removed']:
+        p['state'] = 'absent'
+
+    if p['name']:
+        pkgs = p['name'].split(',')
+
+        pkg_files = []
+        for i, pkg in enumerate(pkgs):
+            pkg_files.append(None)
+
+    if module.check_mode:
+        check_packages(module, pkgs, p['state'])
+
+    if p['state'] in ['present', 'latest']:
+        install_packages(module, p['state'], pkgs, p["prompts"])
+    elif p['state'] == 'absent':
+        remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/pids.py b/ansible_collections/community/general/plugins/modules/pids.py
new file mode 100644
index 000000000..665adb142
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pids.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pids
+description: "Retrieves a list of PIDs of a given process name in Ansible controller/controlled machines. Returns an empty list if no process with that name exists."
+short_description: Retrieves a list of process IDs if the process is running, otherwise an empty list
+author:
+  - Saranya Sridharan (@saranyasridharan)
+requirements:
+  - psutil (Python module)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description: The name of the process(es) you want to get PID(s) for.
+    type: str
+  pattern:
+    description: The pattern (regular expression) to match the process(es) you want to get PID(s) for.
+    type: str
+    version_added: 3.0.0
+  ignore_case:
+    description: Ignore case in pattern if using the I(pattern) option.
+    type: bool
+    default: false
+    version_added: 3.0.0
+'''
+
+EXAMPLES = r'''
+# Pass the process name
+- name: Getting process IDs of the process
+  community.general.pids:
+    name: python
+  register: pids_of_python
+
+- name: Printing the process IDs obtained
+  ansible.builtin.debug:
+    msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
+
+- name: Getting process IDs of processes matching pattern
+  community.general.pids:
+    pattern: python(2(\.7)?|3(\.6)?)?\s+myapp\.py
+  register: myapp_pids
+'''
+
+RETURN = '''
+pids:
+  description: Process IDs of the given process
+  returned: list of none, one, or more process IDs
+  type: list
+  sample: [100,200]
+'''
+
+import abc
+import re
+from os.path import basename
+
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils import deps
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+with deps.declare("psutil"):
+    import psutil
+
+
+class PSAdapterError(Exception):
+    pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class PSAdapter(object):
+    NAME_ATTRS = ('name', 'cmdline')
+    PATTERN_ATTRS = ('name', 'exe', 'cmdline')
+
+    def __init__(self, psutil):
+        self._psutil = psutil
+
+    @staticmethod
+    def from_package(psutil):
+        version = LooseVersion(psutil.__version__)
+        if version < LooseVersion('2.0.0'):
+            return PSAdapter100(psutil)
+        elif version < LooseVersion('5.3.0'):
+            return PSAdapter200(psutil)
+        else:
+            return PSAdapter530(psutil)
+
+    def get_pids_by_name(self, name):
+        return [p.pid for p in self._process_iter(*self.NAME_ATTRS) if self._has_name(p, name)]
+
+    def _process_iter(self, *attrs):
+        return self._psutil.process_iter()
+
+    def _has_name(self, proc, name):
+        attributes = self._get_proc_attributes(proc, *self.NAME_ATTRS)
+        return (compare_lower(attributes['name'], name) or
+                attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name))
+
+    def _get_proc_attributes(self, proc, *attributes):
+        return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes)
+
+    @staticmethod
+    @abc.abstractmethod
+    def _get_attribute_from_proc(proc, attribute):
+        pass
+
+    def get_pids_by_pattern(self, pattern,
ignore_case): + flags = 0 + if ignore_case: + flags |= re.I + + try: + regex = re.compile(pattern, flags) + except re.error as e: + raise PSAdapterError("'%s' is not a valid regular expression: %s" % (pattern, to_native(e))) + + return [p.pid for p in self._process_iter(*self.PATTERN_ATTRS) if self._matches_regex(p, regex)] + + def _matches_regex(self, proc, regex): + # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information + attributes = self._get_proc_attributes(proc, *self.PATTERN_ATTRS) + matches_name = regex.search(to_native(attributes['name'])) + matches_exe = attributes['exe'] and regex.search(basename(to_native(attributes['exe']))) + matches_cmd = attributes['cmdline'] and regex.search(to_native(' '.join(attributes['cmdline']))) + + return any([matches_name, matches_exe, matches_cmd]) + + +class PSAdapter100(PSAdapter): + def __init__(self, psutil): + super(PSAdapter100, self).__init__(psutil) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + return getattr(proc, attribute) + + +class PSAdapter200(PSAdapter): + def __init__(self, psutil): + super(PSAdapter200, self).__init__(psutil) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + method = getattr(proc, attribute) + return method() + + +class PSAdapter530(PSAdapter): + def __init__(self, psutil): + super(PSAdapter530, self).__init__(psutil) + + def _process_iter(self, *attrs): + return self._psutil.process_iter(attrs=attrs) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + return proc.info[attribute] + + +def compare_lower(a, b): + if a is None or b is None: + # this could just be "return False" but would lead to surprising behavior if both a and b are None + return a == b + + return a.lower() == b.lower() + + +class Pids(object): + def __init__(self, module): + + deps.validate(module) + + self._ps = PSAdapter.from_package(psutil) + + self._module = module + self._name = module.params['name'] + self._pattern = module.params['pattern'] + self._ignore_case = module.params['ignore_case'] + + self._pids = [] + + def execute(self): + if self._name: + self._pids = self._ps.get_pids_by_name(self._name) + else: + try: + self._pids = self._ps.get_pids_by_pattern(self._pattern, self._ignore_case) + except PSAdapterError as e: + self._module.fail_json(msg=to_native(e)) + + return self._module.exit_json(**self.result) + + @property + def result(self): + return { + 'pids': self._pids, + } + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type="str"), + pattern=dict(type="str"), + ignore_case=dict(type="bool", default=False), + ), + required_one_of=[ + ('name', 'pattern') + ], + mutually_exclusive=[ + ('name', 'pattern') + ], + supports_check_mode=True, + ) + + Pids(module).execute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pingdom.py b/ansible_collections/community/general/plugins/modules/pingdom.py new file mode 100644 index 000000000..bd4826a78 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pingdom.py @@ -0,0 +1,149 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: pingdom +short_description: Pause/unpause Pingdom alerts 
+description: + - This module will let you pause/unpause Pingdom alerts +author: + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" +requirements: + - "This pingdom python library: https://github.com/mbabineau/pingdom-python" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Define whether or not the check should be running or paused. + required: true + choices: [ "running", "paused", "started", "stopped" ] + checkid: + type: str + description: + - Pingdom ID of the check. + required: true + uid: + type: str + description: + - Pingdom user ID. + required: true + passwd: + type: str + description: + - Pingdom user password. + required: true + key: + type: str + description: + - Pingdom API key. + required: true +notes: + - This module does not yet have support to add/remove checks. +''' + +EXAMPLES = ''' +- name: Pause the check with the ID of 12345 + community.general.pingdom: + uid: example@example.com + passwd: password123 + key: apipassword123 + checkid: 12345 + state: paused + +- name: Unpause the check with the ID of 12345 + community.general.pingdom: + uid: example@example.com + passwd: password123 + key: apipassword123 + checkid: 12345 + state: running +''' + +import traceback + +PINGDOM_IMP_ERR = None +try: + import pingdom + HAS_PINGDOM = True +except Exception: + PINGDOM_IMP_ERR = traceback.format_exc() + HAS_PINGDOM = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +def pause(checkid, uid, passwd, key): + + c = pingdom.PingdomConnection(uid, passwd, key) + c.modify_check(checkid, paused=True) + check = c.get_check(checkid) + name = check.name + result = check.status + # if result != "paused": # api output buggy - accept raw exception for now + # return (True, name, result) + return (False, name, result) + + +def unpause(checkid, uid, passwd, key): + + c = pingdom.PingdomConnection(uid, passwd, key) + c.modify_check(checkid, paused=False) + check = c.get_check(checkid) + name = check.name + result = check.status + # if result != "up": # api output buggy - accept raw exception for now + # return (True, name, result) + return (False, name, result) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']), + checkid=dict(required=True), + uid=dict(required=True), + passwd=dict(required=True, no_log=True), + key=dict(required=True, no_log=True), + ) + ) + + if not HAS_PINGDOM: + module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR) + + checkid = module.params['checkid'] + state = module.params['state'] + uid = module.params['uid'] + passwd = module.params['passwd'] + key = module.params['key'] + + if (state == "paused" or state == "stopped"): + (rc, name, result) = pause(checkid, uid, passwd, key) + + if (state == "running" or state == "started"): + (rc, name, result) = unpause(checkid, uid, passwd, key) + + if rc != 0: + module.fail_json(checkid=checkid, name=name, status=result) + + module.exit_json(checkid=checkid, name=name, status=result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pip_package_info.py b/ansible_collections/community/general/plugins/modules/pip_package_info.py new file mode 100644 index 000000000..2cde7218d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pip_package_info.py @@ 
-0,0 +1,156 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# started out with AWX's scan_packages module + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: pip_package_info +short_description: Pip package information +description: + - Return information about installed pip packages +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + clients: + description: + - A list of the pip executables that will be used to get the packages. + They can be supplied with the full path or just the executable name, for example C(pip3.7). + default: ['pip'] + required: false + type: list + elements: path +requirements: + - The requested pip executables must be installed on the target. +author: + - Matthew Jones (@matburt) + - Brian Coca (@bcoca) + - Adam Miller (@maxamillion) +''' + +EXAMPLES = ''' +- name: Just get the list from default pip + community.general.pip_package_info: + +- name: Get the facts for default pip, pip2 and pip3.6 + community.general.pip_package_info: + clients: ['pip', 'pip2', 'pip3.6'] + +- name: Get from specific paths (virtualenvs?) + community.general.pip_package_info: + clients: '/home/me/projec42/python/pip3.5' +''' + +RETURN = ''' +packages: + description: a dictionary of installed package data + returned: always + type: dict + contains: + python: + description: A dictionary with each pip client which then contains a list of dicts with python package information + returned: always + type: dict + sample: + "packages": { + "pip": { + "Babel": [ + { + "name": "Babel", + "source": "pip", + "version": "2.6.0" + } + ], + "Flask": [ + { + "name": "Flask", + "source": "pip", + "version": "1.0.2" + } + ], + "Flask-SQLAlchemy": [ + { + "name": "Flask-SQLAlchemy", + "source": "pip", + "version": "2.3.2" + } + ], + "Jinja2": [ + { + "name": "Jinja2", + "source": "pip", + "version": "2.10" + } + ], + }, + } +''' +import json +import os + +from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.facts.packages import CLIMgr + + +class PIP(CLIMgr): + + def __init__(self, pip, module): + + self.CLI = pip + self.module = module + + def list_installed(self): + rc, out, err = self.module.run_command([self._cli, 'list', '-l', '--format=json']) + if rc != 0: + raise Exception("Unable to list packages rc=%s : %s" % (rc, err)) + return json.loads(out) + + def get_package_details(self, package): + package['source'] = self.CLI + return package + + +def main(): + + # start work + module = AnsibleModule( + argument_spec=dict( + clients=dict(type='list', elements='path', default=['pip']), + ), + supports_check_mode=True) + packages = {} + results = {'packages': {}} + clients = module.params['clients'] + + found = 0 + for pip in clients: + + if not os.path.basename(pip).startswith('pip'): + module.warn('Skipping invalid pip client: %s' % (pip)) + continue + try: + pip_mgr = PIP(pip, module) + if pip_mgr.is_available(): + found += 1 + packages[pip] = pip_mgr.get_packages() + except Exception as e: + module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e))) + continue + + if found == 0: + module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % 
clients)
+
+    # return info
+    results['packages'] = packages
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/pipx.py b/ansible_collections/community/general/plugins/modules/pipx.py
new file mode 100644
index 000000000..dfa2f4300
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pipx.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pipx
+short_description: Manages applications installed with pipx
+version_added: 3.8.0
+description:
+  - Manage Python applications installed in isolated virtualenvs using pipx.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+options:
+  state:
+    type: str
+    choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all, latest]
+    default: install
+    description:
+      - Desired state for the application.
+      - The states C(present) and C(absent) are aliases to C(install) and C(uninstall), respectively.
+      - The state C(latest) is equivalent to executing the task twice, with state C(install) and then C(upgrade).
+        It was added in community.general 5.5.0.
+  name:
+    type: str
+    description:
+      - >
+        The name of the application to be installed. It must be a simple package name.
+        For passing package specifications or installing from URLs or directories,
+        please use the I(source) option.
+  source:
+    type: str
+    description:
+      - >
+        The source of the application, such as a package with a version specifier, a URL,
+        a directory, or any other specification accepted by C(pipx). See the C(pipx)
+        documentation for more details.
+      - When specified, the C(pipx) command will use I(source) instead of I(name).
+  install_apps:
+    description:
+      - Add apps from the injected packages.
+      - Only used when I(state=inject).
+    type: bool
+    default: false
+    version_added: 6.5.0
+  install_deps:
+    description:
+      - Include applications of dependent packages.
+      - Only used when I(state=install), I(state=latest), or I(state=inject).
+    type: bool
+    default: false
+  inject_packages:
+    description:
+      - Packages to be injected into an existing virtual environment.
+      - Only used when I(state=inject).
+    type: list
+    elements: str
+  force:
+    description:
+      - Force modification of the application's virtual environment. See C(pipx) for details.
+      - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), I(state=latest), or I(state=inject).
+    type: bool
+    default: false
+  include_injected:
+    description:
+      - Upgrade the injected packages along with the application.
+      - Only used when I(state=upgrade), I(state=upgrade_all), or I(state=latest).
+      - This is used with I(state=upgrade) and I(state=latest) since community.general 6.6.0.
+    type: bool
+    default: false
+  index_url:
+    description:
+      - Base URL of Python Package Index.
+      - Only used when I(state=install), I(state=upgrade), I(state=latest), or I(state=inject).
+    type: str
+  python:
+    description:
+      - Python version to be used when creating the application virtual environment. Must be 3.6+.
+ - Only used when I(state=install), I(state=latest), I(state=reinstall), or I(state=reinstall_all). + type: str + system_site_packages: + description: + - Give application virtual environment access to the system site-packages directory. + - Only used when I(state=install) or I(state=latest). + type: bool + default: false + version_added: 6.6.0 + executable: + description: + - Path to the C(pipx) installed in the system. + - > + If not specified, the module will use C(python -m pipx) to run the tool, + using the same Python interpreter as ansible itself. + type: path + editable: + description: + - Install the project in editable mode. + type: bool + default: false + version_added: 4.6.0 + pip_args: + description: + - Arbitrary arguments to pass directly to C(pip). + type: str + version_added: 4.6.0 +notes: + - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). + - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. + - > + This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR) + passed using the R(environment Ansible keyword, playbooks_environment). + - This module requires C(pipx) version 0.16.2.1 or above. + - Please note that C(pipx) requires Python 3.6 or above. + - > + This first implementation does not verify whether a specified version constraint has been installed or not. + Hence, when using version operators, C(pipx) module will always try to execute the operation, + even when the application was previously installed. + This feature will be added in the future. + - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). +author: + - "Alexei Znamensky (@russoz)" +''' + +EXAMPLES = ''' +- name: Install tox + community.general.pipx: + name: tox + +- name: Install tox from git repository + community.general.pipx: + name: tox + source: git+https://github.com/tox-dev/tox.git + +- name: Upgrade tox + community.general.pipx: + name: tox + state: upgrade + +- name: Reinstall black with specific Python version + community.general.pipx: + name: black + state: reinstall + python: 3.7 + +- name: Uninstall pycowsay + community.general.pipx: + name: pycowsay + state: absent +''' + + +import json + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner + +from ansible.module_utils.facts.compat import ansible_facts + + +class PipX(StateModuleHelper): + output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] + module = dict( + argument_spec=dict( + state=dict(type='str', default='install', + choices=['present', 'absent', 'install', 'uninstall', 'uninstall_all', + 'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest']), + name=dict(type='str'), + source=dict(type='str'), + install_apps=dict(type='bool', default=False), + install_deps=dict(type='bool', default=False), + inject_packages=dict(type='list', elements='str'), + force=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + index_url=dict(type='str'), + python=dict(type='str'), + system_site_packages=dict(type='bool', default=False), + executable=dict(type='path'), + editable=dict(type='bool', default=False), + pip_args=dict(type='str'), + ), + required_if=[ + ('state', 'present', ['name']), + ('state', 'install', ['name']), + 
('state', 'absent', ['name']), + ('state', 'uninstall', ['name']), + ('state', 'upgrade', ['name']), + ('state', 'reinstall', ['name']), + ('state', 'latest', ['name']), + ('state', 'inject', ['name', 'inject_packages']), + ], + supports_check_mode=True, + ) + + def _retrieve_installed(self): + def process_list(rc, out, err): + if not out: + return {} + + results = {} + raw_data = json.loads(out) + for venv_name, venv in raw_data['venvs'].items(): + results[venv_name] = { + 'version': venv['metadata']['main_package']['package_version'], + 'injected': dict( + (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items() + ), + } + return results + + installed = self.runner('_list', output_process=process_list).run(_list=1) + + if self.vars.name is not None: + app_list = installed.get(self.vars.name) + if app_list: + return {self.vars.name: app_list} + else: + return {} + + return installed + + def __init_module__(self): + if self.vars.executable: + self.command = [self.vars.executable] + else: + facts = ansible_facts(self.module, gather_subset=['python']) + self.command = [facts['python']['executable'], '-m', 'pipx'] + self.runner = pipx_runner(self.module, self.command) + + self.vars.set('application', self._retrieve_installed(), change=True, diff=True) + + def __quit_module__(self): + self.vars.application = self._retrieve_installed() + + def _capture_results(self, ctx): + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + def state_install(self): + if not self.vars.application or self.vars.force: + self.changed = True + with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx: + ctx.run(name_source=[self.vars.name, self.vars.source]) + self._capture_results(ctx) + + state_present = state_install + + def state_upgrade(self): + if not self.vars.application: + self.do_raise("Trying to upgrade a non-existent application: {0}".format(self.vars.name)) + if self.vars.force: + self.changed = True + + with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_uninstall(self): + if self.vars.application: + with self.runner('state name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + state_absent = state_uninstall + + def state_reinstall(self): + if not self.vars.application: + self.do_raise("Trying to reinstall a non-existent application: {0}".format(self.vars.name)) + self.changed = True + with self.runner('state name python', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_inject(self): + if not self.vars.application: + self.do_raise("Trying to inject packages into a non-existent application: {0}".format(self.vars.name)) + if self.vars.force: + self.changed = True + with self.runner('state index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_uninstall_all(self): + with self.runner('state', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_reinstall_all(self): + with self.runner('state python', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_upgrade_all(self): + if self.vars.force: + self.changed = True + with 
self.runner('state include_injected force', check_mode_skip=True) as ctx:
+            ctx.run()
+            self._capture_results(ctx)
+
+    def state_latest(self):
+        if not self.vars.application or self.vars.force:
+            self.changed = True
+            with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx:
+                ctx.run(state='install', name_source=[self.vars.name, self.vars.source])
+                self._capture_results(ctx)
+
+        with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
+            ctx.run(state='upgrade')
+            self._capture_results(ctx)
+
+
+def main():
+    PipX.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/pipx_info.py b/ansible_collections/community/general/plugins/modules/pipx_info.py
new file mode 100644
index 000000000..e2bb7fdae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pipx_info.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pipx_info
+short_description: Retrieves information about applications installed with pipx
+version_added: 5.6.0
+description:
+  - Retrieve details about Python applications installed in isolated virtualenvs using pipx.
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+options:
+  name:
+    description:
+      - Name of an application installed with C(pipx).
+    type: str
+  include_deps:
+    description:
+      - Include dependent packages in the output.
+    type: bool
+    default: false
+  include_injected:
+    description:
+      - Include injected packages in the output.
+    type: bool
+    default: false
+  include_raw:
+    description:
+      - Returns the raw output of C(pipx list --json).
+      - The raw output is not affected by I(include_deps) or I(include_injected).
+    type: bool
+    default: false
+  executable:
+    description:
+      - Path to the C(pipx) installed in the system.
+      - >
+        If not specified, the module will use C(python -m pipx) to run the tool,
+        using the same Python interpreter as ansible itself.
+    type: path
+notes:
+  - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
+  - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
+  - >
+    This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
+    passed using the R(environment Ansible keyword, playbooks_environment).
+  - This module requires C(pipx) version 0.16.2.1 or above.
+  - Please note that C(pipx) requires Python 3.6 or above.
+  - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
+author: + - "Alexei Znamensky (@russoz)" +''' + +EXAMPLES = ''' +- name: retrieve all installed applications + community.general.pipx_info: {} + +- name: retrieve all installed applications, include dependencies and injected packages + community.general.pipx_info: + include_deps: true + include_injected: true + +- name: retrieve application tox + community.general.pipx_info: + name: tox + include_deps: true + +- name: retrieve application ansible-lint, include dependencies + community.general.pipx_info: + name: ansible-lint + include_deps: true +''' + +RETURN = ''' +application: + description: The list of installed applications + returned: success + type: list + elements: dict + contains: + name: + description: The name of the installed application. + returned: success + type: str + sample: "tox" + version: + description: The version of the installed application. + returned: success + type: str + sample: "3.24.0" + dependencies: + description: The dependencies of the installed application, when I(include_deps=true). + returned: success + type: list + elements: str + sample: ["virtualenv"] + injected: + description: The injected packages for the installed application, when I(include_injected=true). + returned: success + type: dict + sample: + licenses: "0.6.1" + +raw_output: + description: The raw output of the C(pipx list) command, when I(include_raw=true). Used for debugging. + returned: success + type: dict + +cmd: + description: Command executed to obtain the list of installed applications. + returned: success + type: list + elements: str + sample: [ + "/usr/bin/python3.10", + "-m", + "pipx", + "list", + "--include-injected", + "--json" + ] +''' + +import json + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner + +from ansible.module_utils.facts.compat import ansible_facts + + +class PipXInfo(ModuleHelper): + output_params = ['name'] + module = dict( + argument_spec=dict( + name=dict(type='str'), + include_deps=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + include_raw=dict(type='bool', default=False), + executable=dict(type='path'), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + if self.vars.executable: + self.command = [self.vars.executable] + else: + facts = ansible_facts(self.module, gather_subset=['python']) + self.command = [facts['python']['executable'], '-m', 'pipx'] + self.runner = pipx_runner(self.module, self.command) + + # self.vars.set('application', self._retrieve_installed(), change=True, diff=True) + + def __run__(self): + def process_list(rc, out, err): + if not out: + return [] + + results = [] + raw_data = json.loads(out) + if self.vars.include_raw: + self.vars.raw_output = raw_data + + if self.vars.name: + if self.vars.name in raw_data['venvs']: + data = {self.vars.name: raw_data['venvs'][self.vars.name]} + else: + data = {} + else: + data = raw_data['venvs'] + + for venv_name, venv in data.items(): + entry = { + 'name': venv_name, + 'version': venv['metadata']['main_package']['package_version'] + } + if self.vars.include_injected: + entry['injected'] = dict( + (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items() + ) + if self.vars.include_deps: + entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) + results.append(entry) + + return results + + with self.runner('_list', 
output_process=process_list) as ctx: + self.vars.application = ctx.run(_list=1) + self._capture_results(ctx) + + def _capture_results(self, ctx): + self.vars.cmd = ctx.cmd + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + +def main(): + PipXInfo.execute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pkg5.py b/ansible_collections/community/general/plugins/modules/pkg5.py new file mode 100644 index 000000000..f6bc77a71 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pkg5.py @@ -0,0 +1,186 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Peter Oliver +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: pkg5 +author: +- Peter Oliver (@mavit) +short_description: Manages packages with the Solaris 11 Image Packaging System +description: + - IPS packages are the native packages in Solaris 11 and higher. +notes: + - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - An FRMI of the package(s) to be installed/removed/updated. + - Multiple packages may be specified, separated by C(,). + required: true + type: list + elements: str + state: + description: + - Whether to install (I(present), I(latest)), or remove (I(absent)) a package. + choices: [ absent, latest, present, installed, removed, uninstalled ] + default: present + type: str + accept_licenses: + description: + - Accept any licences. + type: bool + default: false + aliases: [ accept, accept_licences ] + be_name: + description: + - Creates a new boot environment with the given name. + type: str + refresh: + description: + - Refresh publishers before execution. + type: bool + default: true +''' +EXAMPLES = ''' +- name: Install Vim + community.general.pkg5: + name: editor/vim + +- name: Install Vim without refreshing publishers + community.general.pkg5: + name: editor/vim + refresh: false + +- name: Remove finger daemon + community.general.pkg5: + name: service/network/finger + state: absent + +- name: Install several packages at once + community.general.pkg5: + name: + - /file/gnu-findutils + - /text/gnu-grep +''' + +import re + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']), + accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']), + be_name=dict(type='str'), + refresh=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + params = module.params + packages = [] + + # pkg(5) FRMIs include a comma before the release number, but + # AnsibleModule will have split this into multiple items for us. + # Try to spot where this has happened and fix it. 
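+    # For example, a hypothetical 'editor/vim@8.2,5.11' arrives here as
+    # ['editor/vim@8.2', '5.11'] and is joined back together by the loop below.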
+ for fragment in params['name']: + if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]): + packages[-1] += ',' + fragment + else: + packages.append(fragment) + + if params['state'] in ['present', 'installed']: + ensure(module, 'present', packages, params) + elif params['state'] in ['latest']: + ensure(module, 'latest', packages, params) + elif params['state'] in ['absent', 'uninstalled', 'removed']: + ensure(module, 'absent', packages, params) + + +def ensure(module, state, packages, params): + response = { + 'results': [], + 'msg': '', + } + behaviour = { + 'present': { + 'filter': lambda p: not is_installed(module, p), + 'subcommand': 'install', + }, + 'latest': { + 'filter': lambda p: ( + not is_installed(module, p) or not is_latest(module, p) + ), + 'subcommand': 'install', + }, + 'absent': { + 'filter': lambda p: is_installed(module, p), + 'subcommand': 'uninstall', + }, + } + + if module.check_mode: + dry_run = ['-n'] + else: + dry_run = [] + + if params['accept_licenses']: + accept_licenses = ['--accept'] + else: + accept_licenses = [] + + if params['be_name']: + beadm = ['--be-name=' + module.params['be_name']] + else: + beadm = [] + + if params['refresh']: + no_refresh = [] + else: + no_refresh = ['--no-refresh'] + + to_modify = list(filter(behaviour[state]['filter'], packages)) + if to_modify: + rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify) + response['rc'] = rc + response['results'].append(out) + response['msg'] += err + response['changed'] = True + if rc == 4: + response['changed'] = False + response['failed'] = False + elif rc != 0: + module.fail_json(**response) + + module.exit_json(**response) + + +def is_installed(module, package): + rc, out, err = module.run_command(['pkg', 'list', '--', package]) + return not bool(int(rc)) + + +def is_latest(module, package): + rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package]) + return bool(int(rc)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pkg5_publisher.py b/ansible_collections/community/general/plugins/modules/pkg5_publisher.py new file mode 100644 index 000000000..9d1b38138 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pkg5_publisher.py @@ -0,0 +1,210 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014 Peter Oliver +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: pkg5_publisher +author: "Peter Oliver (@mavit)" +short_description: Manages Solaris 11 Image Packaging System publishers +description: + - IPS packages are the native packages in Solaris 11 and higher. + - This modules will configure which publishers a client will download IPS + packages from. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - The publisher's name. + required: true + aliases: [ publisher ] + type: str + state: + description: + - Whether to ensure that a publisher is present or absent. 
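+ # (state=present maps to "pkg set-publisher" and state=absent to + # "pkg unset-publisher" in the code below)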
+ default: present + choices: [ present, absent ] + type: str + sticky: + description: + - Packages installed from a sticky repository can only receive updates + from that repository. + type: bool + enabled: + description: + - Is the repository enabled or disabled? + type: bool + origin: + description: + - A path or URL to the repository. + - Multiple values may be provided. + type: list + elements: str + mirror: + description: + - A path or URL to the repository mirror. + - Multiple values may be provided. + type: list + elements: str +''' +EXAMPLES = ''' +- name: Fetch packages for the solaris publisher direct from Oracle + community.general.pkg5_publisher: + name: solaris + sticky: true + origin: https://pkg.oracle.com/solaris/support/ + +- name: Configure a publisher for locally-produced packages + community.general.pkg5_publisher: + name: site + origin: 'https://pkg.example.com/site/' +''' + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['publisher']), + state=dict(default='present', choices=['present', 'absent']), + sticky=dict(type='bool'), + enabled=dict(type='bool'), + # search_after=dict(), + # search_before=dict(), + origin=dict(type='list', elements='str'), + mirror=dict(type='list', elements='str'), + ) + ) + + for option in ['origin', 'mirror']: + if module.params[option] == ['']: + module.params[option] = [] + + if module.params['state'] == 'present': + modify_publisher(module, module.params) + else: + unset_publisher(module, module.params['name']) + + +def modify_publisher(module, params): + name = params['name'] + existing = get_publishers(module) + + if name in existing: + for option in ['origin', 'mirror', 'sticky', 'enabled']: + if params[option] is not None: + if params[option] != existing[name][option]: + return set_publisher(module, params) + else: + return set_publisher(module, params) + + module.exit_json() + + +def set_publisher(module, params): + name = params['name'] + args = [] + + if params['origin'] is not None: + args.append('--remove-origin=*') + args.extend(['--add-origin=' + u for u in params['origin']]) + if params['mirror'] is not None: + args.append('--remove-mirror=*') + args.extend(['--add-mirror=' + u for u in params['mirror']]) + + if params['sticky'] is not None and params['sticky']: + args.append('--sticky') + elif params['sticky'] is not None: + args.append('--non-sticky') + + if params['enabled'] is not None and params['enabled']: + args.append('--enable') + elif params['enabled'] is not None: + args.append('--disable') + + rc, out, err = module.run_command( + ["pkg", "set-publisher"] + args + [name], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + if rc != 0: + module.fail_json(**response) + module.exit_json(**response) + + +def unset_publisher(module, publisher): + if publisher not in get_publishers(module): + module.exit_json() + + rc, out, err = module.run_command( + ["pkg", "unset-publisher", publisher], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + if rc != 0: + module.fail_json(**response) + module.exit_json(**response) + + +def get_publishers(module): + rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True) + + lines = out.splitlines() + keys = lines.pop(0).lower().split("\t") + + publishers = {} + for line in lines: + values = dict(zip(keys, map(unstringify, line.split("\t")))) + name = 
values['publisher'] + + if name not in publishers: + publishers[name] = dict( + (k, values[k]) for k in ['sticky', 'enabled'] + ) + publishers[name]['origin'] = [] + publishers[name]['mirror'] = [] + + if values['type'] is not None: + publishers[name][values['type']].append(values['uri']) + + return publishers + + +def unstringify(val): + if val == "-" or val == '': + return None + elif val == "true": + return True + elif val == "false": + return False + else: + return val + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pkgin.py b/ansible_collections/community/general/plugins/modules/pkgin.py new file mode 100644 index 000000000..c08b25218 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pkgin.py @@ -0,0 +1,396 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013 Shaun Zinck +# Copyright (c) 2015 Lawrence Leonard Gilbert +# Copyright (c) 2016 Jasper Lievisse Adriaanse +# +# Written by Shaun Zinck +# Based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: pkgin +short_description: Package manager for SmartOS, NetBSD, et al +description: + - "The standard package manager for SmartOS, but also usable on NetBSD + or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))" +author: + - "Larry Gilbert (@L2G)" + - "Shaun Zinck (@szinck)" + - "Jasper Lievisse Adriaanse (@jasperla)" +notes: + - "Known bug with pkgin < 0.8.0: if a package is removed and another + package depends on it, the other package will be silently removed as + well. New to Ansible 1.9: check-mode support." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the package to install/remove. + - Multiple names may be given, separated by commas. + aliases: [pkg] + type: list + elements: str + state: + description: + - Intended state of the package. + choices: [ 'present', 'absent' ] + default: present + type: str + update_cache: + description: + - Update the repository database. Can be run together with other steps or on its own.
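+ # (when true, runs "pkgin update" before any install/remove step; see + # update_package_db() below)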
+ type: bool + default: false + upgrade: + description: + - Upgrade main packages to their newer versions + type: bool + default: false + full_upgrade: + description: + - Upgrade all packages to their newer versions + type: bool + default: false + clean: + description: + - Clean packages cache + type: bool + default: false + force: + description: + - Force package reinstall + type: bool + default: false +''' + +EXAMPLES = ''' +- name: Install package foo + community.general.pkgin: + name: foo + state: present + +- name: Install specific version of foo package + community.general.pkgin: + name: foo-2.0.1 + state: present + +- name: Update cache and install foo package + community.general.pkgin: + name: foo + update_cache: true + +- name: Remove package foo + community.general.pkgin: + name: foo + state: absent + +- name: Remove packages foo and bar + community.general.pkgin: + name: foo,bar + state: absent + +- name: Update repositories as a separate step + community.general.pkgin: + update_cache: true + +- name: Upgrade main packages (equivalent to pkgin upgrade) + community.general.pkgin: + upgrade: true + +- name: Upgrade all packages (equivalent to pkgin full-upgrade) + community.general.pkgin: + full_upgrade: true + +- name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade) + community.general.pkgin: + full_upgrade: true + force: true + +- name: Clean packages cache (equivalent to pkgin clean) + community.general.pkgin: + clean: true +''' + + +import re + +from ansible.module_utils.basic import AnsibleModule + + +class PackageState(object): + PRESENT = 1 + NOT_INSTALLED = 2 + OUTDATED = 4 + NOT_FOUND = 8 + + +def query_package(module, name): + """Search for the package by name and return state of the package. + """ + + # test whether '-p' (parsable) flag is supported. + rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH) + + if rc == 0: + pflag = '-p' + splitchar = ';' + else: + pflag = '' + splitchar = ' ' + + # Use "pkgin search" to find the package. The regular expression will + # only match on the complete name. + rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name)) + + # rc will not be 0 unless the search was a success + if rc == 0: + + # Search results may contain more than one line (e.g., 'emacs'), so iterate + # through each line to see if we have a match. + packages = out.split('\n') + + for package in packages: + + # Break up line at spaces. The first part will be the package with its + # version (e.g. 
'gcc47-libs-4.7.2nb4'), and the second will be the state + # of the package: + # '' - not installed + # '<' - installed but out of date + # '=' - installed and up to date + # '>' - installed but newer than the repository version + pkgname_with_version, raw_state = package.split(splitchar)[0:2] + + # Search for package, stripping version + # (results in sth like 'gcc47-libs' or 'emacs24-nox11') + pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M) + + # Do not proceed unless we have a match + if not pkg_search_obj: + continue + + # Grab matched string + pkgname_without_version = pkg_search_obj.group(1) + + if name not in (pkgname_with_version, pkgname_without_version): + continue + + # The package was found; now return its state + if raw_state == '<': + return PackageState.OUTDATED + elif raw_state == '=' or raw_state == '>': + return PackageState.PRESENT + else: + # Package found but not installed + return PackageState.NOT_INSTALLED + # no fall-through + + # No packages were matched + return PackageState.NOT_FOUND + + # Search failed + return PackageState.NOT_FOUND + + +def format_action_message(module, action, count): + vars = {"actioned": action, + "count": count} + + if module.check_mode: + message = "would have %(actioned)s %(count)d package" % vars + else: + message = "%(actioned)s %(count)d package" % vars + + if count == 1: + return message + else: + return message + "s" + + +def format_pkgin_command(module, command, package=None): + # Not all commands take a package argument, so cover this up by passing + # an empty string. Some commands (e.g. 'update') will ignore extra + # arguments, however this behaviour cannot be relied on for others. + if package is None: + package = "" + + if module.params["force"]: + force = "-F" + else: + force = "" + + vars = {"pkgin": PKGIN_PATH, + "command": command, + "package": package, + "force": force} + + if module.check_mode: + return "%(pkgin)s -n %(command)s %(package)s" % vars + else: + return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars + + +def remove_packages(module, packages): + + remove_c = 0 + + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]: + continue + + rc, out, err = module.run_command( + format_pkgin_command(module, "remove", package)) + + if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: + module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=out, stderr=err) + + remove_c += 1 + + if remove_c > 0: + module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c)) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, packages): + + install_c = 0 + + for package in packages: + query_result = query_package(module, package) + if query_result in [PackageState.PRESENT, PackageState.OUTDATED]: + continue + elif query_result is PackageState.NOT_FOUND: + module.fail_json(msg="failed to find package %s for installation" % package) + + rc, out, err = module.run_command( + format_pkgin_command(module, "install", package)) + + if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: + module.fail_json(msg="failed to install %s: %s" % (package, out), stdout=out, stderr=err) + + install_c 
+= 1 + + if install_c > 0: + module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c), stdout=out, stderr=err) + + module.exit_json(changed=False, msg="package(s) already present") + + +def update_package_db(module): + rc, out, err = module.run_command( + format_pkgin_command(module, "update")) + + if rc == 0: + if re.search('database for.*is up-to-date\n$', out): + return False, "database is up-to-date" + else: + return True, "updated repository database" + else: + module.fail_json(msg="could not update package db", stdout=out, stderr=err) + + +def do_upgrade_packages(module, full=False): + if full: + cmd = "full-upgrade" + else: + cmd = "upgrade" + + rc, out, err = module.run_command( + format_pkgin_command(module, cmd)) + + if rc == 0: + if re.search('^nothing to do.\n$', out): + module.exit_json(changed=False, msg="nothing left to upgrade") + else: + module.fail_json(msg="could not %s packages" % cmd, stdout=out, stderr=err) + + +def upgrade_packages(module): + do_upgrade_packages(module) + + +def full_upgrade_packages(module): + do_upgrade_packages(module, True) + + +def clean_cache(module): + rc, out, err = module.run_command( + format_pkgin_command(module, "clean")) + + if rc == 0: + # There's no indication if 'clean' actually removed anything, + # so assume it did. + module.exit_json(changed=True, msg="cleaned caches") + else: + module.fail_json(msg="could not clean package cache", stdout=out, stderr=err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "absent"]), + name=dict(aliases=["pkg"], type='list', elements='str'), + update_cache=dict(default=False, type='bool'), + upgrade=dict(default=False, type='bool'), + full_upgrade=dict(default=False, type='bool'), + clean=dict(default=False, type='bool'), + force=dict(default=False, type='bool')), + required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']], + supports_check_mode=True) + + global PKGIN_PATH + PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin']) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + p = module.params + + if p["update_cache"]: + c, msg = update_package_db(module) + if not (p['name'] or p["upgrade"] or p["full_upgrade"]): + module.exit_json(changed=c, msg=msg) + + if p["upgrade"]: + upgrade_packages(module) + if not p['name']: + module.exit_json(changed=True, msg='upgraded packages') + + if p["full_upgrade"]: + full_upgrade_packages(module) + if not p['name']: + module.exit_json(changed=True, msg='upgraded all packages') + + if p["clean"]: + clean_cache(module) + if not p['name']: + module.exit_json(changed=True, msg='cleaned caches') + + pkgs = p["name"] + + if p["state"] == "present": + install_packages(module, pkgs) + + elif p["state"] == "absent": + remove_packages(module, pkgs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pkgng.py b/ansible_collections/community/general/plugins/modules/pkgng.py new file mode 100644 index 000000000..b9d4422c0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pkgng.py @@ -0,0 +1,540 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, bleader +# Written by bleader +# Based on pkgin module written by Shaun Zinck +# that was based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: pkgng +short_description: Package manager for FreeBSD >= 9.0 +description: + - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name or list of names of packages to install/remove. + - "With I(name=*), I(state=latest) will operate, but I(state=present) and I(state=absent) will be noops." + - > + Warning: In Ansible 2.9 and earlier this module had a misfeature + where I(name=*) with I(state=latest) or I(state=present) would + install every package from every package repository, filling up + the machine's disk. Avoid using these values unless you are certain that + your role will only be used with newer versions. + required: true + aliases: [pkg] + type: list + elements: str + state: + description: + - State of the package. + - 'Note: C(latest) added in 2.7.' + choices: [ 'present', 'latest', 'absent' ] + required: false + default: present + type: str + cached: + description: + - Use local package base instead of fetching an updated one. + type: bool + required: false + default: false + annotation: + description: + - A list of key/value pairs of the form + C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a + C(-) denotes removing an annotation, and C(:) denotes modifying an + annotation. + If setting or modifying annotations, a value must be provided. + required: false + type: list + elements: str + pkgsite: + description: + - For pkgng versions before 1.1.4, specify packagesite to use + for downloading packages. If not specified, use settings from + C(/usr/local/etc/pkg.conf). + - For newer pkgng versions, specify the name of a repository + configured in C(/usr/local/etc/pkg/repos). + required: false + type: str + rootdir: + description: + - For pkgng versions 1.5 and later, pkg will install all packages + within the specified root directory. + - Can not be used together with I(chroot) or I(jail) options. + required: false + type: path + chroot: + description: + - Pkg will chroot in the specified environment. + - Can not be used together with I(rootdir) or I(jail) options. + required: false + type: path + jail: + description: + - Pkg will execute in the given jail name or id. + - Can not be used together with I(chroot) or I(rootdir) options. + type: str + autoremove: + description: + - Remove automatically installed packages which are no longer needed. + required: false + type: bool + default: false + ignore_osver: + description: + - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches. + - Defines the C(IGNORE_OSVERSION) environment variable. + required: false + type: bool + default: false + version_added: 1.3.0 +author: "bleader (@bleader)" +notes: + - When using pkgsite, note that packages already in the cache will not be downloaded again. + - When used with a C(loop:), each package is processed individually; + it is much more efficient to pass the list directly to the I(name) option.
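+# (name=* with state=latest is handled by a plain "pkg upgrade" run; see upgrade_packages() below)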
+''' + +EXAMPLES = ''' +- name: Install package foo + community.general.pkgng: + name: foo + state: present + +- name: Annotate package foo and bar + community.general.pkgng: + name: + - foo + - bar + annotation: '+test1=baz,-test2,:test3=foobar' + +- name: Remove packages foo and bar + community.general.pkgng: + name: + - foo + - bar + state: absent + +# "latest" support added in 2.7 +- name: Upgrade package baz + community.general.pkgng: + name: baz + state: latest + +- name: Upgrade all installed packages (see warning for the name option first!) + community.general.pkgng: + name: "*" + state: latest +''' + + +from collections import defaultdict +import re +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, run_pkgng, name): + + rc, out, err = run_pkgng('info', '-g', '-e', name) + + return rc == 0 + + +def query_update(module, run_pkgng, name): + + # Check to see if a package upgrade is available. + # rc = 0, no updates available or package not installed + # rc = 1, updates available + rc, out, err = run_pkgng('upgrade', '-g', '-n', name) + + return rc == 1 + + +def pkgng_older_than(module, pkgng_path, compare_version): + + rc, out, err = module.run_command([pkgng_path, '-v']) + version = [int(x) for x in re.split(r'[\._]', out)] + + i = 0 + new_pkgng = True + while compare_version[i] == version[i]: + i += 1 + if i == min(len(compare_version), len(version)): + break + else: + if compare_version[i] > version[i]: + new_pkgng = False + return not new_pkgng + + +def upgrade_packages(module, run_pkgng): + # Run a 'pkg upgrade', updating all packages. + upgraded_c = 0 + + pkgng_args = ['upgrade'] + pkgng_args.append('-n' if module.check_mode else '-y') + rc, out, err = run_pkgng(*pkgng_args, check_rc=(not module.check_mode)) + + matches = re.findall('^Number of packages to be (?:upgraded|reinstalled): ([0-9]+)', out, re.MULTILINE) + for match in matches: + upgraded_c += int(match) + + if upgraded_c > 0: + return (True, "updated %s package(s)" % upgraded_c, out, err) + return (False, "no packages need upgrades", out, err) + + +def remove_packages(module, run_pkgng, packages): + remove_c = 0 + stdout = "" + stderr = "" + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, run_pkgng, package): + continue + + if not module.check_mode: + rc, out, err = run_pkgng('delete', '-y', package) + stdout += out + stderr += err + + if not module.check_mode and query_package(module, run_pkgng, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=stdout, stderr=stderr) + + remove_c += 1 + + if remove_c > 0: + return (True, "removed %s package(s)" % remove_c, stdout, stderr) + + return (False, "package(s) already absent", stdout, stderr) + + +def install_packages(module, run_pkgng, packages, cached, state): + action_queue = defaultdict(list) + action_count = defaultdict(int) + stdout = "" + stderr = "" + + if not module.check_mode and not cached: + rc, out, err = run_pkgng('update') + stdout += out + stderr += err + if rc != 0: + module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err), stdout=stdout, stderr=stderr) + + for package in packages: + already_installed = query_package(module, run_pkgng, package) + if already_installed and state == "present": + continue + + if ( + already_installed and state == "latest" + and not query_update(module, run_pkgng, package) + ): + continue + + if 
already_installed: + action_queue["upgrade"].append(package) + else: + action_queue["install"].append(package) + + # install/upgrade all named packages with one pkg command + for (action, package_list) in action_queue.items(): + if module.check_mode: + # Do nothing, but count up how many actions + # would be performed so that the changed/msg + # is correct. + action_count[action] += len(package_list) + continue + + pkgng_args = [action, '-g', '-U', '-y'] + package_list + rc, out, err = run_pkgng(*pkgng_args) + stdout += out + stderr += err + + # individually verify packages are in requested state + for package in package_list: + verified = False + if action == 'install': + verified = query_package(module, run_pkgng, package) + elif action == 'upgrade': + verified = not query_update(module, run_pkgng, package) + + if verified: + action_count[action] += 1 + else: + module.fail_json(msg="failed to %s %s" % (action, package), stdout=stdout, stderr=stderr) + + if sum(action_count.values()) > 0: + past_tense = {'install': 'installed', 'upgrade': 'upgraded'} + messages = [] + for (action, count) in action_count.items(): + messages.append("%s %s package%s" % (past_tense.get(action, action), count, "s" if count != 1 else "")) + + return (True, '; '.join(messages), stdout, stderr) + + return (False, "package(s) already %s" % (state), stdout, stderr) + + +def annotation_query(module, run_pkgng, package, tag): + rc, out, err = run_pkgng('info', '-g', '-A', package) + match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE) + if match: + return match.group('value') + return False + + +def annotation_add(module, run_pkgng, package, tag, value): + _value = annotation_query(module, run_pkgng, package, tag) + if not _value: + # Annotation does not exist, add it.
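+ # ("pkg annotate -A" adds the tag; the value is fed to it on stdin via + # run_command's data=/binary_data= arguments)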
+ if not module.check_mode: + rc, out, err = run_pkgng('annotate', '-y', '-A', package, tag, data=value, binary_data=True) + if rc != 0: + module.fail_json(msg="could not annotate %s: %s" + % (package, out), stderr=err) + return True + elif _value != value: + # Annotation exists, but value differs + module.fail_json( + msg="failed to annotate %s, because %s is already set to %s, but should be set to %s" + % (package, tag, _value, value)) + return False + else: + # Annotation exists, nothing to do + return False + + +def annotation_delete(module, run_pkgng, package, tag, value): + _value = annotation_query(module, run_pkgng, package, tag) + if _value: + if not module.check_mode: + rc, out, err = run_pkgng('annotate', '-y', '-D', package, tag) + if rc != 0: + module.fail_json(msg="could not delete annotation from %s: %s" + % (package, out), stderr=err) + return True + return False + + +def annotation_modify(module, run_pkgng, package, tag, value): + _value = annotation_query(module, run_pkgng, package, tag) + if not _value: + # No such tag + module.fail_json(msg="could not change annotation of %s: tag %s does not exist" + % (package, tag)) + elif _value == value: + # No change in value + return False + else: + if not module.check_mode: + rc, out, err = run_pkgng('annotate', '-y', '-M', package, tag, data=value, binary_data=True) + + # pkg sometimes exits with rc == 1, even though the modification succeeded + # Check the output for a success message + if ( + rc != 0 + and re.search(r'^%s-[^:]+: Modified annotation tagged: %s' % (package, tag), out, flags=re.MULTILINE) is None + ): + module.fail_json(msg="failed to annotate %s, could not change annotation %s to %s: %s" + % (package, tag, value, out), stderr=err) + return True + + +def annotate_packages(module, run_pkgng, packages, annotations): + annotate_c = 0 + if len(annotations) == 1: + # Split on commas with optional trailing whitespace, + # to support the old style of multiple annotations + # on a single line, rather than YAML list syntax + annotations = re.split(r'\s*,\s*', annotations[0]) + + operation = { + '+': annotation_add, + '-': annotation_delete, + ':': annotation_modify + } + + for package in packages: + for annotation_string in annotations: + # Note to future maintainers: A dash (-) in a regex character class ([-+:] below) + # must appear as the first character in the class, or it will be interpreted + # as a range of characters. + annotation = \ + re.match(r'(?P<operation>[-+:])(?P<tag>[^=]+)(=(?P<value>.+))?', annotation_string) + + if annotation is None: + module.fail_json( + msg="failed to annotate %s, invalid annotate string: %s" + % (package, annotation_string) + ) + + annotation = annotation.groupdict() + if operation[annotation['operation']](module, run_pkgng, package, annotation['tag'], annotation['value']): + annotate_c += 1 + + if annotate_c > 0: + return (True, "added %s annotations."
% annotate_c) + return (False, "changed no annotations") + + +def autoremove_packages(module, run_pkgng): + stdout = "" + stderr = "" + rc, out, err = run_pkgng('autoremove', '-n') + + autoremove_c = 0 + + match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE) + if match: + autoremove_c = int(match.group(1)) + + if autoremove_c == 0: + return (False, "no package(s) to autoremove", stdout, stderr) + + if not module.check_mode: + rc, out, err = run_pkgng('autoremove', '-y') + stdout += out + stderr += err + + return (True, "autoremoved %d package(s)" % (autoremove_c), stdout, stderr) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "latest", "absent"], required=False), + name=dict(aliases=["pkg"], required=True, type='list', elements='str'), + cached=dict(default=False, type='bool'), + ignore_osver=dict(default=False, required=False, type='bool'), + annotation=dict(required=False, type='list', elements='str'), + pkgsite=dict(required=False), + rootdir=dict(required=False, type='path'), + chroot=dict(required=False, type='path'), + jail=dict(required=False, type='str'), + autoremove=dict(default=False, type='bool')), + supports_check_mode=True, + mutually_exclusive=[["rootdir", "chroot", "jail"]]) + + pkgng_path = module.get_bin_path('pkg', True) + + p = module.params + + pkgs = p["name"] + + changed = False + msgs = [] + stdout = "" + stderr = "" + dir_arg = None + + if p["rootdir"] is not None: + rootdir_not_supported = pkgng_older_than(module, pkgng_path, [1, 5, 0]) + if rootdir_not_supported: + module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater") + else: + dir_arg = "--rootdir=%s" % (p["rootdir"]) + + if p["ignore_osver"]: + ignore_osver_not_supported = pkgng_older_than(module, pkgng_path, [1, 11, 0]) + if ignore_osver_not_supported: + module.fail_json(msg="To use option 'ignore_osver' pkg version must be 1.11 or greater") + + if p["chroot"] is not None: + dir_arg = '--chroot=%s' % (p["chroot"]) + + if p["jail"] is not None: + dir_arg = '--jail=%s' % (p["jail"]) + + # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions + # in /usr/local/etc/pkg/repos + repo_flag_not_supported = pkgng_older_than(module, pkgng_path, [1, 1, 4]) + + def run_pkgng(action, *args, **kwargs): + cmd = [pkgng_path, dir_arg, action] + + pkgng_env = {'BATCH': 'yes'} + + if p["ignore_osver"]: + pkgng_env['IGNORE_OSVERSION'] = 'yes' + + if p['pkgsite'] is not None and action in ('update', 'install', 'upgrade',): + if repo_flag_not_supported: + pkgng_env['PACKAGESITE'] = p['pkgsite'] + else: + cmd.append('--repository=%s' % (p['pkgsite'],)) + + # If environ_update is specified to be "passed through" + # to module.run_command, then merge its values into pkgng_env + pkgng_env.update(kwargs.pop('environ_update', dict())) + + return module.run_command(cmd + list(args), environ_update=pkgng_env, **kwargs) + + if pkgs == ['*'] and p["state"] == 'latest': + # Operate on all installed packages. Only state: latest makes sense here. + _changed, _msg, _stdout, _stderr = upgrade_packages(module, run_pkgng) + changed = changed or _changed + stdout += _stdout + stderr += _stderr + msgs.append(_msg) + + # Operate on named packages + if len(pkgs) == 1: + # The documentation used to show multiple packages specified in one line + # with comma or space delimiters. 
That doesn't result in a YAML list, and + # wrong actions (install vs upgrade) can be reported if those + # comma- or space-delimited strings make it to the pkg command line. + pkgs = re.split(r'[,\s]', pkgs[0]) + named_packages = [pkg for pkg in pkgs if pkg != '*'] + if p["state"] in ("present", "latest") and named_packages: + _changed, _msg, _out, _err = install_packages(module, run_pkgng, named_packages, + p["cached"], p["state"]) + stdout += _out + stderr += _err + changed = changed or _changed + msgs.append(_msg) + + elif p["state"] == "absent" and named_packages: + _changed, _msg, _out, _err = remove_packages(module, run_pkgng, named_packages) + stdout += _out + stderr += _err + changed = changed or _changed + msgs.append(_msg) + + if p["autoremove"]: + _changed, _msg, _stdout, _stderr = autoremove_packages(module, run_pkgng) + changed = changed or _changed + stdout += _stdout + stderr += _stderr + msgs.append(_msg) + + if p["annotation"] is not None: + _changed, _msg = annotate_packages(module, run_pkgng, pkgs, p["annotation"]) + changed = changed or _changed + msgs.append(_msg) + + module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pkgutil.py b/ansible_collections/community/general/plugins/modules/pkgutil.py new file mode 100644 index 000000000..5af74c1f3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pkgutil.py @@ -0,0 +1,301 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Alexander Winkler +# based on svr4pkg by +# Boyd Adamson (2012) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: pkgutil +short_description: OpenCSW package management on Solaris +description: +- This module installs, updates and removes packages from the OpenCSW project for Solaris. +- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies. +- See U(https://www.opencsw.org/) for more information about the project. +author: +- Alexander Winkler (@dermute) +- David Ponessa (@scathatheworm) +extends_documentation_fragment: +- community.general.attributes +attributes: + check_mode: + support: full + details: + - In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode. + diff_mode: + support: none +options: + name: + description: + - The name of the package. + - When using I(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil. + type: list + required: true + elements: str + aliases: [ pkg ] + site: + description: + - The repository path to install the package from. + - Its global definition is in C(/etc/opt/csw/pkgutil.conf). + required: false + type: str + state: + description: + - Whether to install (C(present)/C(installed)), or remove (C(absent)/C(removed)) packages. + - The upgrade (C(latest)) operation will update/install the packages to the latest version available. + type: str + required: true + choices: [ absent, installed, latest, present, removed ] + update_catalog: + description: + - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(true). 
+ type: bool + default: false + force: + description: + - To allow the update process to downgrade packages to match what is present in the repository, set this to C(true). + - This is useful for rolling back to stable from testing, or similar operations. + type: bool + default: false + version_added: 1.2.0 +''' + +EXAMPLES = r''' +- name: Install a package + community.general.pkgutil: + name: CSWcommon + state: present + +- name: Install a package from a specific repository + community.general.pkgutil: + name: CSWnrpe + site: ftp://myinternal.repo/opencsw/kiel + state: latest + +- name: Remove a package + community.general.pkgutil: + name: CSWtop + state: absent + +- name: Install several packages + community.general.pkgutil: + name: + - CSWsudo + - CSWtop + state: present + +- name: Update all packages + community.general.pkgutil: + name: '*' + state: latest + +- name: Update all packages and force versions to match latest in catalog + community.general.pkgutil: + name: '*' + state: latest + force: true +''' + +RETURN = r''' # ''' + +from ansible.module_utils.basic import AnsibleModule + + +def packages_not_installed(module, names): + ''' Check if each package is installed and return list of the ones absent ''' + pkgs = [] + for pkg in names: + rc, out, err = run_command(module, ['pkginfo', '-q', pkg]) + if rc != 0: + pkgs.append(pkg) + return pkgs + + +def packages_installed(module, names): + ''' Check if each package is installed and return list of the ones present ''' + pkgs = [] + for pkg in names: + if not pkg.startswith('CSW'): + continue + rc, out, err = run_command(module, ['pkginfo', '-q', pkg]) + if rc == 0: + pkgs.append(pkg) + return pkgs + + +def packages_not_latest(module, names, site, update_catalog): + ''' Check status of each package and return list of the ones with an upgrade available ''' + cmd = ['pkgutil'] + if update_catalog: + cmd.append('-U') + cmd.append('-c') + if site is not None: + cmd.extend(['-t', site]) + if names != ['*']: + cmd.extend(names) + rc, out, err = run_command(module, cmd) + + # Find packages in the catalog which are not up to date + packages = [] + for line in out.split('\n')[1:-1]: + if 'catalog' not in line and 'SAME' not in line: + packages.append(line.split(' ')[0]) + + # Remove duplicates + return list(set(packages)) + + +def run_command(module, cmd, **kwargs): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin']) + return module.run_command(cmd, **kwargs) + + +def package_install(module, state, pkgs, site, update_catalog, force): + cmd = ['pkgutil'] + if module.check_mode: + cmd.append('-n') + cmd.append('-iy') + if update_catalog: + cmd.append('-U') + if site is not None: + cmd.extend(['-t', site]) + if force: + cmd.append('-f') + cmd.extend(pkgs) + return run_command(module, cmd) + + +def package_upgrade(module, pkgs, site, update_catalog, force): + cmd = ['pkgutil'] + if module.check_mode: + cmd.append('-n') + cmd.append('-uy') + if update_catalog: + cmd.append('-U') + if site is not None: + cmd.extend(['-t', site]) + if force: + cmd.append('-f') + cmd += pkgs + return run_command(module, cmd) + + +def package_uninstall(module, pkgs): + cmd = ['pkgutil'] + if module.check_mode: + cmd.append('-n') + cmd.append('-ry') + cmd.extend(pkgs) + return run_command(module, cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', required=True, aliases=['pkg']), + state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 
'removed']), + site=dict(type='str'), + update_catalog=dict(type='bool', default=False), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + name = module.params['name'] + state = module.params['state'] + site = module.params['site'] + update_catalog = module.params['update_catalog'] + force = module.params['force'] + + rc = None + out = '' + err = '' + result = dict( + name=name, + state=state, + ) + + if state in ['installed', 'present']: + # Fail with an explicit error when trying to "install" '*' + if name == ['*']: + module.fail_json(msg="Can not use 'state: present' with name: '*'") + + # Build list of packages that are actually not installed from the ones requested + pkgs = packages_not_installed(module, name) + + # If the package list is empty then all packages are already present + if pkgs == []: + module.exit_json(changed=False) + + (rc, out, err) = package_install(module, state, pkgs, site, update_catalog, force) + if rc != 0: + module.fail_json(msg=(err or out)) + + elif state in ['latest']: + # When using latest for * + if name == ['*']: + # Check for packages that are actually outdated + pkgs = packages_not_latest(module, name, site, update_catalog) + + # If the package list comes up empty, everything is already up to date + if pkgs == []: + module.exit_json(changed=False) + + # If there are packages to update, just empty the list and run the command without it + # pkgutil logic is to update all when run without packages names + pkgs = [] + (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force) + if rc != 0: + module.fail_json(msg=(err or out)) + else: + # Build list of packages that are either outdated or not installed + pkgs = packages_not_installed(module, name) + pkgs += packages_not_latest(module, name, site, update_catalog) + + # If the package list is empty that means all packages are installed and up to date + if pkgs == []: + module.exit_json(changed=False) + + (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force) + if rc != 0: + module.fail_json(msg=(err or out)) + + elif state in ['absent', 'removed']: + # Build list of packages requested for removal that are actually present + pkgs = packages_installed(module, name) + + # If the list is empty, no packages need to be removed + if pkgs == []: + module.exit_json(changed=False) + + (rc, out, err) = package_uninstall(module, pkgs) + if rc != 0: + module.fail_json(msg=(err or out)) + + if rc is None: + # pkgutil was not executed because the package was already present/absent/up to date + result['changed'] = False + elif rc == 0: + result['changed'] = True + else: + result['changed'] = False + result['failed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pmem.py b/ansible_collections/community/general/plugins/modules/pmem.py new file mode 100644 index 000000000..d7fcb8e01 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pmem.py @@ -0,0 +1,637 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2022, Masayoshi Mizuma +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +author: + - Masayoshi Mizuma (@mizumm) +module: 
pmem +short_description: Configure Intel Optane Persistent Memory modules +version_added: 4.5.0 +description: + - This module allows configuring Intel Optane Persistent Memory modules + (PMem) using the ipmctl and ndctl command line tools. +requirements: + - ipmctl and ndctl command line tools + - xmltodict +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + appdirect: + description: + - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)). + - Create AppDirect capacity utilizing hardware interleaving across the + requested PMem modules if applicable given the specified target. + - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100). + type: int + appdirect_interleaved: + description: + - Create AppDirect capacity that is interleaved with any other PMem modules. + type: bool + required: false + default: true + memorymode: + description: + - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)). + type: int + reserved: + description: + - Percentage of the capacity to reserve (C(0)-C(100)). I(reserved) will not be mapped + into the system physical address space and will be presented as reserved + capacity with Show Device and Show Memory Resources Commands. + - I(reserved) will be set automatically if this is not configured. + type: int + required: false + socket: + description: + - This allows setting the configuration for each socket by using the socket ID. + - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100) within one socket. + type: list + elements: dict + suboptions: + id: + description: The socket ID of the PMem module. + type: int + required: true + appdirect: + description: + - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)) within the socket ID. + type: int + required: true + appdirect_interleaved: + description: + - Create AppDirect capacity that is interleaved with any other PMem modules within the socket ID. + type: bool + required: false + default: true + memorymode: + description: + - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)) within the socket ID. + type: int + required: true + reserved: + description: + - Percentage of the capacity to reserve (C(0)-C(100)) within the socket ID. + type: int + namespace: + description: + - This allows setting the configuration for the namespaces of the PMem. + type: list + elements: dict + suboptions: + mode: + description: + - The mode of the namespace. The details of the mode are in the man page of ndctl-create-namespace. + type: str + required: true + choices: ['raw', 'sector', 'fsdax', 'devdax'] + type: + description: + - The type of the namespace. The details of the type are in the man page of ndctl-create-namespace. + type: str + required: false + choices: ['pmem', 'blk'] + size: + description: + - The size of the namespace. This option supports the suffixes C(k) or C(K) or C(KB) for KiB, + C(m) or C(M) or C(MB) for MiB, C(g) or C(G) or C(GB) for GiB and C(t) or C(T) or C(TB) for TiB. + - This option is required if multiple namespaces are configured. + - If this option is not set, all of the available space of a region is configured. + type: str + required: false + namespace_append: + description: + - Enable appending the new namespaces to the system. + - The default is C(false), so all existing namespaces not listed in I(namespace) are removed.
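+ # (when false, pmem_init_env() below removes all existing namespaces + # before the new ones are created)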
+ type: bool + default: false + required: false +''' + +RETURN = r''' +reboot_required: + description: Indicates that the system reboot is required to complete the PMem configuration. + returned: success + type: bool + sample: true +result: + description: + - Shows the value of AppDirect, Memory Mode and Reserved size in bytes. + - If I(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID. + - If I(namespace) argument is provided, shows the detail of each namespace. + returned: success + type: list + elements: dict + contains: + appdirect: + description: AppDirect size in bytes. + type: int + memorymode: + description: Memory Mode size in bytes. + type: int + reserved: + description: Reserved size in bytes. + type: int + socket: + description: The socket ID to be configured. + type: int + namespace: + description: The list of the detail of namespace. + type: list + sample: [ + { + "appdirect": 111669149696, + "memorymode": 970662608896, + "reserved": 3626500096, + "socket": 0 + }, + { + "appdirect": 111669149696, + "memorymode": 970662608896, + "reserved": 3626500096, + "socket": 1 + } + ] +''' + +EXAMPLES = r''' +- name: Configure the Pmem as AppDirect 10, Memory Mode 70, and the Reserved 20 percent. + community.general.pmem: + appdirect: 10 + memorymode: 70 + +- name: Configure the Pmem as AppDirect 10, Memory Mode 80, and the Reserved 10 percent. + community.general.pmem: + appdirect: 10 + memorymode: 80 + reserved: 10 + +- name: Configure the Pmem as AppDirect with not interleaved 10, Memory Mode 70, and the Reserved 20 percent. + community.general.pmem: + appdirect: 10 + appdirect_interleaved: false + memorymode: 70 + +- name: Configure the Pmem each socket. + community.general.pmem: + socket: + - id: 0 + appdirect: 10 + appdirect_interleaved: false + memorymode: 70 + reserved: 20 + - id: 1 + appdirect: 10 + memorymode: 80 + reserved: 10 + +- name: Configure the two namespaces. 
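+ # (sizes must be multiples of the region alignment reported by + # "ndctl list -R"; see pmem_argument_check() below)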
+ community.general.pmem: + namespace: + - size: 1GB + type: pmem + mode: raw + - size: 320MB + type: pmem + mode: sector +''' + +import json +import re +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, human_to_bytes + +try: + import xmltodict +except ImportError: + HAS_XMLTODICT_LIBRARY = False + XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc() +else: + HAS_XMLTODICT_LIBRARY = True + XMLTODICT_LIBRARY_IMPORT_ERROR = None + + +class PersistentMemory(object): + def __init__(self): + module = AnsibleModule( + argument_spec=dict( + appdirect=dict(type='int'), + appdirect_interleaved=dict(type='bool', default=True), + memorymode=dict(type='int'), + reserved=dict(type='int'), + socket=dict( + type='list', elements='dict', + options=dict( + id=dict(required=True, type='int'), + appdirect=dict(required=True, type='int'), + appdirect_interleaved=dict(type='bool', default=True), + memorymode=dict(required=True, type='int'), + reserved=dict(type='int'), + ), + ), + namespace=dict( + type='list', elements='dict', + options=dict( + mode=dict(required=True, type='str', choices=['raw', 'sector', 'fsdax', 'devdax']), + type=dict(type='str', choices=['pmem', 'blk']), + size=dict(type='str'), + ), + ), + namespace_append=dict(type='bool', default=False), + ), + required_together=( + ['appdirect', 'memorymode'], + ), + required_one_of=( + ['appdirect', 'memorymode', 'socket', 'namespace'], + ), + mutually_exclusive=( + ['appdirect', 'socket'], + ['memorymode', 'socket'], + ['appdirect', 'namespace'], + ['memorymode', 'namespace'], + ['socket', 'namespace'], + ['appdirect', 'namespace_append'], + ['memorymode', 'namespace_append'], + ['socket', 'namespace_append'], + ), + ) + + if not HAS_XMLTODICT_LIBRARY: + module.fail_json( + msg=missing_required_lib('xmltodict'), + exception=XMLTODICT_LIBRARY_IMPORT_ERROR) + + self.ipmctl_exec = module.get_bin_path('ipmctl', True) + self.ndctl_exec = module.get_bin_path('ndctl', True) + + self.appdirect = module.params['appdirect'] + self.interleaved = module.params['appdirect_interleaved'] + self.memmode = module.params['memorymode'] + self.reserved = module.params['reserved'] + self.socket = module.params['socket'] + self.namespace = module.params['namespace'] + self.namespace_append = module.params['namespace_append'] + + self.module = module + self.changed = False + self.result = [] + + def pmem_run_command(self, command, returnCheck=True): + # in case command[] has number + cmd = [str(part) for part in command] + + self.module.log(msg='pmem_run_command: execute: %s' % cmd) + + rc, out, err = self.module.run_command(cmd) + + self.module.log(msg='pmem_run_command: result: %s' % out) + + if returnCheck and rc != 0: + self.module.fail_json(msg='Error while running: %s' % + cmd, rc=rc, out=out, err=err) + + return out + + def pmem_run_ipmctl(self, command, returnCheck=True): + + command = [self.ipmctl_exec] + command + + return self.pmem_run_command(command, returnCheck) + + def pmem_run_ndctl(self, command, returnCheck=True): + + command = [self.ndctl_exec] + command + + return self.pmem_run_command(command, returnCheck) + + def pmem_is_dcpmm_installed(self): + # To check this system has dcpmm + command = ['show', '-system', '-capabilities'] + return self.pmem_run_ipmctl(command) + + def pmem_get_region_align_size(self, region): + aligns = [] + for rg in region: + if rg['align'] not in aligns: + aligns.append(rg['align']) + + return aligns + + def pmem_get_available_region_size(self, region): + available_size = [] + 
for rg in region: + available_size.append(rg['available_size']) + + return available_size + + def pmem_get_available_region_type(self, region): + types = [] + for rg in region: + if rg['type'] not in types: + types.append(rg['type']) + + return types + + def pmem_argument_check(self): + def namespace_check(self): + command = ['list', '-R'] + out = self.pmem_run_ndctl(command) + if not out: + return 'No available region(s) in this system.' + region = json.loads(out) + + aligns = self.pmem_get_region_align_size(region) + if len(aligns) != 1: + return 'Regions whose alignment size differs are not supported.' + + available_size = self.pmem_get_available_region_size(region) + types = self.pmem_get_available_region_type(region) + for ns in self.namespace: + if ns['size']: + try: + size_byte = human_to_bytes(ns['size']) + except ValueError: + return 'The format of size: NNN TB|GB|MB|KB|T|G|M|K|B' + + if size_byte % aligns[0] != 0: + return 'size: %s should be aligned with %d' % (ns['size'], aligns[0]) + + is_space_enough = False + for i, avail in enumerate(available_size): + if avail > size_byte: + available_size[i] -= size_byte + is_space_enough = True + break + + if is_space_enough is False: + return 'There is no available region for size: %s' % ns['size'] + + ns['size_byte'] = size_byte + + elif len(self.namespace) != 1: + return 'size option is required to configure multiple namespaces' + + if ns['type'] not in types: + return 'type %s is not supported in this system. Supported type: %s' % (ns['type'], types) + + return None + + def percent_check(self, appdirect, memmode, reserved=None): + if appdirect is None or (appdirect < 0 or appdirect > 100): + return 'appdirect percent should be from 0 to 100.' + if memmode is None or (memmode < 0 or memmode > 100): + return 'memorymode percent should be from 0 to 100.' + + if reserved is None: + if appdirect + memmode > 100: + return 'Total percent should be less than or equal to 100.' + else: + if reserved < 0 or reserved > 100: + return 'reserved percent should be from 0 to 100.' + if appdirect + memmode + reserved != 100: + return 'Total percent should be 100.' + + def socket_id_check(self): + command = ['show', '-o', 'nvmxml', '-socket'] + out = self.pmem_run_ipmctl(command) + sockets_dict = xmltodict.parse(out, dict_constructor=dict)['SocketList']['Socket'] + socket_ids = [] + for sl in sockets_dict: + socket_ids.append(int(sl['SocketID'], 16)) + + for skt in self.socket: + if skt['id'] not in socket_ids: + return 'Invalid socket number: %d' % skt['id'] + + return None + + if self.namespace: + return namespace_check(self) + elif self.socket is None: + return percent_check(self, self.appdirect, self.memmode, self.reserved) + else: + ret = socket_id_check(self) + if ret is not None: + return ret + + for skt in self.socket: + ret = percent_check( + self, skt['appdirect'], skt['memorymode'], skt['reserved']) + if ret is not None: + return ret + + return None + + def pmem_remove_namespaces(self): + command = ['list', '-N'] + out = self.pmem_run_ndctl(command) + + # There are no namespaces in this system. Nothing to do.
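+ # (the code relies on "ndctl list -N" printing nothing at all, rather + # than an empty JSON array, when no namespace is configured)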
+ if not out: + return + + namespaces = json.loads(out) + + # Disable and destroy all namespaces + for ns in namespaces: + command = ['disable-namespace', ns['dev']] + self.pmem_run_ndctl(command) + + command = ['destroy-namespace', ns['dev']] + self.pmem_run_ndctl(command) + + return + + def pmem_delete_goal(self): + # delete the goal request + command = ['delete', '-goal'] + self.pmem_run_ipmctl(command) + + def pmem_init_env(self): + if self.namespace is None or (self.namespace and self.namespace_append is False): + self.pmem_remove_namespaces() + if self.namespace is None: + self.pmem_delete_goal() + + def pmem_get_capacity(self, skt=None): + command = ['show', '-d', 'Capacity', '-u', 'B', '-o', 'nvmxml', '-dimm'] + if skt: + command += ['-socket', skt['id']] + out = self.pmem_run_ipmctl(command) + + dimm_list = xmltodict.parse(out, dict_constructor=dict)['DimmList']['Dimm'] + capacity = 0 + for entry in dimm_list: + for key, v in entry.items(): + if key == 'Capacity': + capacity += int(v.split()[0]) + + return capacity + + def pmem_create_memory_allocation(self, skt=None): + def build_ipmctl_creation_opts(self, skt=None): + ipmctl_opts = [] + + if skt: + appdirect = skt['appdirect'] + memmode = skt['memorymode'] + reserved = skt['reserved'] + socket_id = skt['id'] + ipmctl_opts += ['-socket', socket_id] + else: + appdirect = self.appdirect + memmode = self.memmode + reserved = self.reserved + + if reserved is None: + res = 100 - memmode - appdirect + ipmctl_opts += ['memorymode=%d' % memmode, 'reserved=%d' % res] + else: + ipmctl_opts += ['memorymode=%d' % memmode, 'reserved=%d' % reserved] + + if self.interleaved: + ipmctl_opts += ['PersistentMemoryType=AppDirect'] + else: + ipmctl_opts += ['PersistentMemoryType=AppDirectNotInterleaved'] + + return ipmctl_opts + + def is_allocation_good(self, ipmctl_out, command): + warning = re.compile('WARNING') + error = re.compile('.*Error.*') + ignore_error = re.compile( + 'Do you want to continue? [y/n] Error: Invalid data input.') + + errmsg = '' + rc = True + for line in ipmctl_out.splitlines(): + if warning.match(line): + errmsg = '%s (command: %s)' % (line, command) + rc = False + break + elif error.match(line): + if not ignore_error: + errmsg = '%s (command: %s)' % (line, command) + rc = False + break + + return rc, errmsg + + def get_allocation_result(self, goal, skt=None): + ret = {'appdirect': 0, 'memorymode': 0} + + if skt: + ret['socket'] = skt['id'] + + out = xmltodict.parse(goal, dict_constructor=dict)['ConfigGoalList']['ConfigGoal'] + for entry in out: + + # Probably it's a bug of ipmctl to show the socket goal + # which isn't specified by the -socket option. + # Anyway, filter the noise out here: + if skt and skt['id'] != int(entry['SocketID'], 16): + continue + + for key, v in entry.items(): + if key == 'MemorySize': + ret['memorymode'] += int(v.split()[0]) + elif key == 'AppDirect1Size' or key == 'AapDirect2Size': + ret['appdirect'] += int(v.split()[0]) + + capacity = self.pmem_get_capacity(skt) + ret['reserved'] = capacity - ret['appdirect'] - ret['memorymode'] + + return ret + + reboot_required = False + + ipmctl_opts = build_ipmctl_creation_opts(self, skt) + + # First, do dry run ipmctl create command to check the error and warning. 
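+ # (the dry run omits -force; is_allocation_good() then scans the output + # for WARNING/Error lines before the real "create -goal" run below)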
+ command = ['create', '-goal'] + ipmctl_opts + out = self.pmem_run_ipmctl(command, returnCheck=False) + rc, errmsg = is_allocation_good(self, out, command) + if rc is False: + return reboot_required, {}, errmsg + + # Run actual creation here + command = ['create', '-u', 'B', '-o', 'nvmxml', '-force', '-goal'] + ipmctl_opts + goal = self.pmem_run_ipmctl(command) + ret = get_allocation_result(self, goal, skt) + reboot_required = True + + return reboot_required, ret, '' + + def pmem_config_namespaces(self, namespace): + command = ['create-namespace', '-m', namespace['mode']] + if namespace['type']: + command += ['-t', namespace['type']] + if 'size_byte' in namespace: + command += ['-s', namespace['size_byte']] + + self.pmem_run_ndctl(command) + + return None + + +def main(): + + pmem = PersistentMemory() + + pmem.pmem_is_dcpmm_installed() + + error = pmem.pmem_argument_check() + if error: + pmem.module.fail_json(msg=error) + + pmem.pmem_init_env() + pmem.changed = True + + if pmem.namespace: + for ns in pmem.namespace: + pmem.pmem_config_namespaces(ns) + + command = ['list', '-N'] + out = pmem.pmem_run_ndctl(command) + all_ns = json.loads(out) + + pmem.result = all_ns + reboot_required = False + elif pmem.socket is None: + reboot_required, ret, errmsg = pmem.pmem_create_memory_allocation() + if errmsg: + pmem.module.fail_json(msg=errmsg) + pmem.result.append(ret) + else: + for skt in pmem.socket: + skt_reboot_required, skt_ret, skt_errmsg = pmem.pmem_create_memory_allocation(skt) + + if skt_errmsg: + pmem.module.fail_json(msg=skt_errmsg) + + if skt_reboot_required: + reboot_required = True + + pmem.result.append(skt_ret) + + pmem.module.exit_json( + changed=pmem.changed, + reboot_required=reboot_required, + result=pmem.result + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/portage.py b/ansible_collections/community/general/plugins/modules/portage.py new file mode 100644 index 000000000..1c6b36537 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/portage.py @@ -0,0 +1,587 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, William L Thomson Jr +# Copyright (c) 2013, Yap Sok Ann +# Written by Yap Sok Ann +# Modified by William L. Thomson Jr. +# Based on apt module written by Matthew Williams +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: portage +short_description: Package manager for Gentoo +description: + - Manages Gentoo packages + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + package: + description: + - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world) + aliases: [name] + type: list + elements: str + + state: + description: + - State of the package atom + default: "present" + choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ] + type: str + + update: + description: + - Update packages to the best version available (--update) + type: bool + default: false + + backtrack: + description: + - Set backtrack value (C(--backtrack)). 
+ type: int + version_added: 5.8.0 + + deep: + description: + - Consider the entire dependency tree of packages (--deep) + type: bool + default: false + + newuse: + description: + - Include installed packages where USE flags have changed (--newuse) + type: bool + default: false + + changed_use: + description: + - Include installed packages where USE flags have changed, except when + - flags that the user has not enabled are added or removed + - (--changed-use) + type: bool + default: false + + oneshot: + description: + - Do not add the packages to the world file (--oneshot) + type: bool + default: false + + noreplace: + description: + - Do not re-emerge installed packages (--noreplace) + type: bool + default: true + + nodeps: + description: + - Only merge packages but not their dependencies (--nodeps) + type: bool + default: false + + onlydeps: + description: + - Only merge packages' dependencies but not the packages (--onlydeps) + type: bool + default: false + + depclean: + description: + - Remove packages not needed by explicitly merged packages (--depclean) + - If no package is specified, clean up the world's dependencies + - Otherwise, --depclean serves as a dependency aware version of --unmerge + type: bool + default: false + + quiet: + description: + - Run emerge in quiet mode (--quiet) + type: bool + default: false + + verbose: + description: + - Run emerge in verbose mode (--verbose) + type: bool + default: false + + sync: + description: + - Sync package repositories first + - If C(yes), perform "emerge --sync" + - If C(web), perform "emerge-webrsync" + choices: [ "web", "yes", "no" ] + type: str + + getbinpkgonly: + description: + - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf). + type: bool + default: false + version_added: 1.3.0 + + getbinpkg: + description: + - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf). + type: bool + default: false + + usepkgonly: + description: + - Merge only binaries (no compiling). + type: bool + default: false + + usepkg: + description: + - Tries to use the binary package(s) in the locally available packages directory. + type: bool + default: false + + keepgoing: + description: + - Continue as much as possible after an error. + type: bool + default: false + + jobs: + description: + - Specifies the number of packages to build simultaneously. + - "Since version 2.6: Value of 0 or False resets any previously added" + - --jobs setting values + type: int + + loadavg: + description: + - Specifies that no new builds should be started if there are + - other builds running and the load average is at least LOAD + - "Since version 2.6: Value of 0 or False resets any previously added" + - --load-average setting values + type: float + + withbdeps: + description: + - Specifies that build time dependencies should be installed. + type: bool + version_added: 5.8.0 + + quietbuild: + description: + - Redirect all build output to logs alone, and do not display it + - on stdout (--quiet-build) + type: bool + default: false + + quietfail: + description: + - Suppresses display of the build log on stdout (--quiet-fail) + - Only the die message and the path of the build log will be + - displayed on stdout. 
+ type: bool + default: false + +author: + - "William L Thomson Jr (@wltjr)" + - "Yap Sok Ann (@sayap)" + - "Andrew Udvare (@Tatsh)" +''' + +EXAMPLES = ''' +- name: Make sure package foo is installed + community.general.portage: + package: foo + state: present + +- name: Make sure package foo is not installed + community.general.portage: + package: foo + state: absent + +- name: Update package foo to the latest version (os specific alternative to latest) + community.general.portage: + package: foo + update: true + +- name: Install package foo using PORTAGE_BINHOST setup + community.general.portage: + package: foo + getbinpkg: true + +- name: Re-install world from binary packages only and do not allow any compiling + community.general.portage: + package: '@world' + usepkgonly: true + +- name: Sync repositories and update world + community.general.portage: + package: '@world' + update: true + deep: true + sync: true + +- name: Remove unneeded packages + community.general.portage: + depclean: true + +- name: Remove package foo if it is not explicitly needed + community.general.portage: + package: foo + state: absent + depclean: true +''' + +import os +import re +import sys +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.respawn import has_respawned, respawn_module +from ansible.module_utils.common.text.converters import to_native + + +try: + from portage.dbapi import vartree + from portage.exception import InvalidAtom + HAS_PORTAGE = True + PORTAGE_IMPORT_ERROR = None +except ImportError: + HAS_PORTAGE = False + PORTAGE_IMPORT_ERROR = traceback.format_exc() + + +def query_package(module, package, action): + if package.startswith('@'): + return query_set(module, package, action) + return query_atom(module, package, action) + + +def query_atom(module, atom, action): + vdb = vartree.vardbapi() + try: + exists = vdb.match(atom) + except InvalidAtom: + return False + return bool(exists) + + +def query_set(module, set, action): + system_sets = [ + '@live-rebuild', + '@module-rebuild', + '@preserved-rebuild', + '@security', + '@selected', + '@system', + '@world', + '@x11-module-rebuild', + ] + + if set in system_sets: + if action == 'unmerge': + module.fail_json(msg='set %s cannot be removed' % set) + return False + + world_sets_path = '/var/lib/portage/world_sets' + if not os.path.exists(world_sets_path): + return False + + cmd = 'grep %s %s' % (set, world_sets_path) + + rc, out, err = module.run_command(cmd) + return rc == 0 + + +def sync_repositories(module, webrsync=False): + if module.check_mode: + module.exit_json(msg='check mode not supported by sync') + + if webrsync: + webrsync_path = module.get_bin_path('emerge-webrsync', required=True) + cmd = '%s --quiet' % webrsync_path + else: + cmd = '%s --sync --quiet --ask=n' % module.emerge_path + + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg='could not sync package repositories') + + +# Note: In the 3 functions below, package querying is done one-by-one, +# but emerge is done in one go. If that is not desirable, split the +# packages into multiple tasks instead of joining them together with +# comma. 
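+#
+# A hypothetical sketch of that split in playbook form (the atoms below are
+# placeholders, not recommendations):
+#
+#   - name: Emerge atoms one task at a time
+#     community.general.portage:
+#       package: "{{ item }}"
+#     loop:
+#       - app-editors/vim
+#       - app-misc/tmux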
+
+
+def emerge_packages(module, packages):
+    """Run emerge command against given list of atoms."""
+    p = module.params
+
+    if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not (p['update'] or p['state'] == 'latest'):
+        for package in packages:
+            # The flag checks are already guaranteed by the outer condition,
+            # so only the package query is needed here.
+            if not query_package(module, package, 'emerge'):
+                break
+        else:
+            module.exit_json(changed=False, msg='Packages already present.')
+        if module.check_mode:
+            module.exit_json(changed=True, msg='Packages would be installed.')
+
+    args = []
+    emerge_flags = {
+        'update': '--update',
+        'deep': '--deep',
+        'newuse': '--newuse',
+        'changed_use': '--changed-use',
+        'oneshot': '--oneshot',
+        'noreplace': '--noreplace',
+        'nodeps': '--nodeps',
+        'onlydeps': '--onlydeps',
+        'quiet': '--quiet',
+        'verbose': '--verbose',
+        'getbinpkgonly': '--getbinpkgonly',
+        'getbinpkg': '--getbinpkg',
+        'usepkgonly': '--usepkgonly',
+        'usepkg': '--usepkg',
+        'keepgoing': '--keep-going',
+        'quietbuild': '--quiet-build',
+        'quietfail': '--quiet-fail',
+    }
+    for flag, arg in emerge_flags.items():
+        if p[flag]:
+            args.append(arg)
+
+    if p['state'] == 'latest':
+        args.append("--update")
+
+    emerge_value_flags = {
+        'jobs': '--jobs',
+        'loadavg': '--load-average',
+        'backtrack': '--backtrack',
+        'withbdeps': '--with-bdeps',
+    }
+
+    for flag, arg in emerge_value_flags.items():
+        flag_val = p[flag]
+
+        if flag_val is None:
+            # Fall back to the default: don't use this argument at all.
+            continue
+
+        # Add the --flag=value pair.
+        if isinstance(flag_val, bool):
+            args.extend((arg, to_native('y' if flag_val else 'n')))
+        elif not flag_val:
+            # If the value is 0 or 0.0: add the flag, but not the value.
+            args.append(arg)
+        else:
+            args.extend((arg, to_native(flag_val)))
+
+    cmd, (rc, out, err) = run_emerge(module, packages, *args)
+    if rc != 0:
+        module.fail_json(
+            cmd=cmd, rc=rc, stdout=out, stderr=err,
+            msg='Packages not installed.',
+        )
+
+    # Check for an SSH error with PORTAGE_BINHOST, since rc is still 0
+    # despite this error
+    if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \
+            and 'Permission denied (publickey).' in err:
+        module.fail_json(
+            cmd=cmd, rc=rc, stdout=out, stderr=err,
+            msg='Please check your PORTAGE_BINHOST configuration in make.conf '
+                'and your SSH authorized_keys file',
+        )
+
+    changed = True
+    for line in out.splitlines():
+        if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
+            msg = 'Packages installed.'
+            break
+        elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
+            msg = 'Packages would be installed.'
+            break
+    else:
+        changed = False
+        msg = 'No packages installed.'
+ + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg=msg, + ) + + +def unmerge_packages(module, packages): + p = module.params + + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--unmerge'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not removed.', + ) + + module.exit_json( + changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages removed.', + ) + + +def cleanup_packages(module, packages): + p = module.params + + if packages: + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--depclean'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) + + removed = 0 + for line in out.splitlines(): + if not line.startswith('Number removed:'): + continue + parts = line.split(':') + removed = int(parts[1].strip()) + changed = removed > 0 + + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Depclean completed.', + ) + + +def run_emerge(module, packages, *args): + args = list(args) + + args.append('--ask=n') + if module.check_mode: + args.append('--pretend') + + cmd = [module.emerge_path] + args + packages + return cmd, module.run_command(cmd) + + +portage_present_states = ['present', 'emerged', 'installed', 'latest'] +portage_absent_states = ['absent', 'unmerged', 'removed'] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + package=dict(type='list', elements='str', default=None, aliases=['name']), + state=dict( + default=portage_present_states[0], + choices=portage_present_states + portage_absent_states, + ), + update=dict(default=False, type='bool'), + backtrack=dict(default=None, type='int'), + deep=dict(default=False, type='bool'), + newuse=dict(default=False, type='bool'), + changed_use=dict(default=False, type='bool'), + oneshot=dict(default=False, type='bool'), + noreplace=dict(default=True, type='bool'), + nodeps=dict(default=False, type='bool'), + onlydeps=dict(default=False, type='bool'), + depclean=dict(default=False, type='bool'), + quiet=dict(default=False, type='bool'), + verbose=dict(default=False, type='bool'), + sync=dict(default=None, choices=['yes', 'web', 'no']), + getbinpkgonly=dict(default=False, type='bool'), + getbinpkg=dict(default=False, type='bool'), + usepkgonly=dict(default=False, type='bool'), + usepkg=dict(default=False, type='bool'), + keepgoing=dict(default=False, type='bool'), + jobs=dict(default=None, type='int'), + loadavg=dict(default=None, type='float'), + withbdeps=dict(default=None, type='bool'), + quietbuild=dict(default=False, type='bool'), + quietfail=dict(default=False, type='bool'), + ), + required_one_of=[['package', 'sync', 'depclean']], + mutually_exclusive=[ + ['nodeps', 'onlydeps'], + ['quiet', 'verbose'], + ['quietbuild', 'verbose'], + ['quietfail', 'verbose'], + ], + supports_check_mode=True, + ) + + if not HAS_PORTAGE: + if sys.executable != '/usr/bin/python' and not has_respawned(): + respawn_module('/usr/bin/python') + else: + 
module.fail_json(msg=missing_required_lib('portage'), + exception=PORTAGE_IMPORT_ERROR) + + module.emerge_path = module.get_bin_path('emerge', required=True) + + p = module.params + + if p['sync'] and p['sync'].strip() != 'no': + sync_repositories(module, webrsync=(p['sync'] == 'web')) + if not p['package']: + module.exit_json(msg='Sync successfully finished.') + + packages = [] + if p['package']: + packages.extend(p['package']) + + if p['depclean']: + if packages and p['state'] not in portage_absent_states: + module.fail_json( + msg='Depclean can only be used with package when the state is ' + 'one of: %s' % portage_absent_states, + ) + + cleanup_packages(module, packages) + + elif p['state'] in portage_present_states: + emerge_packages(module, packages) + + elif p['state'] in portage_absent_states: + unmerge_packages(module, packages) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/portinstall.py b/ansible_collections/community/general/plugins/modules/portinstall.py new file mode 100644 index 000000000..e263b7181 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/portinstall.py @@ -0,0 +1,216 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, berenddeboer +# Written by berenddeboer +# Based on pkgng module written by bleader +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: portinstall +short_description: Installing packages from FreeBSD's ports system +description: + - Manage packages for FreeBSD using 'portinstall'. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - name of package to install/remove + aliases: [pkg] + required: true + type: str + state: + description: + - state of the package + choices: [ 'present', 'absent' ] + required: false + default: present + type: str + use_packages: + description: + - use packages instead of ports whenever available + type: bool + required: false + default: true +author: "berenddeboer (@berenddeboer)" +''' + +EXAMPLES = ''' +- name: Install package foo + community.general.portinstall: + name: foo + state: present + +- name: Install package security/cyrus-sasl2-saslauthd + community.general.portinstall: + name: security/cyrus-sasl2-saslauthd + state: present + +- name: Remove packages foo and bar + community.general.portinstall: + name: foo,bar + state: absent +''' + +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +def query_package(module, name): + + pkg_info_path = module.get_bin_path('pkg_info', False) + + # Assume that if we have pkg_info, we haven't upgraded to pkgng + if pkg_info_path: + pkgng = False + pkg_glob_path = module.get_bin_path('pkg_glob', True) + rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True) + else: + pkgng = True + pkg_info_path = module.get_bin_path('pkg', True) + pkg_info_path = pkg_info_path + " info" + rc, out, err = module.run_command("%s %s" % (pkg_info_path, name)) + + found = rc == 0 + + if not found: + # databases/mysql55-client installs as mysql-client, so try solving + # that the ugly way. 
Pity FreeBSD doesn't have a foolproof way of checking
+        # whether a package is installed
+        name_without_digits = re.sub('[0-9]', '', name)
+        if name != name_without_digits:
+            # Both branches previously ran the identical query; one call
+            # covers pkgng and the legacy tool alike here.
+            rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+
+            found = rc == 0
+
+    return found
+
+
+def matching_packages(module, name):
+
+    ports_glob_path = module.get_bin_path('ports_glob', True)
+    rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
+    # Count the number of packages found
+    occurrences = out.count('\n')
+    if occurrences == 0:
+        name_without_digits = re.sub('[0-9]', '', name)
+        if name != name_without_digits:
+            rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits))
+            occurrences = out.count('\n')
+    return occurrences
+
+
+def remove_packages(module, packages):
+
+    remove_c = 0
+    pkg_glob_path = module.get_bin_path('pkg_glob', True)
+
+    # If pkg_delete is not found, we assume pkgng
+    pkg_delete_path = module.get_bin_path('pkg_delete', False)
+    if not pkg_delete_path:
+        pkg_delete_path = module.get_bin_path('pkg', True)
+        pkg_delete_path = pkg_delete_path + " delete -y"
+
+    # Use a for loop so that, on error, we can report which package failed
+    for package in packages:
+        # Query the package first, to see if we even need to remove it
+        if not query_package(module, package):
+            continue
+
+        rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True)
+
+        if query_package(module, package):
+            name_without_digits = re.sub('[0-9]', '', package)
+            rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path,
+                                                              shlex_quote(name_without_digits)),
+                                              use_unsafe_shell=True)
+            if query_package(module, package):
+                module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+        remove_c += 1
+
+    if remove_c > 0:
+        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+    module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages, use_packages):
+
+    install_c = 0
+
+    # If portinstall is not found, install it automatically via portupgrade
+    portinstall_path = module.get_bin_path('portinstall', False)
+    if not portinstall_path:
+        pkg_path = module.get_bin_path('pkg', False)
+        if pkg_path:
+            module.run_command("pkg install -y portupgrade")
+        portinstall_path = module.get_bin_path('portinstall', True)
+
+    if use_packages:
+        portinstall_params = "--use-packages"
+    else:
+        portinstall_params = ""
+
+    for package in packages:
+        if query_package(module, package):
+            continue
+
+        # TODO: check how many match
+        matches = matching_packages(module, package)
+        if matches == 1:
+            rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
+            if not query_package(module, package):
+                module.fail_json(msg="failed to install %s: %s" % (package, out))
+        elif matches == 0:
+            module.fail_json(msg="no matches for package %s" % (package))
+        else:
+            module.fail_json(msg="%s matches found for package name %s" % (matches, package))
+
+        install_c += 1
+
+    if install_c > 0:
+        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+    module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(default="present", choices=["present", "absent"]),
+            name=dict(aliases=["pkg"], required=True),
use_packages=dict(type='bool', default=True))) + + p = module.params + + pkgs = p["name"].split(",") + + if p["state"] == "present": + install_packages(module, pkgs, p["use_packages"]) + + elif p["state"] == "absent": + remove_packages(module, pkgs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pritunl_org.py b/ansible_collections/community/general/plugins/modules/pritunl_org.py new file mode 100644 index 000000000..df2df4494 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pritunl_org.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: pritunl_org +author: Florian Dambrine (@Lowess) +version_added: 2.5.0 +short_description: Manages Pritunl Organizations using the Pritunl API +description: + - A module to manage Pritunl organizations using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + required: true + aliases: + - org + description: + - The name of the organization to manage in Pritunl. + + force: + type: bool + default: false + description: + - If I(force) is C(true) and I(state) is C(absent), the module + will delete the organization, no matter if it contains users + or not. By default I(force) is C(false), which will cause the + module to fail the deletion of the organization when it contains + users. + + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If C(present), the module adds organization I(name) to + Pritunl. If C(absent), attempt to delete the organization + from Pritunl (please read about I(force) usage). +""" + +EXAMPLES = """ +- name: Ensure the organization named MyOrg exists + community.general.pritunl_org: + state: present + name: MyOrg + +- name: Ensure the organization named MyOrg does not exist + community.general.pritunl_org: + state: absent + name: MyOrg +""" + +RETURN = """ +response: + description: JSON representation of a Pritunl Organization. 
+ returned: success + type: dict + sample: + { + "auth_api": false, + "name": "Foo", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "csftwlu6uhralzi2dpmhekz3", + } +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + delete_pritunl_organization, + post_pritunl_organization, + list_pritunl_organizations, + get_pritunl_settings, + pritunl_argument_spec, +) + + +def add_pritunl_organization(module): + result = {} + + org_name = module.params.get("name") + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name}}, + ) + ) + + # If the organization already exists + if len(org_obj_list) > 0: + result["changed"] = False + result["response"] = org_obj_list[0] + else: + # Otherwise create it + response = post_pritunl_organization( + **dict_merge( + get_pritunl_settings(module), + {"organization_name": org_name}, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def remove_pritunl_organization(module): + result = {} + + org_name = module.params.get("name") + force = module.params.get("force") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + { + "filters": {"name": org_name}, + }, + ) + ) + + # No organization found + if len(org_obj_list) == 0: + result["changed"] = False + result["response"] = {} + + else: + # Otherwise attempt to delete it + org = org_obj_list[0] + + # Only accept deletion under specific conditions + if force or org["user_count"] == 0: + response = delete_pritunl_organization( + **dict_merge( + get_pritunl_settings(module), + {"organization_id": org["id"]}, + ) + ) + result["changed"] = True + result["response"] = response + else: + module.fail_json( + msg=( + "Can not remove organization '%s' with %d attached users. 
" + "Either set 'force' option to true or remove active users " + "from the organization" + ) + % (org_name, org["user_count"]) + ) + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + name=dict(required=True, type="str", aliases=["org"]), + force=dict(required=False, type="bool", default=False), + state=dict( + required=False, choices=["present", "absent"], default="present" + ), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + state = module.params.get("state") + + try: + if state == "present": + add_pritunl_organization(module) + elif state == "absent": + remove_pritunl_organization(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/pritunl_org_info.py b/ansible_collections/community/general/plugins/modules/pritunl_org_info.py new file mode 100644 index 000000000..979e29b5a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pritunl_org_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: pritunl_org_info +author: Florian Dambrine (@Lowess) +version_added: 2.5.0 +short_description: List Pritunl Organizations using the Pritunl API +description: + - A module to list Pritunl organizations using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl + - community.general.attributes + - community.general.attributes.info_module +options: + organization: + type: str + required: false + aliases: + - org + default: null + description: + - Name of the Pritunl organization to search for. + If none provided, the module will return all Pritunl + organizations. +""" + +EXAMPLES = """ +- name: List all existing Pritunl organizations + community.general.pritunl_org_info: + +- name: Search for an organization named MyOrg + community.general.pritunl_user_info: + organization: MyOrg +""" + +RETURN = """ +organizations: + description: List of Pritunl organizations. 
+ returned: success + type: list + elements: dict + sample: + [ + { + "auth_api": false, + "name": "FooOrg", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "csftwlu6uhralzi2dpmhekz3", + }, + { + "auth_api": false, + "name": "MyOrg", + "auth_token": null, + "user_count": 3, + "auth_secret": null, + "id": "58070daee63f3b2e6e472c36", + }, + { + "auth_api": false, + "name": "BarOrg", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "v1sncsxxybnsylc8gpqg85pg", + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + get_pritunl_settings, + list_pritunl_organizations, + pritunl_argument_spec, +) + + +def get_pritunl_organizations(module): + org_name = module.params.get("organization") + + organizations = [] + + organizations = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name} if org_name else None}, + ) + ) + + if org_name and len(organizations) == 0: + # When an org_name is provided but no organization match return an error + module.fail_json(msg="Organization '%s' does not exist" % org_name) + + result = {} + result["changed"] = False + result["organizations"] = organizations + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(required=False, type="str", default=None, aliases=["org"]) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + get_pritunl_organizations(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/pritunl_user.py b/ansible_collections/community/general/plugins/modules/pritunl_user.py new file mode 100644 index 000000000..5aac23393 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pritunl_user.py @@ -0,0 +1,361 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: pritunl_user +author: "Florian Dambrine (@Lowess)" +version_added: 2.3.0 +short_description: Manage Pritunl Users using the Pritunl API +description: + - A module to manage Pritunl users using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + organization: + type: str + required: true + aliases: + - org + description: + - The name of the organization the user is part of. + + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If C(present), the module adds user I(user_name) to + the Pritunl I(organization). If C(absent), removes the user + I(user_name) from the Pritunl I(organization). + + user_name: + type: str + required: true + default: null + description: + - Name of the user to create or delete from Pritunl. 
+ + user_email: + type: str + required: false + default: null + description: + - Email address associated with the user I(user_name). + + user_type: + type: str + required: false + default: client + choices: + - client + - server + description: + - Type of the user I(user_name). + + user_groups: + type: list + elements: str + required: false + default: null + description: + - List of groups associated with the user I(user_name). + + user_disabled: + type: bool + required: false + default: null + description: + - Enable/Disable the user I(user_name). + + user_gravatar: + type: bool + required: false + default: null + description: + - Enable/Disable Gravatar usage for the user I(user_name). + + user_mac_addresses: + type: list + elements: str + description: + - Allowed MAC addresses for the user I(user_name). + version_added: 5.0.0 +""" + +EXAMPLES = """ +- name: Create the user Foo with email address foo@bar.com in MyOrg + community.general.pritunl_user: + state: present + organization: MyOrg + user_name: Foo + user_email: foo@bar.com + user_mac_addresses: + - "00:00:00:00:00:99" + +- name: Disable the user Foo but keep it in Pritunl + community.general.pritunl_user: + state: present + organization: MyOrg + user_name: Foo + user_email: foo@bar.com + user_disabled: true + +- name: Make sure the user Foo is not part of MyOrg anymore + community.general.pritunl_user: + state: absent + organization: MyOrg + user_name: Foo +""" + +RETURN = """ +response: + description: JSON representation of Pritunl Users. + returned: success + type: dict + sample: + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [], + } +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + delete_pritunl_user, + get_pritunl_settings, + list_pritunl_organizations, + list_pritunl_users, + post_pritunl_user, + pritunl_argument_spec, +) + + +def add_or_update_pritunl_user(module): + result = {} + + org_name = module.params.get("organization") + user_name = module.params.get("user_name") + + user_params = { + "name": user_name, + "email": module.params.get("user_email"), + "groups": module.params.get("user_groups"), + "disabled": module.params.get("user_disabled"), + "gravatar": module.params.get("user_gravatar"), + "mac_addresses": module.params.get("user_mac_addresses"), + "type": module.params.get("user_type"), + } + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name}}, + ) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not add user to organization '%s' which does not exist" % org_name + ) + + org_id = org_obj_list[0]["id"] + + # Grab existing users from this org + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": 
{"name": user_name}, + }, + ) + ) + + # Check if the pritunl user already exists + if len(users) > 0: + # Compare remote user params with local user_params and trigger update if needed + user_params_changed = False + for key in user_params.keys(): + # When a param is not specified grab existing ones to prevent from changing it with the PUT request + if user_params[key] is None: + user_params[key] = users[0][key] + + # 'groups' and 'mac_addresses' are list comparison + if key == "groups" or key == "mac_addresses": + if set(users[0][key]) != set(user_params[key]): + user_params_changed = True + + # otherwise it is either a boolean or a string + else: + if users[0][key] != user_params[key]: + user_params_changed = True + + # Trigger a PUT on the API to update the current user if settings have changed + if user_params_changed: + response = post_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_id": users[0]["id"], + "user_data": user_params, + }, + ) + ) + + result["changed"] = True + result["response"] = response + else: + result["changed"] = False + result["response"] = users + else: + response = post_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_data": user_params, + }, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def remove_pritunl_user(module): + result = {} + + org_name = module.params.get("organization") + user_name = module.params.get("user_name") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + { + "filters": {"name": org_name}, + }, + ) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not remove user '%s' from a non existing organization '%s'" + % (user_name, org_name) + ) + + org_id = org_obj_list[0]["id"] + + # Grab existing users from this org + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": {"name": user_name}, + }, + ) + ) + + # Check if the pritunl user exists, if not, do nothing + if len(users) == 0: + result["changed"] = False + result["response"] = {} + + # Otherwise remove the org from Pritunl + else: + response = delete_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_id": users[0]["id"], + }, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(required=True, type="str", aliases=["org"]), + state=dict( + required=False, choices=["present", "absent"], default="present" + ), + user_name=dict(required=True, type="str"), + user_type=dict( + required=False, choices=["client", "server"], default="client" + ), + user_email=dict(required=False, type="str", default=None), + user_groups=dict(required=False, type="list", elements="str", default=None), + user_disabled=dict(required=False, type="bool", default=None), + user_gravatar=dict(required=False, type="bool", default=None), + user_mac_addresses=dict(required=False, type="list", elements="str", default=None), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + state = module.params.get("state") + + try: + if state == "present": + add_or_update_pritunl_user(module) + elif state == "absent": + remove_pritunl_user(module) + except PritunlException as e: + 
module.fail_json(msg=to_native(e))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/pritunl_user_info.py b/ansible_collections/community/general/plugins/modules/pritunl_user_info.py
new file mode 100644
index 000000000..7b0399061
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pritunl_user_info.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: pritunl_user_info
+author: "Florian Dambrine (@Lowess)"
+version_added: 2.3.0
+short_description: List Pritunl Users using the Pritunl API
+description:
+    - A module to list Pritunl users using the Pritunl API.
+extends_documentation_fragment:
+    - community.general.pritunl
+    - community.general.attributes
+    - community.general.attributes.info_module
+options:
+    organization:
+        type: str
+        required: true
+        aliases:
+            - org
+        description:
+            - The name of the organization the user is part of.
+
+    user_name:
+        type: str
+        required: false
+        description:
+            - Name of the user to filter on in Pritunl.
+
+    user_type:
+        type: str
+        required: false
+        default: client
+        choices:
+            - client
+            - server
+        description:
+            - Type of the user I(user_name).
+"""
+
+EXAMPLES = """
+- name: List all existing users in the organization MyOrg
+  community.general.pritunl_user_info:
+    organization: MyOrg
+
+- name: Search for the user named Florian in the organization MyOrg
+  community.general.pritunl_user_info:
+    organization: MyOrg
+    user_name: Florian
+"""
+
+RETURN = """
+users:
+    description: List of Pritunl users.
+ returned: success + type: list + elements: dict + sample: + [ + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [], + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + get_pritunl_settings, + list_pritunl_organizations, + list_pritunl_users, + pritunl_argument_spec, +) + + +def get_pritunl_user(module): + user_name = module.params.get("user_name") + user_type = module.params.get("user_type") + org_name = module.params.get("organization") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge(get_pritunl_settings(module), {"filters": {"name": org_name}}) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not list users from the organization '%s' which does not exist" + % org_name + ) + + org_id = org_obj_list[0]["id"] + + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": ( + {"type": user_type} + if user_name is None + else {"name": user_name, "type": user_type} + ), + }, + ) + ) + + result = {} + result["changed"] = False + result["users"] = users + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(required=True, type="str", aliases=["org"]), + user_name=dict(required=False, type="str", default=None), + user_type=dict( + required=False, + choices=["client", "server"], + default="client", + ), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + get_pritunl_user(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/profitbricks.py b/ansible_collections/community/general/plugins/modules/profitbricks.py new file mode 100644 index 000000000..c8bcceb93 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/profitbricks.py @@ -0,0 +1,667 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: profitbricks +short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine +description: + - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait + for it to be 'running' before returning. 
This module has a dependency on profitbricks >= 1.0.0
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  auto_increment:
+    description:
+      - Whether or not to increment a single number in the name for created virtual machines.
+    type: bool
+    default: true
+  name:
+    description:
+      - The name of the virtual machine.
+    type: str
+  image:
+    description:
+      - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+    type: str
+  image_password:
+    description:
+      - Password set for the administrative user.
+    type: str
+  ssh_keys:
+    description:
+      - Public SSH keys allowing access to the virtual machine.
+    type: list
+    elements: str
+    default: []
+  datacenter:
+    description:
+      - The datacenter to provision this virtual machine.
+    type: str
+  cores:
+    description:
+      - The number of CPU cores to allocate to the virtual machine.
+    default: 2
+    type: int
+  ram:
+    description:
+      - The amount of memory to allocate to the virtual machine.
+    default: 2048
+    type: int
+  cpu_family:
+    description:
+      - The CPU family type to allocate to the virtual machine.
+    type: str
+    default: AMD_OPTERON
+    choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+  volume_size:
+    description:
+      - The size in GB of the boot volume.
+    type: int
+    default: 10
+  bus:
+    description:
+      - The bus type for the volume.
+    type: str
+    default: VIRTIO
+    choices: [ "IDE", "VIRTIO" ]
+  instance_ids:
+    description:
+      - List of instance IDs, currently only used when state='absent' to remove instances.
+    type: list
+    elements: str
+    default: []
+  count:
+    description:
+      - The number of virtual machines to create.
+    type: int
+    default: 1
+  location:
+    description:
+      - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
+    type: str
+    default: us/las
+    choices: [ "us/las", "de/fra", "de/fkb" ]
+  assign_public_ip:
+    description:
+      - This will assign the machine to the public LAN. If no LAN exists with public Internet access, it is created.
+    type: bool
+    default: false
+  lan:
+    description:
+      - The ID of the LAN you wish to add the servers to.
+    type: int
+    default: 1
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+    type: str
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+    type: str
+  wait:
+    description:
+      - Wait for the instance to be in state 'running' before returning.
+    type: bool
+    default: true
+  wait_timeout:
+    description:
+      - How long before wait gives up, in seconds.
+    type: int
+    default: 600
+  remove_boot_volume:
+    description:
+      - Remove the bootVolume of the virtual machine you are destroying.
+    type: bool
+    default: true
+  state:
+    description:
+      - Create or terminate instances.
+      - 'The choices available are: C(running), C(stopped), C(absent), C(present).'
+    type: str
+    default: 'present'
+  disk_type:
+    description:
+      - The type of disk to be allocated.
+    type: str
+    choices: [SSD, HDD]
+    default: HDD
+
+requirements:
+  - "profitbricks"
+  - "python >= 2.6"
+author: Matt Baldwin (@baldwinSPC)
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details; see the
+# subscription_user and subscription_password options above.
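+# Per those option descriptions, values given here override the
+# PB_SUBSCRIPTION_ID and PB_PASSWORD environment variables.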
+
+# Provisioning example
+- name: Create three servers and enumerate their names
+  community.general.profitbricks:
+    datacenter: Tardis One
+    name: web%02d.stackpointcloud.com
+    cores: 4
+    ram: 2048
+    volume_size: 50
+    cpu_family: INTEL_XEON
+    image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+    location: us/las
+    count: 3
+    assign_public_ip: true
+
+- name: Remove virtual machines
+  community.general.profitbricks:
+    datacenter: Tardis One
+    instance_ids:
+      - 'web001.stackpointcloud.com'
+      - 'web002.stackpointcloud.com'
+      - 'web003.stackpointcloud.com'
+    wait_timeout: 500
+    state: absent
+
+- name: Start virtual machines
+  community.general.profitbricks:
+    datacenter: Tardis One
+    instance_ids:
+      - 'web001.stackpointcloud.com'
+      - 'web002.stackpointcloud.com'
+      - 'web003.stackpointcloud.com'
+    wait_timeout: 500
+    state: running
+
+- name: Stop virtual machines
+  community.general.profitbricks:
+    datacenter: Tardis One
+    instance_ids:
+      - 'web001.stackpointcloud.com'
+      - 'web002.stackpointcloud.com'
+      - 'web003.stackpointcloud.com'
+    wait_timeout: 500
+    state: stopped
+'''
+
+import re
+import uuid
+import time
+import traceback
+
+HAS_PB_SDK = True
+
+try:
+    from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
+except ImportError:
+    HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.common.text.converters import to_native
+
+
+LOCATIONS = ['us/las',
+             'de/fra',
+             'de/fkb']
+
+uuid_match = re.compile(
+    r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+    if not promise:
+        return
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time():
+        time.sleep(5)
+        operation_result = profitbricks.get_request(
+            request_id=promise['requestId'],
+            status=True)
+
+        if operation_result['metadata']['status'] == "DONE":
+            return
+        elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+    raise Exception(
+        'Timed out waiting for async operation ' + msg + ' "' + str(
+            promise['requestId']
+        ) + '" to complete.')
+
+
+def _create_machine(module, profitbricks, datacenter, name):
+    cores = module.params.get('cores')
+    ram = module.params.get('ram')
+    cpu_family = module.params.get('cpu_family')
+    volume_size = module.params.get('volume_size')
+    disk_type = module.params.get('disk_type')
+    image_password = module.params.get('image_password')
+    ssh_keys = module.params.get('ssh_keys')
+    bus = module.params.get('bus')
+    lan = module.params.get('lan')
+    subscription_user = module.params.get('subscription_user')
+    subscription_password = module.params.get('subscription_password')
+    location = module.params.get('location')
+    image = module.params.get('image')
+    assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+    wait = module.params.get('wait')
+    wait_timeout = module.params.get('wait_timeout')
+
+    if assign_public_ip:
+        public_found = False
+
+        lans = profitbricks.list_lans(datacenter)
+        for lan_item in lans['items']:
+            # Reuse the first public LAN found instead of creating a new one;
+            # a separate loop variable avoids clobbering 'lan' with a later,
+            # non-public entry.
+            if lan_item['properties']['public']:
+                public_found = True
+                lan = lan_item['id']
+                break
+
+        if not public_found:
+            i = LAN(
+                name='public',
+                public=True)
+
+            lan_response = profitbricks.create_lan(datacenter, i)
+            _wait_for_completion(profitbricks, lan_response,
+                                 wait_timeout, "_create_machine")
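+            # Use the ID of the LAN that was just created for the NIC below.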
lan = lan_response['id']
+
+    v = Volume(
+        name=str(uuid.uuid4()).replace('-', '')[:10],
+        size=volume_size,
+        image=image,
+        image_password=image_password,
+        ssh_keys=ssh_keys,
+        disk_type=disk_type,
+        bus=bus)
+
+    n = NIC(
+        lan=int(lan)
+    )
+
+    s = Server(
+        name=name,
+        ram=ram,
+        cores=cores,
+        cpu_family=cpu_family,
+        create_volumes=[v],
+        nics=[n],
+    )
+
+    try:
+        create_server_response = profitbricks.create_server(
+            datacenter_id=datacenter, server=s)
+
+        _wait_for_completion(profitbricks, create_server_response,
+                             wait_timeout, "create_virtual_machine")
+
+        server_response = profitbricks.get_server(
+            datacenter_id=datacenter,
+            server_id=create_server_response['id'],
+            depth=3
+        )
+    except Exception as e:
+        module.fail_json(msg="failed to create the new server: %s" % str(e))
+    else:
+        return server_response
+
+
+def _startstop_machine(module, profitbricks, datacenter_id, server_id):
+    state = module.params.get('state')
+
+    try:
+        if state == 'running':
+            profitbricks.start_server(datacenter_id, server_id)
+        else:
+            profitbricks.stop_server(datacenter_id, server_id)
+
+        return True
+    except Exception as e:
+        module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
+
+
+def _create_datacenter(module, profitbricks):
+    datacenter = module.params.get('datacenter')
+    location = module.params.get('location')
+    wait_timeout = module.params.get('wait_timeout')
+
+    i = Datacenter(
+        name=datacenter,
+        location=location
+    )
+
+    try:
+        datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+        _wait_for_completion(profitbricks, datacenter_response,
+                             wait_timeout, "_create_datacenter")
+
+        return datacenter_response
+    except Exception as e:
+        module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+
+def create_virtual_machine(module, profitbricks):
+    """
+    Create new virtual machine
+
+    module : AnsibleModule object
+    community.general.profitbricks: authenticated profitbricks object
+
+    Returns:
+        True if a new virtual machine was created, false otherwise
+    """
+    datacenter = module.params.get('datacenter')
+    name = module.params.get('name')
+    auto_increment = module.params.get('auto_increment')
+    count = module.params.get('count')
+    lan = module.params.get('lan')
+    wait_timeout = module.params.get('wait_timeout')
+    failed = True
+    datacenter_found = False
+
+    virtual_machines = []
+    virtual_machine_ids = []
+
+    # Locate UUID for datacenter if referenced by name.
+    datacenter_list = profitbricks.list_datacenters()
+    datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+    if datacenter_id:
+        datacenter_found = True
+
+    if not datacenter_found:
+        datacenter_response = _create_datacenter(module, profitbricks)
+        datacenter_id = datacenter_response['id']
+
+        _wait_for_completion(profitbricks, datacenter_response,
+                             wait_timeout, "create_virtual_machine")
+
+    if auto_increment:
+        numbers = set()
+        count_offset = 1
+
+        try:
+            name % 0
+        except TypeError as e:
+            # Exceptions have no .message attribute under Python 3; use the
+            # exception's string form instead.
+            if to_native(e).startswith('not all'):
+                name = '%s%%d' % name
+            else:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+        number_range = xrange(count_offset, count_offset + count + len(numbers))
+        available_numbers = list(set(number_range).difference(numbers))
+        names = []
+        numbers_to_use = available_numbers[:count]
+        for number in numbers_to_use:
+            names.append(name % number)
+    else:
+        names = [name]
+
+    # Prefetch a list of servers for later comparison.
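+    # Names that already exist are skipped in the loop below, which is what
+    # keeps repeated runs of this task idempotent.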
+ server_list = profitbricks.list_servers(datacenter_id) + for name in names: + # Skip server creation if the server already exists. + if _get_server_id(server_list, name): + continue + + create_response = _create_machine(module, profitbricks, str(datacenter_id), name) + nics = profitbricks.list_nics(datacenter_id, create_response['id']) + for n in nics['items']: + if lan == n['properties']['lan']: + create_response.update({'public_ip': n['properties']['ips'][0]}) + + virtual_machines.append(create_response) + + failed = False + + results = { + 'failed': failed, + 'machines': virtual_machines, + 'action': 'create', + 'instance_ids': { + 'instances': [i['id'] for i in virtual_machines], + } + } + + return results + + +def remove_virtual_machine(module, profitbricks): + """ + Removes a virtual machine. + + This will remove the virtual machine along with the bootVolume. + + module : AnsibleModule object + community.general.profitbricks: authenticated profitbricks object. + + Not yet supported: handle deletion of attached data disks. + + Returns: + True if a new virtual server was deleted, false otherwise + """ + datacenter = module.params.get('datacenter') + instance_ids = module.params.get('instance_ids') + remove_boot_volume = module.params.get('remove_boot_volume') + changed = False + + if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: + module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') + + # Locate UUID for datacenter if referenced by name. + datacenter_list = profitbricks.list_datacenters() + datacenter_id = _get_datacenter_id(datacenter_list, datacenter) + if not datacenter_id: + module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) + + # Prefetch server list for later comparison. + server_list = profitbricks.list_servers(datacenter_id) + for instance in instance_ids: + # Locate UUID for server if referenced by name. + server_id = _get_server_id(server_list, instance) + if server_id: + # Remove the server's boot volume + if remove_boot_volume: + _remove_boot_volume(module, profitbricks, datacenter_id, server_id) + + # Remove the server + try: + server_response = profitbricks.delete_server(datacenter_id, server_id) + except Exception as e: + module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc()) + else: + changed = True + + return changed + + +def _remove_boot_volume(module, profitbricks, datacenter_id, server_id): + """ + Remove the boot volume from the server + """ + try: + server = profitbricks.get_server(datacenter_id, server_id) + volume_id = server['properties']['bootVolume']['id'] + volume_response = profitbricks.delete_volume(datacenter_id, volume_id) + except Exception as e: + module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc()) + + +def startstop_machine(module, profitbricks, state): + """ + Starts or Stops a virtual machine. + + module : AnsibleModule object + community.general.profitbricks: authenticated profitbricks object. + + Returns: + True when the servers process the action successfully, false otherwise. 
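+    When wait is true, this also polls, up to wait_timeout seconds, until
+    every requested server reports the expected vmState.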
+ """ + if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: + module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') + + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + changed = False + + datacenter = module.params.get('datacenter') + instance_ids = module.params.get('instance_ids') + + # Locate UUID for datacenter if referenced by name. + datacenter_list = profitbricks.list_datacenters() + datacenter_id = _get_datacenter_id(datacenter_list, datacenter) + if not datacenter_id: + module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) + + # Prefetch server list for later comparison. + server_list = profitbricks.list_servers(datacenter_id) + for instance in instance_ids: + # Locate UUID of server if referenced by name. + server_id = _get_server_id(server_list, instance) + if server_id: + _startstop_machine(module, profitbricks, datacenter_id, server_id) + changed = True + + if wait: + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + matched_instances = [] + for res in profitbricks.list_servers(datacenter_id)['items']: + if state == 'running': + if res['properties']['vmState'].lower() == state: + matched_instances.append(res) + elif state == 'stopped': + if res['properties']['vmState'].lower() == 'shutoff': + matched_instances.append(res) + + if len(matched_instances) < len(instance_ids): + time.sleep(5) + else: + break + + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime()) + + return (changed) + + +def _get_datacenter_id(datacenters, identity): + """ + Fetch and return datacenter UUID by datacenter name if found. + """ + for datacenter in datacenters['items']: + if identity in (datacenter['properties']['name'], datacenter['id']): + return datacenter['id'] + return None + + +def _get_server_id(servers, identity): + """ + Fetch and return server UUID by server name if found. 
+ """ + for server in servers['items']: + if identity in (server['properties']['name'], server['id']): + return server['id'] + return None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + datacenter=dict(), + name=dict(), + image=dict(), + cores=dict(type='int', default=2), + ram=dict(type='int', default=2048), + cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'], + default='AMD_OPTERON'), + volume_size=dict(type='int', default=10), + disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), + image_password=dict(no_log=True), + ssh_keys=dict(type='list', elements='str', default=[], no_log=False), + bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), + lan=dict(type='int', default=1), + count=dict(type='int', default=1), + auto_increment=dict(type='bool', default=True), + instance_ids=dict(type='list', elements='str', default=[]), + subscription_user=dict(), + subscription_password=dict(no_log=True), + location=dict(choices=LOCATIONS, default='us/las'), + assign_public_ip=dict(type='bool', default=False), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + remove_boot_volume=dict(type='bool', default=True), + state=dict(default='present'), + ) + ) + + if not HAS_PB_SDK: + module.fail_json(msg='profitbricks required for this module') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required ' + + 'for running or stopping machines.') + + try: + (changed) = remove_virtual_machine(module, profitbricks) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) + + elif state in ('running', 'stopped'): + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required for ' + + 'running or stopping machines.') + try: + (changed) = startstop_machine(module, profitbricks, state) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) + + elif state == 'present': + if not module.params.get('name'): + module.fail_json(msg='name parameter is required for new instance') + if not module.params.get('image'): + module.fail_json(msg='image parameter is required for new instance') + if not module.params.get('subscription_user'): + module.fail_json(msg='subscription_user parameter is ' + + 'required for new instance') + if not module.params.get('subscription_password'): + module.fail_json(msg='subscription_password parameter is ' + + 'required for new instance') + + try: + (machine_dict_array) = create_virtual_machine(module, profitbricks) + module.exit_json(**machine_dict_array) + except Exception as e: + module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py b/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py new file mode 100644 index 000000000..a096db752 --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_datacenter
+short_description: Create or destroy a ProfitBricks Virtual Datacenter
+description:
+  - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
+    on profitbricks >= 1.0.0
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - The name of the virtual datacenter.
+    type: str
+  description:
+    description:
+      - The description of the virtual datacenter.
+    type: str
+    required: false
+  location:
+    description:
+      - The datacenter location.
+    type: str
+    required: false
+    default: us/las
+    choices: [ "us/las", "de/fra", "de/fkb" ]
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+    type: str
+    required: false
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+    type: str
+    required: false
+  wait:
+    description:
+      - wait for the datacenter to be created before returning
+    required: false
+    default: true
+    type: bool
+  wait_timeout:
+    description:
+      - how long before wait gives up, in seconds
+    type: int
+    default: 600
+  state:
+    description:
+      - Create or terminate datacenters.
+      - "The available choices are: C(present), C(absent)."
+ type: str + required: false + default: 'present' + +requirements: [ "profitbricks" ] +author: Matt Baldwin (@baldwinSPC) +''' + +EXAMPLES = ''' +- name: Create a datacenter + community.general.profitbricks_datacenter: + datacenter: Tardis One + wait_timeout: 500 + +- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter) + community.general.profitbricks_datacenter: + datacenter: Tardis One + wait_timeout: 500 + state: absent +''' + +import re +import time + +HAS_PB_SDK = True +try: + from profitbricks.client import ProfitBricksService, Datacenter +except ImportError: + HAS_PB_SDK = False + +from ansible.module_utils.basic import AnsibleModule + + +LOCATIONS = ['us/las', + 'de/fra', + 'de/fkb'] + +uuid_match = re.compile( + r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) + + +def _wait_for_completion(profitbricks, promise, wait_timeout, msg): + if not promise: + return + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + operation_result = profitbricks.get_request( + request_id=promise['requestId'], + status=True) + + if operation_result['metadata']['status'] == "DONE": + return + elif operation_result['metadata']['status'] == "FAILED": + raise Exception( + 'Request failed to complete ' + msg + ' "' + str( + promise['requestId']) + '" to complete.') + + raise Exception( + 'Timed out waiting for async operation ' + msg + ' "' + str( + promise['requestId'] + ) + '" to complete.') + + +def _remove_datacenter(module, profitbricks, datacenter): + try: + profitbricks.delete_datacenter(datacenter) + except Exception as e: + module.fail_json(msg="failed to remove the datacenter: %s" % str(e)) + + +def create_datacenter(module, profitbricks): + """ + Creates a Datacenter + + This will create a new Datacenter in the specified location. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if a new datacenter was created, false otherwise + """ + name = module.params.get('name') + location = module.params.get('location') + description = module.params.get('description') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + i = Datacenter( + name=name, + location=location, + description=description + ) + + try: + datacenter_response = profitbricks.create_datacenter(datacenter=i) + + if wait: + _wait_for_completion(profitbricks, datacenter_response, + wait_timeout, "_create_datacenter") + + results = { + 'datacenter_id': datacenter_response['id'] + } + + return results + + except Exception as e: + module.fail_json(msg="failed to create the new datacenter: %s" % str(e)) + + +def remove_datacenter(module, profitbricks): + """ + Removes a Datacenter. + + This will remove a datacenter. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. 
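+
+    The datacenter may be referenced by UUID or by name; a name is resolved
+    by listing all datacenters and comparing it against each entry.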
+
+    Returns:
+        True if the datacenter was deleted, false otherwise
+    """
+    name = module.params.get('name')
+    changed = False
+
+    if uuid_match.match(name):
+        _remove_datacenter(module, profitbricks, name)
+        changed = True
+    else:
+        datacenters = profitbricks.list_datacenters()
+
+        for d in datacenters['items']:
+            vdc = profitbricks.get_datacenter(d['id'])
+
+            if name == vdc['properties']['name']:
+                name = d['id']
+                _remove_datacenter(module, profitbricks, name)
+                changed = True
+
+    return changed
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(),
+            description=dict(),
+            location=dict(choices=LOCATIONS, default='us/las'),
+            subscription_user=dict(),
+            subscription_password=dict(no_log=True),
+            wait=dict(type='bool', default=True),
+            wait_timeout=dict(default=600, type='int'),
+            state=dict(default='present'),  # @TODO add choices
+        )
+    )
+    if not HAS_PB_SDK:
+        module.fail_json(msg='profitbricks required for this module')
+
+    if not module.params.get('subscription_user'):
+        module.fail_json(msg='subscription_user parameter is required')
+    if not module.params.get('subscription_password'):
+        module.fail_json(msg='subscription_password parameter is required')
+
+    subscription_user = module.params.get('subscription_user')
+    subscription_password = module.params.get('subscription_password')
+
+    profitbricks = ProfitBricksService(
+        username=subscription_user,
+        password=subscription_password)
+
+    state = module.params.get('state')
+
+    if state == 'absent':
+        if not module.params.get('name'):
+            module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
+
+        try:
+            (changed) = remove_datacenter(module, profitbricks)
+            module.exit_json(
+                changed=changed)
+        except Exception as e:
+            module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+    elif state == 'present':
+        if not module.params.get('name'):
+            module.fail_json(msg='name parameter is required for a new datacenter')
+        if not module.params.get('location'):
+            module.fail_json(msg='location parameter is required for a new datacenter')
+
+        try:
+            (datacenter_dict_array) = create_datacenter(module, profitbricks)
+            module.exit_json(**datacenter_dict_array)
+        except Exception as e:
+            module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_nic.py b/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
new file mode 100644
index 000000000..17a30b052
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_nic
+short_description: Create or Remove a NIC
+description:
+  - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  datacenter:
+    description:
+      - The datacenter in which to operate.
+    type: str
+    required: true
+  server:
+    description:
+      - The server name or ID.
+ type: str + required: true + name: + description: + - The name or ID of the NIC. This is only required on deletes, but not on create. + - If not specified, it defaults to a value based on UUID4. + type: str + lan: + description: + - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create. + type: str + subscription_user: + description: + - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + type: str + required: true + subscription_password: + description: + - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. + type: str + required: true + wait: + description: + - wait for the operation to complete before returning + required: false + default: true + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + type: int + default: 600 + state: + description: + - Indicate desired state of the resource + - "The available choices are: C(present), C(absent)." + type: str + required: false + default: 'present' + +requirements: [ "profitbricks" ] +author: Matt Baldwin (@baldwinSPC) +''' + +EXAMPLES = ''' +- name: Create a NIC + community.general.profitbricks_nic: + datacenter: Tardis One + server: node002 + lan: 2 + wait_timeout: 500 + state: present + +- name: Remove a NIC + community.general.profitbricks_nic: + datacenter: Tardis One + server: node002 + name: 7341c2454f + wait_timeout: 500 + state: absent +''' + +import re +import uuid +import time + +HAS_PB_SDK = True +try: + from profitbricks.client import ProfitBricksService, NIC +except ImportError: + HAS_PB_SDK = False + +from ansible.module_utils.basic import AnsibleModule + + +uuid_match = re.compile( + r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) + + +def _make_default_name(): + return str(uuid.uuid4()).replace('-', '')[:10] + + +def _wait_for_completion(profitbricks, promise, wait_timeout, msg): + if not promise: + return + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + operation_result = profitbricks.get_request( + request_id=promise['requestId'], + status=True) + + if operation_result['metadata']['status'] == "DONE": + return + elif operation_result['metadata']['status'] == "FAILED": + raise Exception( + 'Request failed to complete ' + msg + ' "' + str( + promise['requestId']) + '" to complete.') + + raise Exception( + 'Timed out waiting for async operation ' + msg + ' "' + str( + promise['requestId'] + ) + '" to complete.') + + +def create_nic(module, profitbricks): + """ + Creates a NIC. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. 
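+
+    When no I(name) is supplied, a default is generated from a UUID4 value
+    truncated to ten characters (see _make_default_name above).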
+ + Returns: + True if the nic creates, false otherwise + """ + datacenter = module.params.get('datacenter') + server = module.params.get('server') + lan = module.params.get('lan') + name = module.params.get('name') + if name is None: + name = _make_default_name() + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + # Locate UUID for Server + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server = s['id'] + break + try: + n = NIC( + name=name, + lan=lan + ) + + nic_response = profitbricks.create_nic(datacenter, server, n) + + if wait: + _wait_for_completion(profitbricks, nic_response, + wait_timeout, "create_nic") + + return nic_response + + except Exception as e: + module.fail_json(msg="failed to create the NIC: %s" % str(e)) + + +def delete_nic(module, profitbricks): + """ + Removes a NIC + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if the NIC was removed, false otherwise + """ + datacenter = module.params.get('datacenter') + server = module.params.get('server') + name = module.params.get('name') + if name is None: + name = _make_default_name() + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + # Locate UUID for Server + server_found = False + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server_found = True + server = s['id'] + break + + if not server_found: + return False + + # Locate UUID for NIC + nic_found = False + if not (uuid_match.match(name)): + nic_list = profitbricks.list_nics(datacenter, server) + for n in nic_list['items']: + if name == n['properties']['name']: + nic_found = True + name = n['id'] + break + + if not nic_found: + return False + + try: + nic_response = profitbricks.delete_nic(datacenter, server, name) + return nic_response + except Exception as e: + module.fail_json(msg="failed to remove the NIC: %s" % str(e)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + datacenter=dict(required=True), + server=dict(required=True), + name=dict(), + lan=dict(), + subscription_user=dict(required=True), + subscription_password=dict(required=True, no_log=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + state=dict(default='present'), + ), + required_if=( + ('state', 'absent', ['name']), + ('state', 'present', ['lan']), + ) + ) + + if not HAS_PB_SDK: + module.fail_json(msg='profitbricks required for this module') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + try: + (changed) = delete_nic(module, profitbricks) + 
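+            # delete_nic() returns the delete response on success and False
+            # when the server or NIC could not be found by name.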
module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed to set nic state: %s' % str(e)) + + elif state == 'present': + try: + (nic_dict) = create_nic(module, profitbricks) + module.exit_json(nics=nic_dict) # @FIXME changed not calculated? + except Exception as e: + module.fail_json(msg='failed to set nic state: %s' % str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_volume.py b/ansible_collections/community/general/plugins/modules/profitbricks_volume.py new file mode 100644 index 000000000..f9d257b68 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/profitbricks_volume.py @@ -0,0 +1,440 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: profitbricks_volume +short_description: Create or destroy a volume +description: + - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + datacenter: + description: + - The datacenter in which to create the volumes. + type: str + name: + description: + - The name of the volumes. You can enumerate the names using auto_increment. + type: str + size: + description: + - The size of the volume. + type: int + required: false + default: 10 + bus: + description: + - The bus type. + type: str + required: false + default: VIRTIO + choices: [ "IDE", "VIRTIO"] + image: + description: + - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID. + type: str + image_password: + description: + - Password set for the administrative user. + type: str + required: false + ssh_keys: + description: + - Public SSH keys allowing access to the virtual machine. + type: list + elements: str + default: [] + disk_type: + description: + - The disk type of the volume. + type: str + required: false + default: HDD + choices: [ "HDD", "SSD" ] + licence_type: + description: + - The licence type for the volume. This is used when the image is non-standard. + - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)." + type: str + required: false + default: UNKNOWN + count: + description: + - The number of volumes you wish to create. + type: int + required: false + default: 1 + auto_increment: + description: + - Whether or not to increment a single number in the name for created virtual machines. + default: true + type: bool + instance_ids: + description: + - list of instance ids, currently only used when state='absent' to remove instances. + type: list + elements: str + default: [] + subscription_user: + description: + - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. + type: str + required: false + subscription_password: + description: + - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. 
+ type: str + required: false + wait: + description: + - wait for the datacenter to be created before returning + required: false + default: true + type: bool + wait_timeout: + description: + - how long before wait gives up, in seconds + type: int + default: 600 + state: + description: + - create or terminate datacenters + - "The available choices are: C(present), C(absent)." + type: str + required: false + default: 'present' + server: + description: + - Server name to attach the volume to. + type: str + +requirements: [ "profitbricks" ] +author: Matt Baldwin (@baldwinSPC) +''' + +EXAMPLES = ''' +- name: Create multiple volumes + community.general.profitbricks_volume: + datacenter: Tardis One + name: vol%02d + count: 5 + auto_increment: true + wait_timeout: 500 + state: present + +- name: Remove Volumes + community.general.profitbricks_volume: + datacenter: Tardis One + instance_ids: + - 'vol01' + - 'vol02' + wait_timeout: 500 + state: absent +''' + +import re +import time +import traceback + +HAS_PB_SDK = True +try: + from profitbricks.client import ProfitBricksService, Volume +except ImportError: + HAS_PB_SDK = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xrange +from ansible.module_utils.common.text.converters import to_native + + +uuid_match = re.compile( + r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) + + +def _wait_for_completion(profitbricks, promise, wait_timeout, msg): + if not promise: + return + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + operation_result = profitbricks.get_request( + request_id=promise['requestId'], + status=True) + + if operation_result['metadata']['status'] == "DONE": + return + elif operation_result['metadata']['status'] == "FAILED": + raise Exception( + 'Request failed to complete ' + msg + ' "' + str( + promise['requestId']) + '" to complete.') + + raise Exception( + 'Timed out waiting for async operation ' + msg + ' "' + str( + promise['requestId'] + ) + '" to complete.') + + +def _create_volume(module, profitbricks, datacenter, name): + size = module.params.get('size') + bus = module.params.get('bus') + image = module.params.get('image') + image_password = module.params.get('image_password') + ssh_keys = module.params.get('ssh_keys') + disk_type = module.params.get('disk_type') + licence_type = module.params.get('licence_type') + wait_timeout = module.params.get('wait_timeout') + wait = module.params.get('wait') + + try: + v = Volume( + name=name, + size=size, + bus=bus, + image=image, + image_password=image_password, + ssh_keys=ssh_keys, + disk_type=disk_type, + licence_type=licence_type + ) + + volume_response = profitbricks.create_volume(datacenter, v) + + if wait: + _wait_for_completion(profitbricks, volume_response, + wait_timeout, "_create_volume") + + except Exception as e: + module.fail_json(msg="failed to create the volume: %s" % str(e)) + + return volume_response + + +def _delete_volume(module, profitbricks, datacenter, volume): + try: + profitbricks.delete_volume(datacenter, volume) + except Exception as e: + module.fail_json(msg="failed to remove the volume: %s" % str(e)) + + +def create_volume(module, profitbricks): + """ + Creates a volume. + + This will create a volume in a datacenter. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. 
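+
+    With I(auto_increment) enabled, a C(%d) placeholder is appended to
+    I(name) when it contains none (the C(name % 0) probe below raises
+    TypeError for plain names), and the volumes are numbered from 1 up to
+    I(count); for example C(name: vol%02d) with C(count: 3) yields
+    vol01, vol02 and vol03.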
+ + Returns: + True if the volume was created, false otherwise + """ + datacenter = module.params.get('datacenter') + name = module.params.get('name') + auto_increment = module.params.get('auto_increment') + count = module.params.get('count') + + datacenter_found = False + failed = True + volumes = [] + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + datacenter_found = True + break + + if not datacenter_found: + module.fail_json(msg='datacenter could not be found.') + + if auto_increment: + numbers = set() + count_offset = 1 + + try: + name % 0 + except TypeError as e: + if e.message.startswith('not all'): + name = '%s%%d' % name + else: + module.fail_json(msg=e.message, exception=traceback.format_exc()) + + number_range = xrange(count_offset, count_offset + count + len(numbers)) + available_numbers = list(set(number_range).difference(numbers)) + names = [] + numbers_to_use = available_numbers[:count] + for number in numbers_to_use: + names.append(name % number) + else: + names = [name] * count + + for name in names: + create_response = _create_volume(module, profitbricks, str(datacenter), name) + volumes.append(create_response) + _attach_volume(module, profitbricks, datacenter, create_response['id']) + failed = False + + results = { + 'failed': failed, + 'volumes': volumes, + 'action': 'create', + 'instance_ids': { + 'instances': [i['id'] for i in volumes], + } + } + + return results + + +def delete_volume(module, profitbricks): + """ + Removes a volume. + + This will create a volume in a datacenter. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if the volume was removed, false otherwise + """ + if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: + module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') + + datacenter = module.params.get('datacenter') + changed = False + instance_ids = module.params.get('instance_ids') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + for n in instance_ids: + if uuid_match.match(n): + _delete_volume(module, profitbricks, datacenter, n) + changed = True + else: + volumes = profitbricks.list_volumes(datacenter) + for v in volumes['items']: + if n == v['properties']['name']: + volume_id = v['id'] + _delete_volume(module, profitbricks, datacenter, volume_id) + changed = True + + return changed + + +def _attach_volume(module, profitbricks, datacenter, volume): + """ + Attaches a volume. + + This will attach a volume to the server. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. 
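+
+    The I(server) parameter is optional; when it is set, a server name is
+    resolved to its UUID before the volume is attached.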
+ + Returns: + True if the volume was attached, false otherwise + """ + server = module.params.get('server') + + # Locate UUID for Server + if server: + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server = s['id'] + break + + try: + return profitbricks.attach_volume(datacenter, server, volume) + except Exception as e: + module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc()) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + datacenter=dict(), + server=dict(), + name=dict(), + size=dict(type='int', default=10), + bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), + image=dict(), + image_password=dict(no_log=True), + ssh_keys=dict(type='list', elements='str', default=[], no_log=False), + disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), + licence_type=dict(default='UNKNOWN'), + count=dict(type='int', default=1), + auto_increment=dict(type='bool', default=True), + instance_ids=dict(type='list', elements='str', default=[]), + subscription_user=dict(), + subscription_password=dict(no_log=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + state=dict(default='present'), + ) + ) + + if not module.params.get('subscription_user'): + module.fail_json(msg='subscription_user parameter is required') + if not module.params.get('subscription_password'): + module.fail_json(msg='subscription_password parameter is required') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required for running or stopping machines.') + + try: + (changed) = delete_volume(module, profitbricks) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) + + elif state == 'present': + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required for new instance') + if not module.params.get('name'): + module.fail_json(msg='name parameter is required for new instance') + + try: + (volume_dict_array) = create_volume(module, profitbricks) + module.exit_json(**volume_dict_array) + except Exception as e: + module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py b/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py new file mode 100644 index 000000000..75cd73df3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: profitbricks_volume_attachments +short_description: Attach or 
detach a volume
+description:
+  - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  datacenter:
+    description:
+      - The datacenter in which to operate.
+    type: str
+  server:
+    description:
+      - The name of the server to attach the volume to or detach it from.
+    type: str
+  volume:
+    description:
+      - The volume name or ID.
+    type: str
+  subscription_user:
+    description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+    type: str
+    required: false
+  subscription_password:
+    description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+    type: str
+    required: false
+  wait:
+    description:
+      - wait for the operation to complete before returning
+    required: false
+    default: true
+    type: bool
+  wait_timeout:
+    description:
+      - how long before wait gives up, in seconds
+    type: int
+    default: 600
+  state:
+    description:
+      - Indicate desired state of the resource
+      - "The available choices are: C(present), C(absent)."
+    type: str
+    required: false
+    default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC)
+'''
+
+EXAMPLES = '''
+- name: Attach a volume
+  community.general.profitbricks_volume_attachments:
+    datacenter: Tardis One
+    server: node002
+    volume: vol01
+    wait_timeout: 500
+    state: present
+
+- name: Detach a volume
+  community.general.profitbricks_volume_attachments:
+    datacenter: Tardis One
+    server: node002
+    volume: vol01
+    wait_timeout: 500
+    state: absent
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+    from profitbricks.client import ProfitBricksService
+except ImportError:
+    HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+    r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+    if not promise:
+        return
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time():
+        time.sleep(5)
+        operation_result = profitbricks.get_request(
+            request_id=promise['requestId'],
+            status=True)
+
+        if operation_result['metadata']['status'] == "DONE":
+            return
+        elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+    raise Exception(
+        'Timed out waiting for async operation ' + msg + ' "' + str(
+            promise['requestId']
+        ) + '" to complete.')
+
+
+def attach_volume(module, profitbricks):
+    """
+    Attaches a volume.
+
+    This will attach a volume to the server.
+
+    module : AnsibleModule object
+    profitbricks: authenticated profitbricks object.
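+
+    Datacenter, server, and volume may each be given as a name or a UUID;
+    names are resolved to UUIDs via list lookups before the attach call.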
+ + Returns: + True if the volume was attached, false otherwise + """ + datacenter = module.params.get('datacenter') + server = module.params.get('server') + volume = module.params.get('volume') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + # Locate UUID for Server + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server = s['id'] + break + + # Locate UUID for Volume + if not (uuid_match.match(volume)): + volume_list = profitbricks.list_volumes(datacenter) + for v in volume_list['items']: + if volume == v['properties']['name']: + volume = v['id'] + break + + return profitbricks.attach_volume(datacenter, server, volume) + + +def detach_volume(module, profitbricks): + """ + Detaches a volume. + + This will remove a volume from the server. + + module : AnsibleModule object + profitbricks: authenticated profitbricks object. + + Returns: + True if the volume was detached, false otherwise + """ + datacenter = module.params.get('datacenter') + server = module.params.get('server') + volume = module.params.get('volume') + + # Locate UUID for Datacenter + if not (uuid_match.match(datacenter)): + datacenter_list = profitbricks.list_datacenters() + for d in datacenter_list['items']: + dc = profitbricks.get_datacenter(d['id']) + if datacenter == dc['properties']['name']: + datacenter = d['id'] + break + + # Locate UUID for Server + if not (uuid_match.match(server)): + server_list = profitbricks.list_servers(datacenter) + for s in server_list['items']: + if server == s['properties']['name']: + server = s['id'] + break + + # Locate UUID for Volume + if not (uuid_match.match(volume)): + volume_list = profitbricks.list_volumes(datacenter) + for v in volume_list['items']: + if volume == v['properties']['name']: + volume = v['id'] + break + + return profitbricks.detach_volume(datacenter, server, volume) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + datacenter=dict(), + server=dict(), + volume=dict(), + subscription_user=dict(), + subscription_password=dict(no_log=True), + wait=dict(type='bool', default=True), + wait_timeout=dict(type='int', default=600), + state=dict(default='present'), + ) + ) + + if not HAS_PB_SDK: + module.fail_json(msg='profitbricks required for this module') + + if not module.params.get('subscription_user'): + module.fail_json(msg='subscription_user parameter is required') + if not module.params.get('subscription_password'): + module.fail_json(msg='subscription_password parameter is required') + if not module.params.get('datacenter'): + module.fail_json(msg='datacenter parameter is required') + if not module.params.get('server'): + module.fail_json(msg='server parameter is required') + if not module.params.get('volume'): + module.fail_json(msg='volume parameter is required') + + subscription_user = module.params.get('subscription_user') + subscription_password = module.params.get('subscription_password') + + profitbricks = ProfitBricksService( + username=subscription_user, + password=subscription_password) + + state = module.params.get('state') + + if state == 'absent': + try: + (changed) = detach_volume(module, profitbricks) + module.exit_json(changed=changed) + except Exception as e: + module.fail_json(msg='failed 
to set volume_attach state: %s' % str(e)) + elif state == 'present': + try: + attach_volume(module, profitbricks) + module.exit_json() + except Exception as e: + module.fail_json(msg='failed to set volume_attach state: %s' % str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox.py b/ansible_collections/community/general/plugins/modules/proxmox.py new file mode 100644 index 000000000..315ee601a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox.py @@ -0,0 +1,826 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxmox +short_description: Management of instances in Proxmox VE cluster +description: + - allows you to create/delete/stop instances in Proxmox VE cluster + - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older) + - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior). +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + password: + description: + - the instance root password + type: str + hostname: + description: + - the instance hostname + - required only for C(state=present) + - must be unique if vmid is not passed + type: str + ostemplate: + description: + - the template for VM creating + - required only for C(state=present) + type: str + disk: + description: + - This option was previously described as "hard disk size in GB for instance" however several formats describing + a lxc mount are permitted. + - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically + choose which storage to allocate from, however new versions enforce the C(:) syntax. + - "Additional options are available by using some combination of the following key-value pairs as a + comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] + [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." + - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(3). + type: str + cores: + description: + - Specify number of cores per socket. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). + type: int + cpus: + description: + - numbers of allocated cpus for instance + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). + type: int + memory: + description: + - memory size in MB for instance + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512). + type: int + swap: + description: + - swap memory size in MB for instance + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0). + type: int + netif: + description: + - specifies network interfaces for the container. As a hash/dictionary defining interfaces. + type: dict + features: + description: + - Specifies a list of features to be enabled. 
For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options). + - Some features require the use of a privileged container. + type: list + elements: str + version_added: 2.0.0 + mounts: + description: + - specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points + type: dict + ip_address: + description: + - specifies the address the container will be assigned + type: str + onboot: + description: + - specifies whether a VM will be started during system bootup + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false). + type: bool + storage: + description: + - target storage + type: str + default: 'local' + cpuunits: + description: + - CPU weight for a VM + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000). + type: int + nameserver: + description: + - sets DNS server IP address for a container + type: str + searchdomain: + description: + - sets DNS search domain for a container + type: str + tags: + description: + - List of tags to apply to the container. + - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]). + - Tags are only available in Proxmox 7+. + type: list + elements: str + version_added: 6.2.0 + timeout: + description: + - timeout for operations + type: int + default: 30 + force: + description: + - forcing operations + - can be used only with states C(present), C(stopped), C(restarted) + - with C(state=present) force option allow to overwrite existing container + - with states C(stopped) , C(restarted) allow to force stop instance + type: bool + default: false + purge: + description: + - Remove container from all related configurations. + - For example backup jobs, replication jobs, or HA. + - Related ACLs and Firewall entries will always be removed. + - Used with state C(absent). + type: bool + default: false + version_added: 2.3.0 + state: + description: + - Indicate desired state of the instance + type: str + choices: ['present', 'started', 'absent', 'stopped', 'restarted'] + default: present + pubkey: + description: + - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions + type: str + unprivileged: + description: + - Indicate if the container should be unprivileged. + - > + The default value for this parameter is C(false) but that is deprecated + and it will be replaced with C(true) in community.general 7.0.0. + type: bool + description: + description: + - Specify the description for the container. Only used on the configuration web interface. + - This is saved as a comment inside the configuration file. + type: str + version_added: '0.2.0' + hookscript: + description: + - Script that will be executed during various steps in the containers lifetime. + type: str + version_added: '0.2.0' + proxmox_default_behavior: + description: + - As of community.general 4.0.0, various options no longer have default values. + These default values caused problems when users expected different behavior from Proxmox + by default or filled options which caused problems when set. + - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values + are used when the values are not explicitly specified by the user. The new default is C(no_defaults), + which makes sure these options have no defaults. 
+ - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options. + type: str + default: no_defaults + choices: + - compatibility + - no_defaults + version_added: "1.3.0" + clone: + description: + - ID of the container to be cloned. + - I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified. + - The type of clone created is defined by the I(clone_type) parameter. + - This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4). + type: int + version_added: 4.3.0 + clone_type: + description: + - Type of the clone created. + - C(full) creates a full clone, and I(storage) must be specified. + - C(linked) creates a linked clone, and the cloned container must be a template container. + - C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not. + I(storage) may be specified, if not it will fall back to the default. + type: str + choices: ['full', 'linked', 'opportunistic'] + default: opportunistic + version_added: 4.3.0 +author: Sergei Antipov (@UnderGreen) +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.proxmox.selection + - community.general.attributes +''' + +EXAMPLES = r''' +- name: Create new container with minimal options + community.general.proxmox: + vmid: 100 + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + password: 123456 + hostname: example.org + ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + +- name: Create new container with hookscript and description + community.general.proxmox: + vmid: 100 + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + password: 123456 + hostname: example.org + ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' + hookscript: 'local:snippets/vm_hook.sh' + description: created with ansible + +- name: Create new container automatically selecting the next available vmid. 
+  community.general.proxmox:
+    node: 'uk-mc02'
+    api_user: 'root@pam'
+    api_password: '1q2w3e'
+    api_host: 'node1'
+    password: '123456'
+    hostname: 'example.org'
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options with force (it will rewrite the existing container)
+  community.general.proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    force: true
+
+- name: Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
+  community.general.proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options defining network interface with dhcp
+  community.general.proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining network interface with static ip
+  community.general.proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining a mount with 8GB
+  community.general.proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
+
+- name: Create new container with minimal options defining a cpu core limit
+  community.general.proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    cores: 2
+
+- name: Create a new container with nesting enabled, allowing the use of CIFS/NFS inside the container
+  community.general.proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    features:
+      - nesting=1
+      - mount=cifs,nfs
+
+- name: >
+    Create a linked clone of the template container with id 100. The newly created container will be a
+    linked clone, because no storage parameter is defined
+  community.general.proxmox:
+    vmid: 201
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    clone: 100
+    hostname: clone.example.org
+
+- name: Create a full clone of the container with id 100
+  community.general.proxmox:
+    vmid: 201
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    clone: 100
+    hostname: clone.example.org
+    storage: local
+
+- name: Start container
+  community.general.proxmox:
+    vmid: 100
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    state: started
+
+- name: >
+    Start container with mount.
You should enter a 90-second timeout because servers + with additional disks take longer to boot + community.general.proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + state: started + timeout: 90 + +- name: Stop container + community.general.proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + state: stopped + +- name: Stop container with force + community.general.proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + force: true + state: stopped + +- name: Restart container(stopped or mounted container you can't restart) + community.general.proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + state: restarted + +- name: Remove container + community.general.proxmox: + vmid: 100 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + state: absent +''' + +import re +import time + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.proxmox import ( + ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible) + +VZ_TYPE = None + + +class ProxmoxLxcAnsible(ProxmoxAnsible): + def content_check(self, node, ostemplate, template_store): + return [True for cnt in self.proxmox_api.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate] + + def is_template_container(self, node, vmid): + """Check if the specified container is a template.""" + proxmox_node = self.proxmox_api.nodes(node) + config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get() + return config['template'] + + def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs): + + # Version limited features + minimum_version = { + 'tags': 7, + } + proxmox_node = self.proxmox_api.nodes(node) + + # Remove all empty kwarg entries + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + + version = self.version() + pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0] + + # Fail on unsupported features + for option, version in minimum_version.items(): + if pve_major_version < version and option in kwargs: + self.module.fail_json(changed=False, msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_major_version}". 
+ format(option=option, version=version, pve_major_version=pve_major_version)) + + if VZ_TYPE == 'lxc': + kwargs['cpulimit'] = cpus + kwargs['rootfs'] = disk + if 'netif' in kwargs: + kwargs.update(kwargs['netif']) + del kwargs['netif'] + if 'mounts' in kwargs: + kwargs.update(kwargs['mounts']) + del kwargs['mounts'] + if 'pubkey' in kwargs: + if self.version() >= LooseVersion('4.2'): + kwargs['ssh-public-keys'] = kwargs['pubkey'] + del kwargs['pubkey'] + else: + kwargs['cpus'] = cpus + kwargs['disk'] = disk + + # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string + if 'tags' in kwargs: + re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$') + for tag in kwargs['tags']: + if not re_tag.match(tag): + self.module.fail_json(msg='%s is not a valid tag' % tag) + kwargs['tags'] = ",".join(kwargs['tags']) + + if clone is not None: + if VZ_TYPE != 'lxc': + self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.") + + clone_is_template = self.is_template_container(node, clone) + + # By default, create a full copy only when the cloned container is not a template. + create_full_copy = not clone_is_template + + # Only accept parameters that are compatible with the clone endpoint. + valid_clone_parameters = ['hostname', 'pool', 'description'] + if self.module.params['storage'] is not None and clone_is_template: + # Cloning a template, so create a full copy instead of a linked copy + create_full_copy = True + elif self.module.params['storage'] is None and not clone_is_template: + # Not cloning a template, but also no defined storage. This isn't possible. + self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.") + + if self.module.params['clone_type'] == 'linked': + if not clone_is_template: + self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.") + # Don't need to do more, by default create_full_copy is set to false already + elif self.module.params['clone_type'] == 'opportunistic': + if not clone_is_template: + # Cloned container is not a template, so we need our 'storage' parameter + valid_clone_parameters.append('storage') + elif self.module.params['clone_type'] == 'full': + create_full_copy = True + valid_clone_parameters.append('storage') + + clone_parameters = {} + + if create_full_copy: + clone_parameters['full'] = '1' + else: + clone_parameters['full'] = '0' + for param in valid_clone_parameters: + if self.module.params[param] is not None: + clone_parameters[param] = self.module.params[param] + + taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters) + else: + taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs) + + while timeout: + if self.api_task_ok(node, taskid): + return True + timeout -= 1 + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % + proxmox_node.tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + def start_instance(self, vm, vmid, timeout): + taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post() + while timeout: + if self.api_task_ok(vm['node'], taskid): + return True + timeout -= 1 + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for starting VM. 
Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + def stop_instance(self, vm, vmid, timeout, force): + if force: + taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) + else: + taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post() + while timeout: + if self.api_task_ok(vm['node'], taskid): + return True + timeout -= 1 + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + def umount_instance(self, vm, vmid, timeout): + taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post() + while timeout: + if self.api_task_ok(vm['node'], taskid): + return True + timeout -= 1 + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + + +def main(): + module_args = proxmox_auth_argument_spec() + proxmox_args = dict( + vmid=dict(type='int', required=False), + node=dict(), + pool=dict(), + password=dict(no_log=True), + hostname=dict(), + ostemplate=dict(), + disk=dict(type='str'), + cores=dict(type='int'), + cpus=dict(type='int'), + memory=dict(type='int'), + swap=dict(type='int'), + netif=dict(type='dict'), + mounts=dict(type='dict'), + ip_address=dict(), + onboot=dict(type='bool'), + features=dict(type='list', elements='str'), + storage=dict(default='local'), + cpuunits=dict(type='int'), + nameserver=dict(), + searchdomain=dict(), + timeout=dict(type='int', default=30), + force=dict(type='bool', default=False), + purge=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), + pubkey=dict(type='str'), + unprivileged=dict(type='bool'), + description=dict(type='str'), + hookscript=dict(type='str'), + proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), + clone=dict(type='int'), + clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']), + tags=dict(type='list', elements='str') + ) + module_args.update(proxmox_args) + + module = AnsibleModule( + argument_spec=module_args, + required_if=[ + ('state', 'present', ['node', 'hostname']), + ('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we + # either clone a container or create a new one from a template file. + ], + required_together=[ + ('api_token_id', 'api_token_secret') + ], + required_one_of=[('api_password', 'api_token_id')], + mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template. 
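+        # NOTE: the four-element form ('state', 'present', ('clone', 'ostemplate'), True)
+        # makes the third element an "any of" set: when state=present, at least one of
+        # clone/ostemplate must be supplied (the trailing True switches required_if from
+        # "all of these" to "any of these"); mutually_exclusive then forbids passing both.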
+    )
+
+    proxmox = ProxmoxLxcAnsible(module)
+
+    global VZ_TYPE
+    VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc'
+
+    state = module.params['state']
+    vmid = module.params['vmid']
+    node = module.params['node']
+    disk = module.params['disk']
+    cpus = module.params['cpus']
+    memory = module.params['memory']
+    swap = module.params['swap']
+    storage = module.params['storage']
+    hostname = module.params['hostname']
+    if module.params['ostemplate'] is not None:
+        template_store = module.params['ostemplate'].split(":")[0]
+    timeout = module.params['timeout']
+    clone = module.params['clone']
+
+    if module.params['unprivileged'] is None:
+        module.params['unprivileged'] = False
+        module.deprecate(
+            'The default value `false` for the parameter "unprivileged" is deprecated and will be replaced by `true`',
+            version='7.0.0',
+            collection_name='community.general'
+        )
+
+    if module.params['proxmox_default_behavior'] == 'compatibility':
+        old_default_values = dict(
+            disk="3",
+            cores=1,
+            cpus=1,
+            memory=512,
+            swap=0,
+            onboot=False,
+            cpuunits=1000,
+        )
+        for param, value in old_default_values.items():
+            if module.params[param] is None:
+                module.params[param] = value
+
+    # If vmid is not set, get the next VM ID from the Proxmox API.
+    # If hostname is set, get the VM ID from the Proxmox API.
+    if not vmid and state == 'present':
+        vmid = proxmox.get_nextvmid()
+    elif not vmid and hostname:
+        vmid = proxmox.get_vmid(hostname)
+    elif not vmid:
+        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+
+    # Create a new container
+    if state == 'present' and clone is None:
+        try:
+            if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
+                module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
+            # If no vmid was passed, there cannot be another VM named 'hostname'
+            if (not module.params['vmid'] and
+                    proxmox.get_vmid(hostname, ignore_missing=True) and
+                    not module.params['force']):
+                vmid = proxmox.get_vmid(hostname)
+                module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+            elif not proxmox.get_node(node):
+                module.fail_json(msg="node '%s' does not exist in cluster" % node)
+            elif not proxmox.content_check(node, module.params['ostemplate'], template_store):
+                module.fail_json(msg="ostemplate '%s' does not exist on node %s and storage %s"
+                                 % (module.params['ostemplate'], node, template_store))
+        except Exception as e:
+            module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+
+        try:
+            proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
+                                    cores=module.params['cores'],
+                                    pool=module.params['pool'],
+                                    password=module.params['password'],
+                                    hostname=module.params['hostname'],
+                                    ostemplate=module.params['ostemplate'],
+                                    netif=module.params['netif'],
+                                    mounts=module.params['mounts'],
+                                    ip_address=module.params['ip_address'],
+                                    onboot=ansible_to_proxmox_bool(module.params['onboot']),
+                                    cpuunits=module.params['cpuunits'],
+                                    nameserver=module.params['nameserver'],
+                                    searchdomain=module.params['searchdomain'],
+                                    force=ansible_to_proxmox_bool(module.params['force']),
+                                    pubkey=module.params['pubkey'],
+                                    features=",".join(module.params['features']) if module.params['features'] is not None else None,
+                                    unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
+                                    description=module.params['description'],
+                                    hookscript=module.params['hookscript'],
+                                    tags=module.params['tags'])
+
+            module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+        except Exception as e:
+            module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+
+    # Clone a container
+    elif state == 'present' and clone is not None:
+        try:
+            if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
+                module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
+            # If no vmid was passed, there cannot be another VM named 'hostname'
+            if (not module.params['vmid'] and
+                    proxmox.get_vmid(hostname, ignore_missing=True) and
+                    not module.params['force']):
+                vmid = proxmox.get_vmid(hostname)
+                module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+            if not proxmox.get_vm(clone, ignore_missing=True):
+                module.exit_json(changed=False, msg="Container to be cloned does not exist")
+        except Exception as e:
+            module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+
+        try:
+            proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
+
+            module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
+        except Exception as e:
+            module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+
+    elif state == 'started':
+        try:
+            vm = proxmox.get_vm(vmid)
+            if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+                module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+            if proxmox.start_instance(vm, vmid, timeout):
+                module.exit_json(changed=True, msg="VM %s started" % vmid)
+        except Exception as e:
+            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+    elif state == 'stopped':
+        try:
+            vm = proxmox.get_vm(vmid)
+
+            if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+                if module.params['force']:
+                    if proxmox.umount_instance(vm, vmid, timeout):
+                        module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+                else:
+                    module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted.
" + "You can use force option to umount it.") % vmid) + + if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': + module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) + + if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + except Exception as e: + module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'restarted': + try: + vm = proxmox.get_vm(vmid) + + vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] + if vm_status in ['stopped', 'mounted']: + module.exit_json(changed=False, msg="VM %s is not running" % vmid) + + if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and + proxmox.start_instance(vm, vmid, timeout)): + module.exit_json(changed=True, msg="VM %s is restarted" % vmid) + except Exception as e: + module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'absent': + if not vmid: + module.exit_json(changed=False, msg='VM with hostname = %s is already absent' % hostname) + try: + vm = proxmox.get_vm(vmid, ignore_missing=True) + if not vm: + module.exit_json(changed=False, msg="VM %s does not exist" % vmid) + + vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] + if vm_status == 'running': + module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) + + if vm_status == 'mounted': + module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid) + + delete_params = {} + + if module.params['purge']: + delete_params['purge'] = 1 + + taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params) + + while timeout: + if proxmox.api_task_ok(vm['node'], taskid): + module.exit_json(changed=True, msg="VM %s removed" % vmid) + timeout -= 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' + % proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + except Exception as e: + module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e))) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_disk.py b/ansible_collections/community/general/plugins/modules/proxmox_disk.py new file mode 100644 index 000000000..df6735cc0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_disk.py @@ -0,0 +1,767 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022, Castor Sky (@castorsky) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: proxmox_disk +short_description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster +version_added: 5.7.0 +description: + - Allows you to perform some supported operations on a disk in Qemu(KVM) Virtual Machines in a Proxmox VE cluster. +author: "Castor Sky (@castorsky) " +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - The unique name of the VM. 
+      - You can specify either I(name) or I(vmid) or both of them.
+    type: str
+  vmid:
+    description:
+      - The unique ID of the VM.
+      - You can specify either I(vmid) or I(name) or both of them.
+    type: int
+  disk:
+    description:
+      - The disk key (C(unused[n]), C(ide[n]), C(sata[n]), C(scsi[n]) or C(virtio[n])) you want to operate on.
+      - Disk buses (IDE, SATA and so on) have fixed ranges of C(n) that are accepted by the Proxmox API.
+      - >
+        For IDE: 0-3;
+        for SCSI: 0-30;
+        for SATA: 0-5;
+        for VirtIO: 0-15;
+        for Unused: 0-255.
+    type: str
+    required: true
+  state:
+    description:
+      - Indicates desired state of the disk.
+      - >
+        I(state=present) can be used to create or replace a disk, or to update options in an existing disk. By default
+        it creates a missing disk or updates options in an existing one. See the I(create) parameter description to
+        control the behavior of this option.
+      - Some option updates (like I(cache)) are not applied instantly and require a VM restart.
+      - >
+        Use I(state=detached) to detach an existing disk from the VM without removing it entirely.
+        When I(state=detached) and the disk is C(unused[n]), it is left in the same state (not removed).
+      - >
+        I(state=moved) may be used to change the backing storage for the disk within the same VM
+        or to send the disk to another VM (using the same backing storage).
+      - >
+        I(state=resized) is intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size
+        because shrinking disks is not supported by the PVE API and has to be done manually.
+      - To remove the disk from the backing storage entirely, use I(state=absent).
+    type: str
+    choices: ['present', 'resized', 'detached', 'moved', 'absent']
+    default: present
+  create:
+    description:
+      - The I(create) flag controls the behavior of I(state=present).
+      - When I(create=disabled) it will not create a new disk (if missing) but will update options in an existing disk.
+      - When I(create=regular) it will either create a new disk (if missing) or update options in an existing disk.
+      - When I(create=forced) it will always create a new disk (if the disk exists, it will be detached and left unused).
+    type: str
+    choices: ['disabled', 'regular', 'forced']
+    default: regular
+  storage:
+    description:
+      - The drive's backing storage.
+      - Used only when I(state) is C(present).
+    type: str
+  size:
+    description:
+      - Desired volume size in GB to allocate when I(state=present) (specify I(size) without suffix).
+      - >
+        New (or additional) size of the volume when I(state=resized). With the C(+) sign
+        the value is added to the actual size of the volume;
+        without it, the value is taken as an absolute one.
+    type: str
+  bwlimit:
+    description:
+      - Override I/O bandwidth limit (in KB/s).
+      - Used only when I(state=moved).
+    type: int
+  delete_moved:
+    description:
+      - Delete the original disk after a successful copy.
+      - By default the original disk is kept as an unused disk.
+      - Used only when I(state=moved).
+    type: bool
+  target_disk:
+    description:
+      - The config key the disk will be moved to on the target VM (for example, C(ide0) or C(scsi1)).
+      - Default is the source disk key.
+      - Used only when I(state=moved).
+    type: str
+  target_storage:
+    description:
+      - Move the disk to this storage when I(state=moved).
+      - You can move between storages only within the scope of one VM.
+      - Mutually exclusive with I(target_vmid).
+      - Consider increasing I(timeout) in case of large disk images or a slow storage backend.
+    type: str
+  target_vmid:
+    description:
+      - The (unique) ID of the VM where the disk will be placed when I(state=moved).
+      - You can move a disk between VMs only when the same storage is used.
+      - Mutually exclusive with I(target_storage).
+    type: int
+  timeout:
+    description:
+      - Timeout in seconds to wait for slow operations such as importing a disk or moving a disk between storages.
+      - Used only when I(state) is C(present) or C(moved).
+    type: int
+    default: 600
+  aio:
+    description:
+      - AIO type to use.
+    type: str
+    choices: ['native', 'threads', 'io_uring']
+  backup:
+    description:
+      - Whether the drive should be included when making backups.
+    type: bool
+  bps_max_length:
+    description:
+      - Maximum length of total r/w I/O bursts in seconds.
+    type: int
+  bps_rd_max_length:
+    description:
+      - Maximum length of read I/O bursts in seconds.
+    type: int
+  bps_wr_max_length:
+    description:
+      - Maximum length of write I/O bursts in seconds.
+    type: int
+  cache:
+    description:
+      - The drive's cache mode.
+    type: str
+    choices: ['none', 'writethrough', 'writeback', 'unsafe', 'directsync']
+  cyls:
+    description:
+      - Force the drive's physical geometry to have a specific cylinder count.
+    type: int
+  detect_zeroes:
+    description:
+      - Control whether to detect and try to optimize writes of zeroes.
+    type: bool
+  discard:
+    description:
+      - Control whether to pass discard/trim requests to the underlying storage.
+    type: str
+    choices: ['ignore', 'on']
+  format:
+    description:
+      - The drive's backing file's data format.
+    type: str
+    choices: ['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']
+  heads:
+    description:
+      - Force the drive's physical geometry to have a specific head count.
+    type: int
+  import_from:
+    description:
+      - Import volume from this existing one.
+      - Volume string format is C(<STORAGE>:<VMID>/<FULL_PATH>) or C(<ABSOLUTE_PATH>).
+      - Attention! Only root can use absolute paths.
+      - This parameter is mutually exclusive with I(size).
+      - Increase the I(timeout) parameter when importing large disk images or using slow storage.
+    type: str
+  iops:
+    description:
+      - Maximum total r/w I/O in operations per second.
+      - You can specify either total limit or per operation (mutually exclusive with I(iops_rd) and I(iops_wr)).
+    type: int
+  iops_max:
+    description:
+      - Maximum unthrottled total r/w I/O pool in operations per second.
+    type: int
+  iops_max_length:
+    description:
+      - Maximum length of total r/w I/O bursts in seconds.
+    type: int
+  iops_rd:
+    description:
+      - Maximum read I/O in operations per second.
+      - You can specify either read or total limit (mutually exclusive with I(iops)).
+    type: int
+  iops_rd_max:
+    description:
+      - Maximum unthrottled read I/O pool in operations per second.
+    type: int
+  iops_rd_max_length:
+    description:
+      - Maximum length of read I/O bursts in seconds.
+    type: int
+  iops_wr:
+    description:
+      - Maximum write I/O in operations per second.
+      - You can specify either write or total limit (mutually exclusive with I(iops)).
+    type: int
+  iops_wr_max:
+    description:
+      - Maximum unthrottled write I/O pool in operations per second.
+    type: int
+  iops_wr_max_length:
+    description:
+      - Maximum length of write I/O bursts in seconds.
+    type: int
+  iothread:
+    description:
+      - Whether to use iothreads for this drive (only for SCSI and VirtIO).
+    type: bool
+  mbps:
+    description:
+      - Maximum total r/w speed in megabytes per second.
+      - Can be fractional, but use with caution; fractional values less than 1 are not officially supported.
+      - You can specify either total limit or per operation (mutually exclusive with I(mbps_rd) and I(mbps_wr)).
+    type: float
+  mbps_max:
+    description:
+      - Maximum unthrottled total r/w pool in megabytes per second.
+    type: float
+  mbps_rd:
+    description:
+      - Maximum read speed in megabytes per second.
+      - You can specify either read or total limit (mutually exclusive with I(mbps)).
+    type: float
+  mbps_rd_max:
+    description:
+      - Maximum unthrottled read pool in megabytes per second.
+    type: float
+  mbps_wr:
+    description:
+      - Maximum write speed in megabytes per second.
+      - You can specify either write or total limit (mutually exclusive with I(mbps)).
+    type: float
+  mbps_wr_max:
+    description:
+      - Maximum unthrottled write pool in megabytes per second.
+    type: float
+  media:
+    description:
+      - The drive's media type.
+    type: str
+    choices: ['cdrom', 'disk']
+  queues:
+    description:
+      - Number of queues (SCSI only).
+    type: int
+  replicate:
+    description:
+      - Whether the drive should be considered for replication jobs.
+    type: bool
+  rerror:
+    description:
+      - Read error action.
+    type: str
+    choices: ['ignore', 'report', 'stop']
+  ro:
+    description:
+      - Whether the drive is read-only.
+    type: bool
+  scsiblock:
+    description:
+      - Whether to use scsi-block for full passthrough of the host block device.
+      - Can lead to I/O errors in combination with low memory or high memory fragmentation on the host.
+    type: bool
+  secs:
+    description:
+      - Force the drive's physical geometry to have a specific sector count.
+    type: int
+  serial:
+    description:
+      - The drive's reported serial number, URL-encoded, up to 20 bytes long.
+    type: str
+  shared:
+    description:
+      - Mark this locally-managed volume as available on all nodes.
+      - This option does not share the volume automatically; it assumes it is shared already!
+    type: bool
+  snapshot:
+    description:
+      - Control qemu's snapshot mode feature.
+      - If activated, changes made to the disk are temporary and will be discarded when the VM is shut down.
+    type: bool
+  ssd:
+    description:
+      - Whether to expose this drive as an SSD, rather than a rotational hard disk.
+    type: bool
+  trans:
+    description:
+      - Force disk geometry bios translation mode.
+    type: str
+    choices: ['auto', 'lba', 'none']
+  werror:
+    description:
+      - Write error action.
+    type: str
+    choices: ['enospc', 'ignore', 'report', 'stop']
+  wwn:
+    description:
+      - The drive's worldwide name, encoded as a 16-byte hex string, prefixed by C(0x).
+ type: str +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Create new disk in VM (do not rewrite in case it exists already) + community.general.proxmox_disk: + api_host: node1 + api_user: root@pam + api_token_id: token1 + api_token_secret: some-token-data + name: vm-name + disk: scsi3 + backup: true + cache: none + storage: local-zfs + size: 5 + state: present + +- name: Create new disk in VM (force rewrite in case it exists already) + community.general.proxmox_disk: + api_host: node1 + api_user: root@pam + api_token_id: token1 + api_token_secret: some-token-data + vmid: 101 + disk: scsi3 + format: qcow2 + storage: local + size: 16 + create: forced + state: present + +- name: Update existing disk + community.general.proxmox_disk: + api_host: node1 + api_user: root@pam + api_token_id: token1 + api_token_secret: some-token-data + vmid: 101 + disk: ide0 + backup: false + ro: true + aio: native + state: present + +- name: Grow existing disk + community.general.proxmox_disk: + api_host: node1 + api_user: root@pam + api_token_id: token1 + api_token_secret: some-token-data + vmid: 101 + disk: sata4 + size: +5G + state: resized + +- name: Detach disk (leave it unused) + community.general.proxmox_disk: + api_host: node1 + api_user: root@pam + api_token_id: token1 + api_token_secret: some-token-data + name: vm-name + disk: virtio0 + state: detached + +- name: Move disk to another storage + community.general.proxmox_disk: + api_host: node1 + api_user: root@pam + api_password: secret + vmid: 101 + disk: scsi7 + target_storage: local + format: qcow2 + state: moved + +- name: Move disk from one VM to another + community.general.proxmox_disk: + api_host: node1 + api_user: root@pam + api_token_id: token1 + api_token_secret: some-token-data + vmid: 101 + disk: scsi7 + target_vmid: 201 + state: moved + +- name: Remove disk permanently + community.general.proxmox_disk: + api_host: node1 + api_user: root@pam + api_password: secret + vmid: 101 + disk: scsi4 + state: absent +''' + +RETURN = ''' +vmid: + description: The VM vmid. + returned: success + type: int + sample: 101 +msg: + description: A short message on what the module did. 
+ returned: always + type: str + sample: "Disk scsi3 created in VM 101" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, + ProxmoxAnsible) +from re import compile, match, sub +from time import sleep + + +def disk_conf_str_to_dict(config_string): + config = config_string.split(',') + storage_volume = config.pop(0).split(':') + config.sort() + storage_name = storage_volume[0] + volume_name = storage_volume[1] + config_current = dict( + volume='%s:%s' % (storage_name, volume_name), + storage_name=storage_name, + volume_name=volume_name + ) + + for option in config: + k, v = option.split('=') + config_current[k] = v + + return config_current + + +class ProxmoxDiskAnsible(ProxmoxAnsible): + create_update_fields = [ + 'aio', 'backup', 'bps_max_length', 'bps_rd_max_length', 'bps_wr_max_length', + 'cache', 'cyls', 'detect_zeroes', 'discard', 'format', 'heads', 'import_from', 'iops', 'iops_max', + 'iops_max_length', 'iops_rd', 'iops_rd_max', 'iops_rd_max_length', 'iops_wr', 'iops_wr_max', + 'iops_wr_max_length', 'iothread', 'mbps', 'mbps_max', 'mbps_rd', 'mbps_rd_max', 'mbps_wr', 'mbps_wr_max', + 'media', 'queues', 'replicate', 'rerror', 'ro', 'scsiblock', 'secs', 'serial', 'shared', 'snapshot', + 'ssd', 'trans', 'werror', 'wwn' + ] + supported_bus_num_ranges = dict( + ide=range(0, 4), + scsi=range(0, 31), + sata=range(0, 6), + virtio=range(0, 16), + unused=range(0, 256) + ) + + def get_create_attributes(self): + # Sanitize parameters dictionary: + # - Remove not defined args + # - Ensure True and False converted to int. + # - Remove unnecessary parameters + params = dict((k, v) for k, v in self.module.params.items() if v is not None and k in self.create_update_fields) + params.update(dict((k, int(v)) for k, v in params.items() if isinstance(v, bool))) + return params + + def wait_till_complete_or_timeout(self, node_name, task_id): + timeout = self.module.params['timeout'] + while timeout: + if self.api_task_ok(node_name, task_id): + return True + timeout -= 1 + if timeout <= 0: + return False + sleep(1) + + def create_disk(self, disk, vmid, vm, vm_config): + create = self.module.params['create'] + if create == 'disabled' and disk not in vm_config: + # NOOP + return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid) + + if (create == 'regular' and disk not in vm_config) or (create == 'forced'): + # CREATE + attributes = self.get_create_attributes() + import_string = attributes.pop('import_from', None) + + if import_string: + config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string) + timeout_str = "Reached timeout while importing VM disk. Last line in task before timeout: %s" + ok_str = "Disk %s imported into VM %s" + else: + config_str = "%s:%s" % (self.module.params["storage"], self.module.params["size"]) + ok_str = "Disk %s created in VM %s" + timeout_str = "Reached timeout while creating VM disk. 
Last line in task before timeout: %s" + + for k, v in attributes.items(): + config_str += ',%s=%s' % (k, v) + + disk_config_to_apply = {self.module.params["disk"]: config_str} + + if create in ['disabled', 'regular'] and disk in vm_config: + # UPDATE + disk_config = disk_conf_str_to_dict(vm_config[disk]) + config_str = disk_config["volume"] + ok_str = "Disk %s updated in VM %s" + attributes = self.get_create_attributes() + # 'import_from' fails on disk updates + attributes.pop('import_from', None) + + for k, v in attributes.items(): + config_str += ',%s=%s' % (k, v) + + # Now compare old and new config to detect if changes are needed + for option in ['size', 'storage_name', 'volume', 'volume_name']: + attributes.update({option: disk_config[option]}) + # Values in params are numbers, but strings are needed to compare with disk_config + attributes = dict((k, str(v)) for k, v in attributes.items()) + if disk_config == attributes: + return False, "Disk %s is up to date in VM %s" % (disk, vmid) + + disk_config_to_apply = {self.module.params["disk"]: config_str} + + current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.post(**disk_config_to_apply) + task_success = self.wait_till_complete_or_timeout(vm['node'], current_task_id) + if task_success: + return True, ok_str % (disk, vmid) + else: + self.module.fail_json( + msg=timeout_str % self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1] + ) + + def move_disk(self, disk, vmid, vm, vm_config): + params = dict() + params['disk'] = disk + params['vmid'] = vmid + params['bwlimit'] = self.module.params['bwlimit'] + params['storage'] = self.module.params['target_storage'] + params['target-disk'] = self.module.params['target_disk'] + params['target-vmid'] = self.module.params['target_vmid'] + params['format'] = self.module.params['format'] + params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0 + # Remove not defined args + params = dict((k, v) for k, v in params.items() if v is not None) + + if params.get('storage', False): + disk_config = disk_conf_str_to_dict(vm_config[disk]) + if params['storage'] == disk_config['storage_name']: + return False + + task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params) + task_success = self.wait_till_complete_or_timeout(vm['node'], task_id) + if task_success: + return True + else: + self.module.fail_json( + msg='Reached timeout while waiting for moving VM disk. 
Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(task_id).log.get()[:1] + ) + + +def main(): + module_args = proxmox_auth_argument_spec() + disk_args = dict( + # Proxmox native parameters + aio=dict(type='str', choices=['native', 'threads', 'io_uring']), + backup=dict(type='bool'), + bps_max_length=dict(type='int'), + bps_rd_max_length=dict(type='int'), + bps_wr_max_length=dict(type='int'), + cache=dict(type='str', choices=['none', 'writethrough', 'writeback', 'unsafe', 'directsync']), + cyls=dict(type='int'), + detect_zeroes=dict(type='bool'), + discard=dict(type='str', choices=['ignore', 'on']), + format=dict(type='str', choices=['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']), + heads=dict(type='int'), + import_from=dict(type='str'), + iops=dict(type='int'), + iops_max=dict(type='int'), + iops_max_length=dict(type='int'), + iops_rd=dict(type='int'), + iops_rd_max=dict(type='int'), + iops_rd_max_length=dict(type='int'), + iops_wr=dict(type='int'), + iops_wr_max=dict(type='int'), + iops_wr_max_length=dict(type='int'), + iothread=dict(type='bool'), + mbps=dict(type='float'), + mbps_max=dict(type='float'), + mbps_rd=dict(type='float'), + mbps_rd_max=dict(type='float'), + mbps_wr=dict(type='float'), + mbps_wr_max=dict(type='float'), + media=dict(type='str', choices=['cdrom', 'disk']), + queues=dict(type='int'), + replicate=dict(type='bool'), + rerror=dict(type='str', choices=['ignore', 'report', 'stop']), + ro=dict(type='bool'), + scsiblock=dict(type='bool'), + secs=dict(type='int'), + serial=dict(type='str'), + shared=dict(type='bool'), + snapshot=dict(type='bool'), + ssd=dict(type='bool'), + trans=dict(type='str', choices=['auto', 'lba', 'none']), + werror=dict(type='str', choices=['enospc', 'ignore', 'report', 'stop']), + wwn=dict(type='str'), + + # Disk moving relates parameters + bwlimit=dict(type='int'), + target_storage=dict(type='str'), + target_disk=dict(type='str'), + target_vmid=dict(type='int'), + delete_moved=dict(type='bool'), + timeout=dict(type='int', default='600'), + + # Module related parameters + name=dict(type='str'), + vmid=dict(type='int'), + disk=dict(type='str', required=True), + storage=dict(type='str'), + size=dict(type='str'), + state=dict(type='str', choices=['present', 'resized', 'detached', 'moved', 'absent'], + default='present'), + create=dict(type='str', choices=['disabled', 'regular', 'forced'], default='regular'), + ) + + module_args.update(disk_args) + + module = AnsibleModule( + argument_spec=module_args, + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], + required_if=[ + ('create', 'forced', ['storage']), + ('state', 'resized', ['size']), + ], + required_by={ + 'target_disk': 'target_vmid', + 'mbps_max': 'mbps', + 'mbps_rd_max': 'mbps_rd', + 'mbps_wr_max': 'mbps_wr', + 'bps_max_length': 'mbps_max', + 'bps_rd_max_length': 'mbps_rd_max', + 'bps_wr_max_length': 'mbps_wr_max', + 'iops_max': 'iops', + 'iops_rd_max': 'iops_rd', + 'iops_wr_max': 'iops_wr', + 'iops_max_length': 'iops_max', + 'iops_rd_max_length': 'iops_rd_max', + 'iops_wr_max_length': 'iops_wr_max', + }, + supports_check_mode=False, + mutually_exclusive=[ + ('target_vmid', 'target_storage'), + ('mbps', 'mbps_rd'), + ('mbps', 'mbps_wr'), + ('iops', 'iops_rd'), + ('iops', 'iops_wr'), + ('import_from', 'size'), + ] + ) + + proxmox = ProxmoxDiskAnsible(module) + + disk = module.params['disk'] + # Verify disk name has appropriate name + disk_regex = 
compile(r'^([a-z]+)([0-9]+)$') + disk_bus = sub(disk_regex, r'\1', disk) + disk_number = int(sub(disk_regex, r'\2', disk)) + if disk_bus not in proxmox.supported_bus_num_ranges: + proxmox.module.fail_json(msg='Unsupported disk bus: %s' % disk_bus) + elif disk_number not in proxmox.supported_bus_num_ranges[disk_bus]: + bus_range = proxmox.supported_bus_num_ranges[disk_bus] + proxmox.module.fail_json(msg='Disk %s number not in range %s..%s ' % (disk, bus_range[0], bus_range[-1])) + + name = module.params['name'] + state = module.params['state'] + vmid = module.params['vmid'] or proxmox.get_vmid(name) + + # Ensure VM id exists and retrieve its config + vm = None + vm_config = None + try: + vm = proxmox.get_vm(vmid) + vm_config = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() + except Exception as e: + proxmox.module.fail_json(msg='Getting information for VM %s failed with exception: %s' % (vmid, str(e))) + + # Do not try to perform actions on missing disk + if disk not in vm_config and state in ['resized', 'moved']: + module.fail_json(vmid=vmid, msg='Unable to process missing disk %s in VM %s' % (disk, vmid)) + + if state == 'present': + try: + success, message = proxmox.create_disk(disk, vmid, vm, vm_config) + if success: + module.exit_json(changed=True, vmid=vmid, msg=message) + else: + module.exit_json(changed=False, vmid=vmid, msg=message) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to create/update disk %s in VM %s: %s' % (disk, vmid, str(e))) + + elif state == 'detached': + try: + if disk_bus == 'unused': + module.exit_json(changed=False, vmid=vmid, msg='Disk %s already detached in VM %s' % (disk, vmid)) + if disk not in vm_config: + module.exit_json(changed=False, vmid=vmid, msg="Disk %s not present in VM %s config" % (disk, vmid)) + proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=0) + module.exit_json(changed=True, vmid=vmid, msg="Disk %s detached from VM %s" % (disk, vmid)) + except Exception as e: + module.fail_json(msg="Failed to detach disk %s from VM %s with exception: %s" % (disk, vmid, str(e))) + + elif state == 'moved': + try: + disk_config = disk_conf_str_to_dict(vm_config[disk]) + disk_storage = disk_config["storage_name"] + if proxmox.move_disk(disk, vmid, vm, vm_config): + module.exit_json(changed=True, vmid=vmid, + msg="Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage)) + else: + module.exit_json(changed=False, vmid=vmid, msg="Disk %s already at %s storage" % (disk, disk_storage)) + except Exception as e: + module.fail_json(msg="Failed to move disk %s in VM %s with exception: %s" % (disk, vmid, str(e))) + + elif state == 'resized': + try: + size = module.params['size'] + if not match(r'^\+?\d+(\.\d+)?[KMGT]?$', size): + module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size)) + disk_config = disk_conf_str_to_dict(vm_config[disk]) + actual_size = disk_config['size'] + if size == actual_size: + module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already %s size" % (disk, size)) + proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size) + module.exit_json(changed=True, vmid=vmid, msg="Disk %s resized in VM %s" % (disk, vmid)) + except Exception as e: + module.fail_json(msg="Failed to resize disk %s in VM %s with exception: %s" % (disk, vmid, str(e))) + + elif state == 'absent': + try: + if disk not in vm_config: + module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already absent in VM %s" % (disk, vmid)) + 
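+            # force=1 asks the PVE unlink API for physical removal: the volume is
+            # deleted from the backing storage rather than being kept as an unused[n]
+            # entry (compare the detach path above, which calls unlink with force=0).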
proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=1) + module.exit_json(changed=True, vmid=vmid, msg="Disk %s removed from VM %s" % (disk, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to remove disk %s from VM %s: %s' % (disk, vmid, str(e))) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py b/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py new file mode 100644 index 000000000..7435695a9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Tristan Le Guern (@tleguern) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: proxmox_domain_info +short_description: Retrieve information about one or more Proxmox VE domains +version_added: 1.3.0 +description: + - Retrieve information about one or more Proxmox VE domains. +options: + domain: + description: + - Restrict results to a specific authentication realm. + aliases: ['realm', 'name'] + type: str +author: Tristan Le Guern (@tleguern) +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.attributes + - community.general.attributes.info_module +''' + + +EXAMPLES = ''' +- name: List existing domains + community.general.proxmox_domain_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + register: proxmox_domains + +- name: Retrieve information about the pve domain + community.general.proxmox_domain_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + domain: pve + register: proxmox_domain_pve +''' + + +RETURN = ''' +proxmox_domains: + description: List of authentication domains. + returned: always, but can be empty + type: list + elements: dict + contains: + comment: + description: Short description of the realm. + returned: on success + type: str + realm: + description: Realm name. + returned: on success + type: str + type: + description: Realm type. + returned: on success + type: str + digest: + description: Realm hash. 
+ returned: on success, can be absent + type: str +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.proxmox import ( + proxmox_auth_argument_spec, ProxmoxAnsible) + + +class ProxmoxDomainInfoAnsible(ProxmoxAnsible): + def get_domain(self, realm): + try: + domain = self.proxmox_api.access.domains.get(realm) + except Exception: + self.module.fail_json(msg="Domain '%s' does not exist" % realm) + domain['realm'] = realm + return domain + + def get_domains(self): + domains = self.proxmox_api.access.domains.get() + return domains + + +def proxmox_domain_info_argument_spec(): + return dict( + domain=dict(type='str', aliases=['realm', 'name']), + ) + + +def main(): + module_args = proxmox_auth_argument_spec() + domain_info_args = proxmox_domain_info_argument_spec() + module_args.update(domain_info_args) + + module = AnsibleModule( + argument_spec=module_args, + required_one_of=[('api_password', 'api_token_id')], + required_together=[('api_token_id', 'api_token_secret')], + supports_check_mode=True + ) + result = dict( + changed=False + ) + + proxmox = ProxmoxDomainInfoAnsible(module) + domain = module.params['domain'] + + if domain: + domains = [proxmox.get_domain(realm=domain)] + else: + domains = proxmox.get_domains() + result['proxmox_domains'] = domains + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_group_info.py b/ansible_collections/community/general/plugins/modules/proxmox_group_info.py new file mode 100644 index 000000000..531a9dae7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_group_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Tristan Le Guern +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: proxmox_group_info +short_description: Retrieve information about one or more Proxmox VE groups +version_added: 1.3.0 +description: + - Retrieve information about one or more Proxmox VE groups +options: + group: + description: + - Restrict results to a specific group. + aliases: ['groupid', 'name'] + type: str +author: Tristan Le Guern (@tleguern) +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.attributes + - community.general.attributes.info_module +''' + + +EXAMPLES = ''' +- name: List existing groups + community.general.proxmox_group_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + register: proxmox_groups + +- name: Retrieve information about the admin group + community.general.proxmox_group_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + group: admin + register: proxmox_group_admin +''' + + +RETURN = ''' +proxmox_groups: + description: List of groups. + returned: always, but can be empty + type: list + elements: dict + contains: + comment: + description: Short description of the group. 
+ returned: on success, can be absent + type: str + groupid: + description: Group name. + returned: on success + type: str + users: + description: List of users in the group. + returned: on success + type: list + elements: str +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.proxmox import ( + proxmox_auth_argument_spec, ProxmoxAnsible) + + +class ProxmoxGroupInfoAnsible(ProxmoxAnsible): + def get_group(self, groupid): + try: + group = self.proxmox_api.access.groups.get(groupid) + except Exception: + self.module.fail_json(msg="Group '%s' does not exist" % groupid) + group['groupid'] = groupid + return ProxmoxGroup(group) + + def get_groups(self): + groups = self.proxmox_api.access.groups.get() + return [ProxmoxGroup(group) for group in groups] + + +class ProxmoxGroup: + def __init__(self, group): + self.group = dict() + # Data representation is not the same depending on API calls + for k, v in group.items(): + if k == 'users' and isinstance(v, str): + self.group['users'] = v.split(',') + elif k == 'members': + self.group['users'] = group['members'] + else: + self.group[k] = v + + +def proxmox_group_info_argument_spec(): + return dict( + group=dict(type='str', aliases=['groupid', 'name']), + ) + + +def main(): + module_args = proxmox_auth_argument_spec() + group_info_args = proxmox_group_info_argument_spec() + module_args.update(group_info_args) + + module = AnsibleModule( + argument_spec=module_args, + required_one_of=[('api_password', 'api_token_id')], + required_together=[('api_token_id', 'api_token_secret')], + supports_check_mode=True + ) + result = dict( + changed=False + ) + + proxmox = ProxmoxGroupInfoAnsible(module) + group = module.params['group'] + + if group: + groups = [proxmox.get_group(groupid=group)] + else: + groups = proxmox.get_groups() + result['proxmox_groups'] = [group.group for group in groups] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_kvm.py b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py new file mode 100644 index 000000000..1dba5f4ea --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py @@ -0,0 +1,1433 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016, Abdoul Bah (@helldorado) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: proxmox_kvm +short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster +description: + - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. + - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior). +author: "Abdoul Bah (@helldorado) " +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + archive: + description: + - Specify a path to an archive to restore (instead of creating or cloning a VM). + type: str + version_added: 6.5.0 + acpi: + description: + - Specify if ACPI should be enabled/disabled. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(true). + type: bool + agent: + description: + - Specify if the QEMU Guest Agent should be enabled/disabled. 
+ - Since community.general 5.5.0, this can also be a string instead of a boolean. + This allows to specify values such as C(enabled=1,fstrim_cloned_disks=1). + type: str + args: + description: + - Pass arbitrary arguments to kvm. + - This option is for experts only! + - If I(proxmox_default_behavior) is set to C(compatiblity), this option has a default of + C(-serial unix:/var/run/qemu-server/.serial,server,nowait). + type: str + autostart: + description: + - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API). + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false). + type: bool + balloon: + description: + - Specify the amount of RAM for the VM in MB. + - Using zero disables the balloon driver. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0). + type: int + bios: + description: + - Specify the BIOS implementation. + type: str + choices: ['seabios', 'ovmf'] + boot: + description: + - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n). + - You can combine to set order. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(cnd). + type: str + bootdisk: + description: + - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+) + type: str + cicustom: + description: + - 'cloud-init: Specify custom files to replace the automatically generated ones at start.' + type: str + version_added: 1.3.0 + cipassword: + description: + - 'cloud-init: password of default user to create.' + type: str + version_added: 1.3.0 + citype: + description: + - 'cloud-init: Specifies the cloud-init configuration format.' + - The default depends on the configured operating system type (C(ostype)). + - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows. + type: str + choices: ['nocloud', 'configdrive2'] + version_added: 1.3.0 + ciuser: + description: + - 'cloud-init: username of default user to create.' + type: str + version_added: 1.3.0 + clone: + description: + - Name of VM to be cloned. If I(vmid) is set, I(clone) can take an arbitrary value but is required for initiating the clone. + type: str + cores: + description: + - Specify number of cores per socket. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). + type: int + cpu: + description: + - Specify emulated CPU type. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(kvm64). + type: str + cpulimit: + description: + - Specify if CPU usage will be limited. Value 0 indicates no CPU limit. + - If the computer has 2 CPUs, it has total of '2' CPU time + type: int + cpuunits: + description: + - Specify CPU weight for a VM. + - You can disable fair-scheduler configuration by setting this to 0 + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000). + type: int + delete: + description: + - Specify a list of settings you want to delete. + type: str + description: + description: + - Specify the description for the VM. Only used on the configuration web interface. + - This is saved as comment inside the configuration file. + type: str + digest: + description: + - Specify if to prevent changes if current configuration file has different SHA1 digest. 
+ - This can be used to prevent concurrent modifications. + type: str + efidisk0: + description: + - Specify a hash/dictionary of EFI disk options. + - Requires I(bios=ovmf) to be set to be able to use it. + type: dict + suboptions: + storage: + description: + - C(storage) is the storage identifier where to create the disk. + type: str + format: + description: + - C(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide, + section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest + version, tables 3 to 14) to find out format supported by the provided storage backend. + type: str + efitype: + description: + - C(efitype) indicates the size of the EFI disk. + - C(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries. + - C(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable + Secure Boot + type: str + choices: + - 2m + - 4m + pre_enrolled_keys: + description: + - C(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled C(1) in the VM firmware + upon creation or not (0). + - If set to C(1), Secure Boot will also be enabled by default when the VM is created. + type: bool + version_added: 4.5.0 + force: + description: + - Allow to force stop VM. + - Can be used with states C(stopped), C(restarted) and C(absent). + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false). + type: bool + format: + description: + - Target drive's backing file's data format. + - Used only with clone + - Use I(format=unspecified) and I(full=false) for a linked clone. + - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see + U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format + supported by the provided storage backend. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(qcow2). + If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified). + type: str + choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ] + freeze: + description: + - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution). + type: bool + full: + description: + - Create a full copy of all disk. This is always done when you clone a normal VM. + - For VM templates, we try to create a linked clone by default. + - Used only with clone + type: bool + default: true + hostpci: + description: + - Specify a hash/dictionary of map host pci devices into guest. C(hostpci='{"key":"value", "key":"value"}'). + - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). + - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers). + - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model). + - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map. + - C(x-vga=boolean) I(default=0) Enable vfio-vga device support. + - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. 
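+      # A hypothetical value following the syntax documented above (illustrative only):
+      #   hostpci: {"hostpci0": "host=01:00.0,pcie=1,x-vga=1"}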
+ type: dict + hotplug: + description: + - Selectively enable hotplug features. + - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb'). + - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb'). + type: str + hugepages: + description: + - Enable/disable hugepages memory. + type: str + choices: ['any', '2', '1024'] + ide: + description: + - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}'). + - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE + Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for + the latest version, tables 3 to 14) to find out format supported by the provided storage backend. + type: dict + ipconfig: + description: + - 'cloud-init: Set the IP configuration.' + - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}'). + - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces. + - Values allowed are - C("[gw=] [,gw6=] [,ip=] [,ip6=]"). + - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.' + - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address. + - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided. + - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration. + - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4. + type: dict + version_added: 1.3.0 + keyboard: + description: + - Sets the keyboard layout for VNC server. + type: str + kvm: + description: + - Enable/disable KVM hardware virtualization. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(true). + type: bool + localtime: + description: + - Sets the real time clock to local time. + - This is enabled by default if ostype indicates a Microsoft OS. + type: bool + lock: + description: + - Lock/unlock the VM. + type: str + choices: ['migrate', 'backup', 'snapshot', 'rollback'] + machine: + description: + - Specifies the Qemu machine type. + - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?)) + type: str + memory: + description: + - Memory size in MB for instance. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512). + type: int + migrate_downtime: + description: + - Sets maximum tolerated downtime (in seconds) for migrations. + type: int + migrate_speed: + description: + - Sets maximum speed (in MB/s) for migrations. + - A value of 0 is no limit. + type: int + name: + description: + - Specifies the VM name. Only used on the configuration web interface. + - Required only for C(state=present). + type: str + nameservers: + description: + - 'cloud-init: DNS server IP address(es).' + - If unset, PVE host settings are used. + type: list + elements: str + version_added: 1.3.0 + net: + description: + - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}'). 
+ - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid""). + - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). + - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. + - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'. + - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'. + - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services. + type: dict + newid: + description: + - VMID for the clone. Used only with clone. + - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI. + type: int + numa: + description: + - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}'). + - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)""). + - C(cpus) CPUs accessing this NUMA node. + - C(hostnodes) Host NUMA nodes to use. + - C(memory) Amount of memory this NUMA node provides. + - C(policy) NUMA allocation policy. + type: dict + numa_enabled: + description: + - Enables NUMA. + type: bool + onboot: + description: + - Specifies whether a VM will be started during system bootup. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(true). + type: bool + ostype: + description: + - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems. + - The l26 is Linux 2.6/3.X Kernel. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(l26). + type: str + choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris'] + parallel: + description: + - A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}'). + - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2. + - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+"). + type: dict + protection: + description: + - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations. + type: bool + reboot: + description: + - Allow reboot. If set to C(true), the VM exit on reboot. + type: bool + revert: + description: + - Revert a pending change. + type: str + sata: + description: + - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}'). + - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE + Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for + the latest version, tables 3 to 14) to find out format supported by the provided storage backend. + type: dict + scsi: + description: + - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. 
C(scsi='{"key":"value", "key":"value"}'). + - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 13. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE + Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for + the latest version, tables 3 to 14) to find out format supported by the provided storage backend. + type: dict + scsihw: + description: + - Specifies the SCSI controller model. + type: str + choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi'] + searchdomains: + description: + - 'cloud-init: Sets DNS search domain(s).' + - If unset, PVE host settings are used. + type: list + elements: str + version_added: 1.3.0 + serial: + description: + - A hash/dictionary of serial device to create inside the VM. C('{"key":"value", "key":"value"}'). + - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3. + - Values allowed are - C((/dev/.+|socket)). + - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care. + type: dict + shares: + description: + - Rets amount of memory shares for auto-ballooning. (0 - 50000). + - The larger the number is, the more memory this VM gets. + - The number is relative to weights of all other running VMs. + - Using 0 disables auto-ballooning, this means no limit. + type: int + skiplock: + description: + - Ignore locks + - Only root is allowed to use this option. + type: bool + smbios: + description: + - Specifies SMBIOS type 1 fields. + type: str + snapname: + description: + - The name of the snapshot. Used only with clone. + type: str + sockets: + description: + - Sets the number of CPU sockets. (1 - N). + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). + type: int + sshkeys: + description: + - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.' + type: str + version_added: 1.3.0 + startdate: + description: + - Sets the initial date of the real time clock. + - Valid format for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25'). + type: str + startup: + description: + - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]). + - Order is a non-negative number defining the general startup order. + - Shutdown in done with reverse ordering. + type: str + state: + description: + - Indicates desired state of the instance. + - If C(current), the current state of the VM will be fetched. You can access it with C(results.status) + type: str + choices: ['present', 'started', 'absent', 'stopped', 'restarted','current'] + default: present + storage: + description: + - Target storage for full clone. + type: str + tablet: + description: + - Enables/disables the USB tablet device. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false). + type: bool + tags: + description: + - List of tags to apply to the VM instance. + - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]). + - Tags are only available in Proxmox 6+. + type: list + elements: str + version_added: 2.3.0 + target: + description: + - Target node. 
Only allowed if the original VM is on shared storage. + - Used only with clone + type: str + tdf: + description: + - Enables/disables time drift fix. + type: bool + template: + description: + - Enables/disables the template. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false). + type: bool + timeout: + description: + - Timeout for operations. + type: int + default: 30 + update: + description: + - If C(true), the VM will be updated with new value. + - Cause of the operations of the API and security reasons, I have disabled the update of the following parameters + - C(net, virtio, ide, sata, scsi). Per example updating C(net) update the MAC address and C(virtio) create always new disk... + - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module. + type: bool + default: false + vcpus: + description: + - Sets number of hotplugged vcpus. + type: int + vga: + description: + - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'. + - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(std). + type: str + choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4'] + virtio: + description: + - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}'). + - Keys allowed are - C(virto[n]) where 0 ≤ n ≤ 15. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE + Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) + for the latest version, tables 3 to 14) to find out format supported by the provided storage backend. + type: dict + watchdog: + description: + - Creates a virtual hardware watchdog device. + type: str + proxmox_default_behavior: + description: + - As of community.general 4.0.0, various options no longer have default values. + These default values caused problems when users expected different behavior from Proxmox + by default or filled options which caused problems when set. + - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values + are used when the values are not explicitly specified by the user. The new default is C(no_defaults), + which makes sure these options have no defaults. + - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu), + I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets), + I(tablet), I(template), I(vga), options. 
+ type: str + default: no_defaults + choices: + - compatibility + - no_defaults + version_added: "1.3.0" +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.proxmox.selection + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Create new VM with minimal options + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + +- name: Create a VM from archive (backup) + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + archive: backup-storage:backup/vm/140/2023-03-08T06:41:23Z + name: spynal + +- name: Create new VM with minimal options and given vmid + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + vmid: 100 + +- name: Create new VM with two network interface options + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + net: + net0: 'virtio,bridge=vmbr1,rate=200' + net1: 'e1000,bridge=vmbr2' + +- name: Create new VM with one network interface, three virto hard disk, 4 cores, and 2 vcpus + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + net: + net0: 'virtio,bridge=vmbr1,rate=200' + virtio: + virtio0: 'VMs_LVM:10' + virtio1: 'VMs:2,format=qcow2' + virtio2: 'VMs:5,format=raw' + cores: 4 + vcpus: 2 + +- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot disabled by default + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + sata: + sata0: 'VMs_LVM:10,format=raw' + bios: ovmf + efidisk0: + storage: VMs_LVM_thin + format: raw + efitype: 4m + pre_enrolled_keys: false + +- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot enabled by default + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + sata: + sata0: 'VMs_LVM:10,format=raw' + bios: ovmf + efidisk0: + storage: VMs_LVM + format: raw + efitype: 4m + pre_enrolled_keys: 1 + +- name: > + Clone VM with only source VM name. + The VM source is spynal. + The target VM name is zavala + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + clone: spynal + name: zavala + node: sabrewulf + storage: VMs + format: qcow2 + timeout: 500 + +- name: > + Create linked clone VM with only source VM name. + The VM source is spynal. 
+ The target VM name is zavala + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + clone: spynal + name: zavala + node: sabrewulf + storage: VMs + full: false + format: unspecified + timeout: 500 + +- name: Clone VM with source vmid and target newid and raw format + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + clone: arbitrary_name + vmid: 108 + newid: 152 + name: zavala + node: sabrewulf + storage: LVM_STO + format: raw + timeout: 300 + +- name: Create new VM and lock it for snapshot + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + lock: snapshot + +- name: Create new VM and set protection to disable the remove VM and remove disk operations + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + protection: true + +- name: Create new VM using cloud-init with a username and password + community.general.proxmox_kvm: + node: sabrewulf + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + ide: + ide2: 'local:cloudinit,format=qcow2' + ciuser: mylinuxuser + cipassword: supersecret + searchdomains: 'mydomain.internal' + nameservers: 1.1.1.1 + net: + net0: 'virtio,bridge=vmbr1,tag=77' + ipconfig: + ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1' + +- name: Create new VM using Cloud-Init with an ssh key + community.general.proxmox_kvm: + node: sabrewulf + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + ide: + ide2: 'local:cloudinit,format=qcow2' + sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+' + searchdomains: 'mydomain.internal' + nameservers: + - '1.1.1.1' + - '8.8.8.8' + net: + net0: 'virtio,bridge=vmbr1,tag=77' + ipconfig: + ipconfig0: 'ip=192.168.1.1/24' + +- name: Start VM + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + state: started + +- name: Stop VM + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + state: stopped + +- name: Stop VM with force + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + state: stopped + force: true + +- name: Restart VM + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + state: restarted + +- name: Remove VM + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + state: absent + +- name: Update VM configuration + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + cores: 8 + memory: 16384 + update: true + +- name: Delete QEMU parameters + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + delete: 'args,template,cpulimit' + +- name: Revert a pending change + community.general.proxmox_kvm: + api_user: root@pam + api_password: secret + api_host: helldorado + name: spynal + node: sabrewulf + revert: 'template,cpulimit' +''' + +RETURN = ''' +vmid: + description: The VM vmid. 
+ returned: success + type: int + sample: 115 +status: + description: The current virtual machine status. + returned: success, not clone, not absent, not update + type: str + sample: running +msg: + description: A short message + returned: always + type: str + sample: "VM kropta with vmid = 110 is running" +''' + +import re +import time +from ansible.module_utils.six.moves.urllib.parse import quote + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.parsing.convert_bool import boolean + + +def parse_mac(netstr): + return re.search('=(.*?),', netstr).group(1) + + +def parse_dev(devstr): + return re.search('(.*?)(,|$)', devstr).group(1) + + +class ProxmoxKvmAnsible(ProxmoxAnsible): + def get_vminfo(self, node, vmid, **kwargs): + global results + results = {} + mac = {} + devices = {} + try: + vm = self.proxmox_api.nodes(node).qemu(vmid).config.get() + except Exception as e: + self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + + # Convert all dict in kwargs to elements. + # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] + for k in list(kwargs.keys()): + if isinstance(kwargs[k], dict): + kwargs.update(kwargs[k]) + del kwargs[k] + + # Split information by type + re_net = re.compile(r'net[0-9]') + re_dev = re.compile(r'(virtio|ide|scsi|sata|efidisk)[0-9]') + for k in kwargs.keys(): + if re_net.match(k): + mac[k] = parse_mac(vm[k]) + elif re_dev.match(k): + devices[k] = parse_dev(vm[k]) + + results['mac'] = mac + results['devices'] = devices + results['vmid'] = int(vmid) + + def settings(self, vmid, node, **kwargs): + proxmox_node = self.proxmox_api.nodes(node) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + + return proxmox_node.qemu(vmid).config.set(**kwargs) is None + + def wait_for_task(self, node, taskid): + timeout = self.module.params['timeout'] + + while timeout: + if self.api_task_ok(node, taskid): + # Wait an extra second as the API can be a ahead of the hypervisor + time.sleep(1) + return True + timeout = timeout - 1 + if timeout == 0: + break + time.sleep(1) + return False + + def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs): + # Available only in PVE 4 + only_v4 = ['force', 'protection', 'skiplock'] + only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags'] + + # valide clone parameters + valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target'] + clone_params = {} + # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm. + vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid) + + proxmox_node = self.proxmox_api.nodes(node) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. 
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) + kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool))) + + version = self.version() + pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0] + + # The features work only on PVE 4+ + if pve_major_version < 4: + for p in only_v4: + if p in kwargs: + del kwargs[p] + + # The features work only on PVE 6 + if pve_major_version < 6: + for p in only_v6: + if p in kwargs: + del kwargs[p] + + # 'sshkeys' param expects an urlencoded string + if 'sshkeys' in kwargs: + urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='') + kwargs['sshkeys'] = str(urlencoded_ssh_keys) + + # If update, don't update disk (virtio, efidisk0, ide, sata, scsi) and network interface + # pool parameter not supported by qemu//config endpoint on "update" (PVE 6.2) - only with "create" + if update: + if 'virtio' in kwargs: + del kwargs['virtio'] + if 'sata' in kwargs: + del kwargs['sata'] + if 'scsi' in kwargs: + del kwargs['scsi'] + if 'ide' in kwargs: + del kwargs['ide'] + if 'efidisk0' in kwargs: + del kwargs['efidisk0'] + if 'net' in kwargs: + del kwargs['net'] + if 'force' in kwargs: + del kwargs['force'] + if 'pool' in kwargs: + del kwargs['pool'] + + # Check that the bios option is set to ovmf if the efidisk0 option is present + if 'efidisk0' in kwargs: + if ('bios' not in kwargs) or ('ovmf' != kwargs['bios']): + self.module.fail_json(msg='efidisk0 cannot be used if bios is not set to ovmf. ') + + # Flatten efidisk0 option to a string so that it's a string which is what Proxmoxer and the API expect + if 'efidisk0' in kwargs: + efidisk0_str = '' + # Regexp to catch underscores in keys name, to replace them after by hypens + hyphen_re = re.compile(r'_') + # If present, the storage definition should be the first argument + if 'storage' in kwargs['efidisk0']: + efidisk0_str += kwargs['efidisk0'].get('storage') + ':1,' + kwargs['efidisk0'].pop('storage') + # Join other elements from the dict as key=value using commas as separator, replacing any underscore in key + # by hyphens (needed for pre_enrolled_keys to pre-enrolled-keys) + efidisk0_str += ','.join([hyphen_re.sub('-', k) + "=" + str(v) for k, v in kwargs['efidisk0'].items() + if 'storage' != k]) + kwargs['efidisk0'] = efidisk0_str + + # Convert all dict in kwargs to elements. + # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n] + for k in list(kwargs.keys()): + if isinstance(kwargs[k], dict): + kwargs.update(kwargs[k]) + del kwargs[k] + + if 'agent' in kwargs: + try: + # The API also allows booleans instead of e.g. `enabled=1` for backward-compatibility. + kwargs['agent'] = int(boolean(kwargs['agent'], strict=True)) + except TypeError: + # Not something that Ansible would parse as a boolean. 
+ pass + + # Rename numa_enabled to numa, according the API documentation + if 'numa_enabled' in kwargs: + kwargs['numa'] = kwargs['numa_enabled'] + del kwargs['numa_enabled'] + + # PVE api expects strings for the following params + if 'nameservers' in self.module.params: + nameservers = self.module.params.pop('nameservers') + if nameservers: + kwargs['nameserver'] = ' '.join(nameservers) + if 'searchdomains' in self.module.params: + searchdomains = self.module.params.pop('searchdomains') + if searchdomains: + kwargs['searchdomain'] = ' '.join(searchdomains) + + # VM tags are expected to be valid and presented as a comma/semi-colon delimited string + if 'tags' in kwargs: + re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$') + for tag in kwargs['tags']: + if not re_tag.match(tag): + self.module.fail_json(msg='%s is not a valid tag' % tag) + kwargs['tags'] = ",".join(kwargs['tags']) + + # -args and skiplock require root@pam user - but can not use api tokens + if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is None: + if not update and self.module.params['proxmox_default_behavior'] == 'compatibility': + kwargs['args'] = vm_args + elif self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None: + kwargs['args'] = self.module.params['args'] + elif self.module.params['api_user'] != "root@pam" and self.module.params['args'] is not None: + self.module.fail_json(msg='args parameter require root@pam user. ') + + if self.module.params['api_user'] != "root@pam" and self.module.params['skiplock'] is not None: + self.module.fail_json(msg='skiplock parameter require root@pam user. ') + + if update: + if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None: + return True + else: + return False + elif self.module.params['clone'] is not None: + for param in valid_clone_params: + if self.module.params[param] is not None: + clone_params[param] = self.module.params[param] + clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool))) + taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params) + else: + taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) + + if not self.wait_for_task(node, taskid): + self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % + proxmox_node.tasks(taskid).log.get()[:1]) + return False + return True + + def start_vm(self, vm): + vmid = vm['vmid'] + proxmox_node = self.proxmox_api.nodes(vm['node']) + taskid = proxmox_node.qemu(vmid).status.start.post() + if not self.wait_for_task(vm['node'], taskid): + self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' % + proxmox_node.tasks(taskid).log.get()[:1]) + return False + return True + + def stop_vm(self, vm, force): + vmid = vm['vmid'] + proxmox_node = self.proxmox_api.nodes(vm['node']) + taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0)) + if not self.wait_for_task(vm['node'], taskid): + self.module.fail_json(msg='Reached timeout while waiting for stopping VM. 
Last line in task before timeout: %s' % + proxmox_node.tasks(taskid).log.get()[:1]) + return False + return True + + +def main(): + module_args = proxmox_auth_argument_spec() + kvm_args = dict( + archive=dict(type='str'), + acpi=dict(type='bool'), + agent=dict(type='str'), + args=dict(type='str'), + autostart=dict(type='bool'), + balloon=dict(type='int'), + bios=dict(choices=['seabios', 'ovmf']), + boot=dict(type='str'), + bootdisk=dict(type='str'), + cicustom=dict(type='str'), + cipassword=dict(type='str', no_log=True), + citype=dict(type='str', choices=['nocloud', 'configdrive2']), + ciuser=dict(type='str'), + clone=dict(type='str'), + cores=dict(type='int'), + cpu=dict(type='str'), + cpulimit=dict(type='int'), + cpuunits=dict(type='int'), + delete=dict(type='str'), + description=dict(type='str'), + digest=dict(type='str'), + efidisk0=dict(type='dict', + options=dict( + storage=dict(type='str'), + format=dict(type='str'), + efitype=dict(type='str', choices=['2m', '4m']), + pre_enrolled_keys=dict(type='bool'), + )), + force=dict(type='bool'), + format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']), + freeze=dict(type='bool'), + full=dict(type='bool', default=True), + hostpci=dict(type='dict'), + hotplug=dict(type='str'), + hugepages=dict(choices=['any', '2', '1024']), + ide=dict(type='dict'), + ipconfig=dict(type='dict'), + keyboard=dict(type='str'), + kvm=dict(type='bool'), + localtime=dict(type='bool'), + lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']), + machine=dict(type='str'), + memory=dict(type='int'), + migrate_downtime=dict(type='int'), + migrate_speed=dict(type='int'), + name=dict(type='str'), + nameservers=dict(type='list', elements='str'), + net=dict(type='dict'), + newid=dict(type='int'), + node=dict(), + numa=dict(type='dict'), + numa_enabled=dict(type='bool'), + onboot=dict(type='bool'), + ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']), + parallel=dict(type='dict'), + pool=dict(type='str'), + protection=dict(type='bool'), + reboot=dict(type='bool'), + revert=dict(type='str'), + sata=dict(type='dict'), + scsi=dict(type='dict'), + scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']), + serial=dict(type='dict'), + searchdomains=dict(type='list', elements='str'), + shares=dict(type='int'), + skiplock=dict(type='bool'), + smbios=dict(type='str'), + snapname=dict(type='str'), + sockets=dict(type='int'), + sshkeys=dict(type='str', no_log=False), + startdate=dict(type='str'), + startup=dict(), + state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), + storage=dict(type='str'), + tablet=dict(type='bool'), + tags=dict(type='list', elements='str'), + target=dict(type='str'), + tdf=dict(type='bool'), + template=dict(type='bool'), + timeout=dict(type='int', default=30), + update=dict(type='bool', default=False), + vcpus=dict(type='int'), + vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), + virtio=dict(type='dict'), + vmid=dict(type='int'), + watchdog=dict(), + proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), + ) + module_args.update(kvm_args) + + module = AnsibleModule( + argument_spec=module_args, + mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), 
('clone', 'delete'), ('clone', 'revert')], + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], + required_if=[('state', 'present', ['node'])], + ) + + clone = module.params['clone'] + cpu = module.params['cpu'] + cores = module.params['cores'] + delete = module.params['delete'] + memory = module.params['memory'] + name = module.params['name'] + newid = module.params['newid'] + node = module.params['node'] + revert = module.params['revert'] + sockets = module.params['sockets'] + state = module.params['state'] + update = bool(module.params['update']) + vmid = module.params['vmid'] + validate_certs = module.params['validate_certs'] + + if module.params['proxmox_default_behavior'] == 'compatibility': + old_default_values = dict( + acpi=True, + autostart=False, + balloon=0, + boot='cnd', + cores=1, + cpu='kvm64', + cpuunits=1000, + format='qcow2', + kvm=True, + memory=512, + ostype='l26', + sockets=1, + tablet=False, + template=False, + vga='std', + ) + for param, value in old_default_values.items(): + if module.params[param] is None: + module.params[param] = value + + if module.params['format'] == 'unspecified': + module.params['format'] = None + + proxmox = ProxmoxKvmAnsible(module) + + # If vmid is not defined then retrieve its value from the vm name, + # the cloned vm name or retrieve the next free VM id from ProxmoxAPI. + if not vmid: + if state == 'present' and not update and not clone and not delete and not revert: + try: + vmid = proxmox.get_nextvmid() + except Exception: + module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name)) + else: + clone_target = clone or name + vmid = proxmox.get_vmid(clone_target, ignore_missing=True) + + if clone is not None: + # If newid is not defined then retrieve the next free id from ProxmoxAPI + if not newid: + try: + newid = proxmox.get_nextvmid() + except Exception: + module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name)) + + # Ensure source VM name exists when cloning + if not vmid: + module.fail_json(msg='VM with name = %s does not exist in cluster' % clone) + + # Ensure source VM id exists when cloning + proxmox.get_vm(vmid) + + # Ensure the chosen VM name doesn't already exist when cloning + existing_vmid = proxmox.get_vmid(name, ignore_missing=True) + if existing_vmid: + module.exit_json(changed=False, vmid=existing_vmid, msg="VM with name <%s> already exists" % name) + + # Ensure the chosen VM id doesn't already exist when cloning + if proxmox.get_vm(newid, ignore_missing=True): + module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name)) + + if delete is not None: + try: + proxmox.settings(vmid, node, delete=delete) + module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e)) + + if revert is not None: + try: + proxmox.settings(vmid, node, revert=revert) + module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... 
'.format(name, vmid) + str(e)) + + if state == 'present': + try: + if proxmox.get_vm(vmid, ignore_missing=True) and not (update or clone): + module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) + elif proxmox.get_vmid(name, ignore_missing=True) and not (update or clone): + module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name) + elif not node: + module.fail.json(msg='node is mandatory for creating/updating VM') + elif update and not any([vmid, name]): + module.fail_json(msg='vmid or name is mandatory for updating VM') + elif not proxmox.get_node(node): + module.fail_json(msg="node '%s' does not exist in cluster" % node) + + proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update, + archive=module.params['archive'], + acpi=module.params['acpi'], + agent=module.params['agent'], + autostart=module.params['autostart'], + balloon=module.params['balloon'], + bios=module.params['bios'], + boot=module.params['boot'], + bootdisk=module.params['bootdisk'], + cicustom=module.params['cicustom'], + cipassword=module.params['cipassword'], + citype=module.params['citype'], + ciuser=module.params['ciuser'], + cpulimit=module.params['cpulimit'], + cpuunits=module.params['cpuunits'], + description=module.params['description'], + digest=module.params['digest'], + efidisk0=module.params['efidisk0'], + force=module.params['force'], + freeze=module.params['freeze'], + hostpci=module.params['hostpci'], + hotplug=module.params['hotplug'], + hugepages=module.params['hugepages'], + ide=module.params['ide'], + ipconfig=module.params['ipconfig'], + keyboard=module.params['keyboard'], + kvm=module.params['kvm'], + localtime=module.params['localtime'], + lock=module.params['lock'], + machine=module.params['machine'], + migrate_downtime=module.params['migrate_downtime'], + migrate_speed=module.params['migrate_speed'], + net=module.params['net'], + numa=module.params['numa'], + numa_enabled=module.params['numa_enabled'], + onboot=module.params['onboot'], + ostype=module.params['ostype'], + parallel=module.params['parallel'], + pool=module.params['pool'], + protection=module.params['protection'], + reboot=module.params['reboot'], + sata=module.params['sata'], + scsi=module.params['scsi'], + scsihw=module.params['scsihw'], + serial=module.params['serial'], + shares=module.params['shares'], + skiplock=module.params['skiplock'], + smbios1=module.params['smbios'], + snapname=module.params['snapname'], + sshkeys=module.params['sshkeys'], + startdate=module.params['startdate'], + startup=module.params['startup'], + tablet=module.params['tablet'], + tags=module.params['tags'], + target=module.params['target'], + tdf=module.params['tdf'], + template=module.params['template'], + vcpus=module.params['vcpus'], + vga=module.params['vga'], + virtio=module.params['virtio'], + watchdog=module.params['watchdog']) + + if not clone: + proxmox.get_vminfo(node, vmid, + ide=module.params['ide'], + net=module.params['net'], + sata=module.params['sata'], + scsi=module.params['scsi'], + virtio=module.params['virtio']) + if update: + module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) + elif clone is not None: + module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) + else: + module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) + except Exception as e: + if update: + module.fail_json(vmid=vmid, 
msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) + elif clone is not None: + module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e)) + else: + module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) + + elif state == 'started': + status = {} + try: + if not vmid: + module.fail_json(msg='VM with name = %s does not exist in cluster' % name) + vm = proxmox.get_vm(vmid) + status['status'] = vm['status'] + if vm['status'] == 'running': + module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status) + + if proxmox.start_vm(vm): + module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status) + except Exception as e: + module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status) + + elif state == 'stopped': + status = {} + try: + if not vmid: + module.fail_json(msg='VM with name = %s does not exist in cluster' % name) + + vm = proxmox.get_vm(vmid) + + status['status'] = vm['status'] + if vm['status'] == 'stopped': + module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status) + + if proxmox.stop_vm(vm, force=module.params['force']): + module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status) + except Exception as e: + module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status) + + elif state == 'restarted': + status = {} + try: + if not vmid: + module.fail_json(msg='VM with name = %s does not exist in cluster' % name) + + vm = proxmox.get_vm(vmid) + status['status'] = vm['status'] + if vm['status'] == 'stopped': + module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status) + + if proxmox.stop_vm(vm, force=module.params['force']) and proxmox.start_vm(vm): + module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status) + except Exception as e: + module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status) + + elif state == 'absent': + status = {} + if not vmid: + module.exit_json(changed=False, msg='VM with name = %s is already absent' % name) + try: + vm = proxmox.get_vm(vmid, ignore_missing=True) + if not vm: + module.exit_json(changed=False, vmid=vmid) + + proxmox_node = proxmox.proxmox_api.nodes(vm['node']) + status['status'] = vm['status'] + if vm['status'] == 'running': + if module.params['force']: + proxmox.stop_vm(vm, True) + else: + module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=true." % vmid) + taskid = proxmox_node.qemu.delete(vmid) + if not proxmox.wait_for_task(vm['node'], taskid): + module.fail_json(msg='Reached timeout while waiting for removing VM. 
Last line in task before timeout: %s' % + proxmox_node.tasks(taskid).log.get()[:1]) + else: + module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid) + except Exception as e: + module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e)) + + elif state == 'current': + status = {} + if not vmid: + module.fail_json(msg='VM with name = %s does not exist in cluster' % name) + vm = proxmox.get_vm(vmid) + if not name: + name = vm.get('name', '(unnamed)') + current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status'] + status['status'] = current + if status: + module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_nic.py b/ansible_collections/community/general/plugins/modules/proxmox_nic.py new file mode 100644 index 000000000..26d07c7ec --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_nic.py @@ -0,0 +1,311 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021, Lammert Hellinga (@Kogelvis) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: proxmox_nic +short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster +version_added: 3.1.0 +description: + - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster. +author: "Lammert Hellinga (@Kogelvis) " +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + bridge: + description: + - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0). + type: str + firewall: + description: + - Whether this interface should be protected by the firewall. + type: bool + default: false + interface: + description: + - Name of the interface, should be C(net[n]) where C(1 ≤ n ≤ 31). + type: str + required: true + link_down: + description: + - Whether this interface should be disconnected (like pulling the plug). + type: bool + default: false + mac: + description: + - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified. + - When not specified this module will keep the MAC address the same when changing an existing interface. + type: str + model: + description: + - The NIC emulator model. + type: str + choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', + 'rtl8139', 'virtio', 'vmxnet3'] + default: virtio + mtu: + description: + - Force MTU, for C(virtio) model only, setting will be ignored otherwise. + - Set to C(1) to use the bridge MTU. + - Value should be C(1 ≤ n ≤ 65520). + type: int + name: + description: + - Specifies the VM name. Only used on the configuration web interface. + - Required only for I(state=present). + type: str + queues: + description: + - Number of packet queues to be used on the device. + - Value should be C(0 ≤ n ≤ 16). + type: int + rate: + description: + - Rate limit in MBps (MegaBytes per second) as floating point number. + type: float + state: + description: + - Indicates desired state of the NIC. 
+ type: str + choices: ['present', 'absent'] + default: present + tag: + description: + - VLAN tag to apply to packets on this interface. + - Value should be C(1 ≤ n ≤ 4094). + type: int + trunks: + description: + - List of VLAN trunks to pass through this interface. + type: list + elements: int + vmid: + description: + - Specifies the instance ID. + type: int +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Create NIC net0 targeting the vm by name + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + name: my_vm + interface: net0 + bridge: vmbr0 + tag: 3 + +- name: Create NIC net0 targeting the vm by id + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + vmid: 103 + interface: net0 + bridge: vmbr0 + mac: "12:34:56:C0:FF:EE" + firewall: true + +- name: Delete NIC net0 targeting the vm by name + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + name: my_vm + interface: net0 + state: absent +''' + +RETURN = ''' +vmid: + description: The VM vmid. + returned: success + type: int + sample: 115 +msg: + description: A short message + returned: always + type: str + sample: "Nic net0 unchanged on VM with vmid 103" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) + + +class ProxmoxNicAnsible(ProxmoxAnsible): + def update_nic(self, vmid, interface, model, **kwargs): + vm = self.get_vm(vmid) + + try: + vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() + except Exception as e: + self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) + + if interface in vminfo: + # Convert the current config to a dictionary + config = vminfo[interface].split(',') + config.sort() + + config_current = {} + + for i in config: + kv = i.split('=') + try: + config_current[kv[0]] = kv[1] + except IndexError: + config_current[kv[0]] = '' + + # determine the current model nic and mac-address + models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', + 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3'] + current_model = set(models) & set(config_current.keys()) + current_model = current_model.pop() + current_mac = config_current[current_model] + + # build nic config string + config_provided = "{0}={1}".format(model, current_mac) + else: + config_provided = model + + if kwargs['mac']: + config_provided = "{0}={1}".format(model, kwargs['mac']) + + if kwargs['bridge']: + config_provided += ",bridge={0}".format(kwargs['bridge']) + + if kwargs['firewall']: + config_provided += ",firewall=1" + + if kwargs['link_down']: + config_provided += ',link_down=1' + + if kwargs['mtu']: + config_provided += ",mtu={0}".format(kwargs['mtu']) + if model != 'virtio': + self.module.warn( + 'Ignoring MTU for nic {0} on VM with vmid {1}, ' + 'model should be set to \'virtio\': '.format(interface, vmid)) + + if kwargs['queues']: + config_provided += ",queues={0}".format(kwargs['queues']) + + if kwargs['rate']: + config_provided += ",rate={0}".format(kwargs['rate']) + + if kwargs['tag']: + config_provided += ",tag={0}".format(kwargs['tag']) + + if kwargs['trunks']: + config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks'])) + + 
net = {interface: config_provided} + vm = self.get_vm(vmid) + + if ((interface not in vminfo) or (vminfo[interface] != config_provided)): + if not self.module.check_mode: + self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**net) + return True + + return False + + def delete_nic(self, vmid, interface): + vm = self.get_vm(vmid) + vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() + + if interface in vminfo: + if not self.module.check_mode: + self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(delete=interface) + return True + + return False + + +def main(): + module_args = proxmox_auth_argument_spec() + nic_args = dict( + bridge=dict(type='str'), + firewall=dict(type='bool', default=False), + interface=dict(type='str', required=True), + link_down=dict(type='bool', default=False), + mac=dict(type='str'), + model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', + 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', + 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'), + mtu=dict(type='int'), + name=dict(type='str'), + queues=dict(type='int'), + rate=dict(type='float'), + state=dict(default='present', choices=['present', 'absent']), + tag=dict(type='int'), + trunks=dict(type='list', elements='int'), + vmid=dict(type='int'), + ) + module_args.update(nic_args) + + module = AnsibleModule( + argument_spec=module_args, + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], + supports_check_mode=True, + ) + + proxmox = ProxmoxNicAnsible(module) + + interface = module.params['interface'] + model = module.params['model'] + name = module.params['name'] + state = module.params['state'] + vmid = module.params['vmid'] + + # If vmid is not defined then retrieve its value from the vm name, + if not vmid: + vmid = proxmox.get_vmid(name) + + # Ensure VM id exists + proxmox.get_vm(vmid) + + if state == 'present': + try: + if proxmox.update_nic(vmid, interface, model, + bridge=module.params['bridge'], + firewall=module.params['firewall'], + link_down=module.params['link_down'], + mac=module.params['mac'], + mtu=module.params['mtu'], + queues=module.params['queues'], + rate=module.params['rate'], + tag=module.params['tag'], + trunks=module.params['trunks']): + module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid)) + else: + module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) + + elif state == 'absent': + try: + if proxmox.delete_nic(vmid, interface): + module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid)) + else: + module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_snap.py b/ansible_collections/community/general/plugins/modules/proxmox_snap.py new file mode 100644 index 000000000..0c17f8376 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_snap.py @@ -0,0 +1,363 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2020, Jeffrey van 
Pelt (@Thulium-Drake) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: proxmox_snap +short_description: Snapshot management of instances in Proxmox VE cluster +version_added: 2.0.0 +description: + - Allows you to create/delete/restore snapshots from instances in Proxmox VE cluster. + - Supports both KVM and LXC, OpenVZ has not been tested, as it is no longer supported on Proxmox VE. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + hostname: + description: + - The instance name. + type: str + vmid: + description: + - The instance id. + - If not set, will be fetched from PromoxAPI based on the hostname. + type: str + state: + description: + - Indicate desired state of the instance snapshot. + - The C(rollback) value was added in community.general 4.8.0. + choices: ['present', 'absent', 'rollback'] + default: present + type: str + force: + description: + - For removal from config file, even if removing disk snapshot fails. + default: false + type: bool + unbind: + description: + - This option only applies to LXC containers. + - Allows to snapshot a container even if it has configured mountpoints. + - Temporarily disables all configured mountpoints, takes snapshot, and finally restores original configuration. + - If running, the container will be stopped and restarted to apply config changes. + - Due to restrictions in the Proxmox API this option can only be used authenticating as C(root@pam) with I(api_password), API tokens do not work either. + - See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details. + default: false + type: bool + version_added: 5.7.0 + vmstate: + description: + - Snapshot includes RAM. + default: false + type: bool + description: + description: + - Specify the description for the snapshot. Only used on the configuration web interface. + - This is saved as a comment inside the configuration file. + type: str + timeout: + description: + - Timeout for operations. + default: 30 + type: int + snapname: + description: + - Name of the snapshot that has to be created/deleted/restored. + default: 'ansible_snap' + type: str + +notes: + - Requires proxmoxer and requests modules on host. These modules can be installed with pip. 
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ] +author: Jeffrey van Pelt (@Thulium-Drake) +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.attributes +''' + +EXAMPLES = r''' +- name: Create new container snapshot + community.general.proxmox_snap: + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + vmid: 100 + state: present + snapname: pre-updates + +- name: Create new snapshot for a container with configured mountpoints + community.general.proxmox_snap: + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + vmid: 100 + state: present + unbind: true # requires root@pam+password auth, API tokens are not supported + snapname: pre-updates + +- name: Remove container snapshot + community.general.proxmox_snap: + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + vmid: 100 + state: absent + snapname: pre-updates + +- name: Rollback container snapshot + community.general.proxmox_snap: + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + vmid: 100 + state: rollback + snapname: pre-updates +''' + +RETURN = r'''#''' + +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) + + +class ProxmoxSnapAnsible(ProxmoxAnsible): + def snapshot(self, vm, vmid): + return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot + + def vmconfig(self, vm, vmid): + return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).config + + def vmstatus(self, vm, vmid): + return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).status + + def _container_mp_get(self, vm, vmid): + cfg = self.vmconfig(vm, vmid).get() + mountpoints = {} + for key, value in cfg.items(): + if key.startswith('mp'): + mountpoints[key] = value + return mountpoints + + def _container_mp_disable(self, vm, vmid, timeout, unbind, mountpoints, vmstatus): + # shutdown container if running + if vmstatus == 'running': + self.shutdown_instance(vm, vmid, timeout) + # delete all mountpoints configs + self.vmconfig(vm, vmid).put(delete=' '.join(mountpoints)) + + def _container_mp_restore(self, vm, vmid, timeout, unbind, mountpoints, vmstatus): + # NOTE: requires auth as `root@pam`, API tokens are not supported + # see https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config + # restore original config + self.vmconfig(vm, vmid).put(**mountpoints) + # start container (if was running before snap) + if vmstatus == 'running': + self.start_instance(vm, vmid, timeout) + + def start_instance(self, vm, vmid, timeout): + taskid = self.vmstatus(vm, vmid).start.post() + while timeout: + if self.api_task_ok(vm['node'], taskid): + return True + timeout -= 1 + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + time.sleep(1) + return False + + def shutdown_instance(self, vm, vmid, timeout): + taskid = self.vmstatus(vm, vmid).shutdown.post() + while timeout: + if self.api_task_ok(vm['node'], taskid): + return True + timeout -= 1 + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for VM to stop. 
Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + time.sleep(1) + return False + + def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind): + if self.module.check_mode: + return True + + if vm['type'] == 'lxc': + if unbind is True: + # check if credentials will work + # WARN: it is crucial this check runs here! + # The correct permissions are required only to reconfig mounts. + # Not checking now would allow to remove the configuration BUT + # fail later, leaving the container in a misconfigured state. + if ( + self.module.params['api_user'] != 'root@pam' + or not self.module.params['api_password'] + ): + self.module.fail_json(msg='`unbind=True` requires authentication as `root@pam` with `api_password`, API tokens are not supported.') + return False + mountpoints = self._container_mp_get(vm, vmid) + vmstatus = self.vmstatus(vm, vmid).current().get()['status'] + if mountpoints: + self._container_mp_disable(vm, vmid, timeout, unbind, mountpoints, vmstatus) + taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description) + else: + taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate)) + + while timeout: + if self.api_task_ok(vm['node'], taskid): + if vm['type'] == 'lxc' and unbind is True and mountpoints: + self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus) + return True + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + timeout -= 1 + if vm['type'] == 'lxc' and unbind is True and mountpoints: + self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus) + return False + + def snapshot_remove(self, vm, vmid, timeout, snapname, force): + if self.module.check_mode: + return True + + taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force)) + while timeout: + if self.api_task_ok(vm['node'], taskid): + return True + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + timeout -= 1 + return False + + def snapshot_rollback(self, vm, vmid, timeout, snapname): + if self.module.check_mode: + return True + + taskid = self.snapshot(vm, vmid)(snapname).post("rollback") + while timeout: + if self.api_task_ok(vm['node'], taskid): + return True + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for rolling back VM snapshot. 
Last line in task before timeout: %s' % + self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + timeout -= 1 + return False + + +def main(): + module_args = proxmox_auth_argument_spec() + snap_args = dict( + vmid=dict(required=False), + hostname=dict(), + timeout=dict(type='int', default=30), + state=dict(default='present', choices=['present', 'absent', 'rollback']), + description=dict(type='str'), + snapname=dict(type='str', default='ansible_snap'), + force=dict(type='bool', default=False), + unbind=dict(type='bool', default=False), + vmstate=dict(type='bool', default=False), + ) + module_args.update(snap_args) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + proxmox = ProxmoxSnapAnsible(module) + + state = module.params['state'] + vmid = module.params['vmid'] + hostname = module.params['hostname'] + description = module.params['description'] + snapname = module.params['snapname'] + timeout = module.params['timeout'] + force = module.params['force'] + unbind = module.params['unbind'] + vmstate = module.params['vmstate'] + + # If hostname is set get the VM id from ProxmoxAPI + if not vmid and hostname: + vmid = proxmox.get_vmid(hostname) + elif not vmid: + module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) + + vm = proxmox.get_vm(vmid) + + if state == 'present': + try: + for i in proxmox.snapshot(vm, vmid).get(): + if i['name'] == snapname: + module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname) + + if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind): + if module.check_mode: + module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname) + else: + module.exit_json(changed=True, msg="Snapshot %s created" % snapname) + + except Exception as e: + module.fail_json(msg="Creating snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) + + elif state == 'absent': + try: + snap_exist = False + + for i in proxmox.snapshot(vm, vmid).get(): + if i['name'] == snapname: + snap_exist = True + continue + + if not snap_exist: + module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname) + else: + if proxmox.snapshot_remove(vm, vmid, timeout, snapname, force): + if module.check_mode: + module.exit_json(changed=False, msg="Snapshot %s would be removed" % snapname) + else: + module.exit_json(changed=True, msg="Snapshot %s removed" % snapname) + + except Exception as e: + module.fail_json(msg="Removing snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) + elif state == 'rollback': + try: + snap_exist = False + + for i in proxmox.snapshot(vm, vmid).get(): + if i['name'] == snapname: + snap_exist = True + continue + + if not snap_exist: + module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname) + if proxmox.snapshot_rollback(vm, vmid, timeout, snapname): + if module.check_mode: + module.exit_json(changed=True, msg="Snapshot %s would be rolled back" % snapname) + else: + module.exit_json(changed=True, msg="Snapshot %s rolled back" % snapname) + + except Exception as e: + module.fail_json(msg="Rollback of snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py new file mode 100644 index 
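The start/shutdown/snapshot helpers in proxmox_snap all follow the same pattern: POST an action, receive a UPID, then poll the node's task status until the task finishes. A minimal standalone sketch of that pattern, assuming the proxmoxer package and placeholder host and credentials:

from proxmoxer import ProxmoxAPI
import time

# Placeholder connection details; the module builds this client from its
# api_host/api_user/api_password (or API token) options instead.
client = ProxmoxAPI('node1', user='root@pam', password='1q2w3e', verify_ssl=False)

upid = client.nodes('node1').lxc(100).snapshot.post(snapname='pre-updates')
for _ in range(30):  # rough equivalent of the module's timeout=30
    task = client.nodes('node1').tasks(upid).status.get()
    if task['status'] == 'stopped':
        # api_task_ok() in the module additionally checks exitstatus == 'OK'
        print('task finished with exitstatus:', task.get('exitstatus'))
        break
    time.sleep(1)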
000000000..fd3759364
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Tristan Le Guern (@tleguern)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type


+DOCUMENTATION = '''
+---
+module: proxmox_storage_info
+short_description: Retrieve information about one or more Proxmox VE storages
+version_added: 2.2.0
+description:
+  - Retrieve information about one or more Proxmox VE storages.
+options:
+  storage:
+    description:
+      - Only return information on the specified storage.
+    aliases: ['name']
+    type: str
+  type:
+    description:
+      - Filter on a specific storage type.
+    type: str
+author: Tristan Le Guern (@tleguern)
+extends_documentation_fragment:
+  - community.general.proxmox.documentation
+  - community.general.attributes
+  - community.general.attributes.info_module
+notes:
+  - Storage-specific options can be returned by this module; see the documentation at U(https://pve.proxmox.com/wiki/Storage).
+'''


+EXAMPLES = '''
+- name: List existing storages
+  community.general.proxmox_storage_info:
+    api_host: helldorado
+    api_user: root@pam
+    api_password: "{{ password | default(omit) }}"
+    api_token_id: "{{ token_id | default(omit) }}"
+    api_token_secret: "{{ token_secret | default(omit) }}"
+  register: proxmox_storages

+- name: List NFS storages only
+  community.general.proxmox_storage_info:
+    api_host: helldorado
+    api_user: root@pam
+    api_password: "{{ password | default(omit) }}"
+    api_token_id: "{{ token_id | default(omit) }}"
+    api_token_secret: "{{ token_secret | default(omit) }}"
+    type: nfs
+  register: proxmox_storages_nfs

+- name: Retrieve information about the lvm2 storage
+  community.general.proxmox_storage_info:
+    api_host: helldorado
+    api_user: root@pam
+    api_password: "{{ password | default(omit) }}"
+    api_token_id: "{{ token_id | default(omit) }}"
+    api_token_secret: "{{ token_secret | default(omit) }}"
+    storage: lvm2
+  register: proxmox_storage_lvm
+'''


+RETURN = '''
+proxmox_storages:
+  description: List of storage pools.
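Raw /storage entries come back from PVE with comma-separated strings and 0/1 flags; the ProxmoxStorage wrapper below converts them into the lists, dicts and booleans documented here. The same conversion as a standalone sketch, with an invented sample entry:

def normalize_storage(raw):
    entry = dict(raw)
    if 'shared' in entry:
        entry['shared'] = bool(entry['shared'])  # PVE returns 0/1
    for key in ('content', 'nodes'):
        if key in entry:
            entry[key] = entry[key].split(',')   # 'iso,vztmpl' -> ['iso', 'vztmpl']
    if 'prune-backups' in entry:
        # 'keep-daily=7,keep-weekly=4' -> {'keep-daily': '7', 'keep-weekly': '4'}
        entry['prune-backups'] = dict(
            option.split('=') for option in entry['prune-backups'].split(','))
    return entry

print(normalize_storage({'storage': 'local', 'shared': 0,
                         'content': 'iso,vztmpl', 'prune-backups': 'keep-all=1'}))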
+ returned: on success + type: list + elements: dict + contains: + content: + description: Proxmox content types available in this storage + returned: on success + type: list + elements: str + digest: + description: Storage's digest + returned: on success + type: str + nodes: + description: List of nodes associated to this storage + returned: on success, if storage is not local + type: list + elements: str + path: + description: Physical path to this storage + returned: on success + type: str + prune-backups: + description: Backup retention options + returned: on success + type: list + elements: dict + shared: + description: Is this storage shared + returned: on success + type: bool + storage: + description: Storage name + returned: on success + type: str + type: + description: Storage type + returned: on success + type: str +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.proxmox import ( + proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool) + + +class ProxmoxStorageInfoAnsible(ProxmoxAnsible): + def get_storage(self, storage): + try: + storage = self.proxmox_api.storage.get(storage) + except Exception: + self.module.fail_json(msg="Storage '%s' does not exist" % storage) + return ProxmoxStorage(storage) + + def get_storages(self, type=None): + storages = self.proxmox_api.storage.get(type=type) + storages = [ProxmoxStorage(storage) for storage in storages] + return storages + + +class ProxmoxStorage: + def __init__(self, storage): + self.storage = storage + # Convert proxmox representation of lists, dicts and boolean for easier + # manipulation within ansible. + if 'shared' in self.storage: + self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared']) + if 'content' in self.storage: + self.storage['content'] = self.storage['content'].split(',') + if 'nodes' in self.storage: + self.storage['nodes'] = self.storage['nodes'].split(',') + if 'prune-backups' in storage: + options = storage['prune-backups'].split(',') + self.storage['prune-backups'] = dict() + for option in options: + k, v = option.split('=') + self.storage['prune-backups'][k] = v + + +def proxmox_storage_info_argument_spec(): + return dict( + storage=dict(type='str', aliases=['name']), + type=dict(type='str'), + ) + + +def main(): + module_args = proxmox_auth_argument_spec() + storage_info_args = proxmox_storage_info_argument_spec() + module_args.update(storage_info_args) + + module = AnsibleModule( + argument_spec=module_args, + required_one_of=[('api_password', 'api_token_id')], + required_together=[('api_token_id', 'api_token_secret')], + mutually_exclusive=[('storage', 'type')], + supports_check_mode=True + ) + result = dict( + changed=False + ) + + proxmox = ProxmoxStorageInfoAnsible(module) + storage = module.params['storage'] + storagetype = module.params['type'] + + if storage: + storages = [proxmox.get_storage(storage)] + else: + storages = proxmox.get_storages(type=storagetype) + result['proxmox_storages'] = [storage.storage for storage in storages] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py new file mode 100644 index 000000000..a2e66b38d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py @@ -0,0 +1,185 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, 
Andreas Botzner (@paginabianca)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function
+__metaclass__ = type


+DOCUMENTATION = r'''
+---
+module: proxmox_tasks_info
+short_description: Retrieve information about one or more Proxmox VE tasks
+version_added: 3.8.0
+description:
+  - Retrieve information about one or more Proxmox VE tasks.
+author: 'Andreas Botzner (@paginabianca)'
+options:
+  node:
+    description:
+      - Node from which to retrieve the tasks.
+    required: true
+    type: str
+  task:
+    description:
+      - Return only the task with this Unique Process ID (UPID).
+    aliases: ['upid', 'name']
+    type: str
+extends_documentation_fragment:
+  - community.general.proxmox.documentation
+  - community.general.attributes
+  - community.general.attributes.info_module
+'''


+EXAMPLES = '''
+- name: List tasks on node01
+  community.general.proxmox_tasks_info:
+    api_host: proxmoxhost
+    api_user: root@pam
+    api_password: '{{ password | default(omit) }}'
+    api_token_id: '{{ token_id | default(omit) }}'
+    api_token_secret: '{{ token_secret | default(omit) }}'
+    node: node01
+  register: result

+- name: Retrieve information about a specific task on node01
+  community.general.proxmox_tasks_info:
+    api_host: proxmoxhost
+    api_user: root@pam
+    api_password: '{{ password | default(omit) }}'
+    api_token_id: '{{ token_id | default(omit) }}'
+    api_token_secret: '{{ token_secret | default(omit) }}'
+    task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
+    node: node01
+  register: proxmox_tasks
+'''


+RETURN = '''
+proxmox_tasks:
+  description: List of tasks.
+  returned: on success
+  type: list
+  elements: dict
+  contains:
+    id:
+      description: ID of the task.
+      returned: on success
+      type: str
+    node:
+      description: Node name.
+      returned: on success
+      type: str
+    pid:
+      description: PID of the task.
+      returned: on success
+      type: int
+    pstart:
+      description: Process start time (pstart) of the task.
+      returned: on success
+      type: int
+    starttime:
+      description: Start time of the task.
+      returned: on success
+      type: int
+    type:
+      description: Type of the task.
+      returned: on success
+      type: str
+    upid:
+      description: UPID of the task.
+      returned: on success
+      type: str
+    user:
+      description: User that owns the task.
+      returned: on success
+      type: str
+    endtime:
+      description: End time of the task.
+      returned: on success, can be absent
+      type: int
+    status:
+      description: Status of the task.
+      returned: on success, can be absent
+      type: str
+    failed:
+      description: Whether the task failed.
+      returned: when status is defined
+      type: bool
+msg:
+  description: Short message.
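The C(failed) field is not part of the PVE response itself; the ProxmoxTask wrapper below derives it from C(status) whenever a finished task reports anything other than OK. The derivation as a standalone sketch (the second sample entry is invented):

def annotate_task(task):
    info = dict(task)
    status = info.get('status')
    if isinstance(status, str) and status != 'OK':
        info['failed'] = True
    return info

tasks = [
    {'upid': 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:', 'status': 'OK'},
    {'upid': 'UPID:node01:00001234:16167BEE:621EE299:vzstart:100:root@pam:', 'status': 'command failed'},
]
print([annotate_task(t) for t in tasks])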
+ returned: on failure + type: str + sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.proxmox import ( + proxmox_auth_argument_spec, ProxmoxAnsible) + + +class ProxmoxTaskInfoAnsible(ProxmoxAnsible): + def get_task(self, upid, node): + tasks = self.get_tasks(node) + for task in tasks: + if task.info['upid'] == upid: + return [task] + + def get_tasks(self, node): + tasks = self.proxmox_api.nodes(node).tasks.get() + return [ProxmoxTask(task) for task in tasks] + + +class ProxmoxTask: + def __init__(self, task): + self.info = dict() + for k, v in task.items(): + if k == 'status' and isinstance(v, str): + self.info[k] = v + if v != 'OK': + self.info['failed'] = True + else: + self.info[k] = v + + +def proxmox_task_info_argument_spec(): + return dict( + task=dict(type='str', aliases=['upid', 'name'], required=False), + node=dict(type='str', required=True), + ) + + +def main(): + module_args = proxmox_auth_argument_spec() + task_info_args = proxmox_task_info_argument_spec() + module_args.update(task_info_args) + + module = AnsibleModule( + argument_spec=module_args, + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('api_password', 'api_token_id')], + supports_check_mode=True) + result = dict(changed=False) + + proxmox = ProxmoxTaskInfoAnsible(module) + upid = module.params['task'] + node = module.params['node'] + if upid: + tasks = proxmox.get_task(upid=upid, node=node) + else: + tasks = proxmox.get_tasks(node=node) + if tasks is not None: + result['proxmox_tasks'] = [task.info for task in tasks] + module.exit_json(**result) + else: + result['msg'] = 'Task: {0} does not exist on node: {1}.'.format( + upid, node) + module.fail_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_template.py b/ansible_collections/community/general/plugins/modules/proxmox_template.py new file mode 100644 index 000000000..2bf24ff84 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_template.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: proxmox_template +short_description: Management of OS templates in Proxmox VE cluster +description: + - allows you to upload/delete templates in Proxmox VE cluster +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + node: + description: + - Proxmox VE node on which to operate. + type: str + src: + description: + - Path to uploaded file. + - Required only for I(state=present). + type: path + template: + description: + - The template name. + - Required for I(state=absent) to delete a template. + - Required for I(state=present) to download an appliance container template (pveam). + type: str + content_type: + description: + - Content type. + - Required only for I(state=present). + type: str + default: 'vztmpl' + choices: ['vztmpl', 'iso'] + storage: + description: + - Target storage. + type: str + default: 'local' + timeout: + description: + - Timeout for operations. 
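Every existence check in proxmox_template keys off the volume ID, built as C(storage:content_type/filename). A sketch of the test, assuming a connected proxmoxer client as in the snapshot sketch earlier; the module's get_template() below returns a list of matches instead of a boolean, but is used in the same truthy way:

def template_exists(client, node, storage, content_type, template):
    volid = '%s:%s/%s' % (storage, content_type, template)  # e.g. 'local:vztmpl/ubuntu.tar.gz'
    return any(item['volid'] == volid
               for item in client.nodes(node).storage(storage).content.get())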
+ type: int + default: 30 + force: + description: + - It can only be used with I(state=present), existing template will be overwritten. + type: bool + default: false + state: + description: + - Indicate desired state of the template. + type: str + choices: ['present', 'absent'] + default: present +notes: + - Requires C(proxmoxer) and C(requests) modules on host. This modules can be installed with M(ansible.builtin.pip). +author: Sergei Antipov (@UnderGreen) +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Upload new openvz template with minimal options + community.general.proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + src: ~/ubuntu-14.04-x86_64.tar.gz + +- name: > + Upload new openvz template with minimal options use environment + PROXMOX_PASSWORD variable(you should export it before) + community.general.proxmox_template: + node: uk-mc02 + api_user: root@pam + api_host: node1 + src: ~/ubuntu-14.04-x86_64.tar.gz + +- name: Upload new openvz template with all options and force overwrite + community.general.proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + storage: local + content_type: vztmpl + src: ~/ubuntu-14.04-x86_64.tar.gz + force: true + +- name: Delete template with minimal options + community.general.proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + template: ubuntu-14.04-x86_64.tar.gz + state: absent + +- name: Download proxmox appliance container template + community.general.proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + storage: local + content_type: vztmpl + template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz +''' + +import os +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) + + +class ProxmoxTemplateAnsible(ProxmoxAnsible): + def get_template(self, node, storage, content_type, template): + return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get() + if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)] + + def task_status(self, node, taskid, timeout): + """ + Check the task status and wait until the task is completed or the timeout is reached. + """ + while timeout: + if self.api_task_ok(node, taskid): + return True + timeout = timeout - 1 + if timeout == 0: + self.module.fail_json(msg='Reached timeout while waiting for uploading/downloading template. 
Last line in task before timeout: %s' %
+                                      self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1])

+            time.sleep(1)
+        return False

+    def upload_template(self, node, storage, content_type, realpath, timeout):
+        taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
+        return self.task_status(node, taskid, timeout)

+    def download_template(self, node, storage, template, timeout):
+        taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template)
+        return self.task_status(node, taskid, timeout)

+    def delete_template(self, node, storage, content_type, template, timeout):
+        volid = '%s:%s/%s' % (storage, content_type, template)
+        self.proxmox_api.nodes(node).storage(storage).content.delete(volid)
+        # wait until the volid disappears from the storage content listing
+        while timeout:
+            if not self.get_template(node, storage, content_type, template):
+                return True
+            timeout = timeout - 1
+            if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for template to be deleted.')

+            time.sleep(1)
+        return False


+def main():
+    module_args = proxmox_auth_argument_spec()
+    template_args = dict(
+        node=dict(),
+        src=dict(type='path'),
+        template=dict(),
+        content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
+        storage=dict(default='local'),
+        timeout=dict(type='int', default=30),
+        force=dict(type='bool', default=False),
+        state=dict(default='present', choices=['present', 'absent']),
+    )
+    module_args.update(template_args)

+    module = AnsibleModule(
+        argument_spec=module_args,
+        required_together=[('api_token_id', 'api_token_secret')],
+        required_one_of=[('api_password', 'api_token_id')],
+        required_if=[('state', 'absent', ['template'])]
+    )

+    proxmox = ProxmoxTemplateAnsible(module)

+    state = module.params['state']
+    node = module.params['node']
+    storage = module.params['storage']
+    timeout = module.params['timeout']

+    if state == 'present':
+        try:
+            content_type = module.params['content_type']
+            src = module.params['src']

+            # download appliance template
+            if content_type == 'vztmpl' and not src:
+                template = module.params['template']

+                if not template:
+                    module.fail_json(msg='template param for downloading appliance template is mandatory')

+                if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
+                    module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))

+                if proxmox.download_template(node, storage, template, timeout):
+                    module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))

+            # upload template file: src is mandatory and must point at an existing file
+            if not src:
+                module.fail_json(msg='src param for uploading template file is mandatory')
+            elif not (os.path.exists(src) and os.path.isfile(src)):
+                module.fail_json(msg='template file at path %s does not exist' % src)

+            template = os.path.basename(src)
+            if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
+                module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))

+            if proxmox.upload_template(node, storage, content_type, src, timeout):
+                module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
+        except Exception as e:
+            module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))

+    elif state == 'absent':
+        try:
+            content_type = module.params['content_type']
+            template = module.params['template']

+            if
not proxmox.get_template(node, storage, content_type, template): + module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) + + if proxmox.delete_template(node, storage, content_type, template, timeout): + module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) + except Exception as e: + module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/proxmox_user_info.py b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py new file mode 100644 index 000000000..a515f2b45 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py @@ -0,0 +1,257 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Tristan Le Guern +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: proxmox_user_info +short_description: Retrieve information about one or more Proxmox VE users +version_added: 1.3.0 +description: + - Retrieve information about one or more Proxmox VE users +options: + domain: + description: + - Restrict results to a specific authentication realm. + aliases: ['realm'] + type: str + user: + description: + - Restrict results to a specific user. + aliases: ['name'] + type: str + userid: + description: + - Restrict results to a specific user ID, which is a concatenation of a user and domain parts. + type: str +author: Tristan Le Guern (@tleguern) +extends_documentation_fragment: + - community.general.proxmox.documentation + - community.general.attributes + - community.general.attributes.info_module +''' + +EXAMPLES = ''' +- name: List existing users + community.general.proxmox_user_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + register: proxmox_users + +- name: List existing users in the pve authentication realm + community.general.proxmox_user_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + domain: pve + register: proxmox_users_pve + +- name: Retrieve information about admin@pve + community.general.proxmox_user_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + userid: admin@pve + register: proxmox_user_admin + +- name: Alternative way to retrieve information about admin@pve + community.general.proxmox_user_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + user: admin + domain: pve + register: proxmox_user_admin +''' + + +RETURN = ''' +proxmox_users: + description: List of users. + returned: always, but can be empty + type: list + elements: dict + contains: + comment: + description: Short description of the user. 
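Depending on the API call, C(tokens) arrives from PVE either as a list of dicts or as a mapping from token name to its values; the ProxmoxUser wrapper below flattens both into the list form documented here. Standalone, with invented sample data:

def normalize_tokens(tokens):
    if not tokens:                      # '' or None -> no tokens
        return []
    if isinstance(tokens, dict):        # {'automation': {'privsep': 1, ...}}
        flattened = []
        for tokenid, values in tokens.items():
            entry = dict(values, tokenid=tokenid)
            if 'privsep' in entry:
                entry['privsep'] = bool(entry['privsep'])
            flattened.append(entry)
        return flattened
    for entry in tokens:                # already a list of dicts
        if 'privsep' in entry:
            entry['privsep'] = bool(entry['privsep'])
    return tokens

print(normalize_tokens({'automation': {'privsep': 1, 'expire': 0}}))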
+ returned: on success + type: str + domain: + description: User's authentication realm, also the right part of the user ID. + returned: on success + type: str + email: + description: User's email address. + returned: on success + type: str + enabled: + description: User's account state. + returned: on success + type: bool + expire: + description: Expiration date in seconds since EPOCH. Zero means no expiration. + returned: on success + type: int + firstname: + description: User's first name. + returned: on success + type: str + groups: + description: List of groups which the user is a member of. + returned: on success + type: list + elements: str + keys: + description: User's two factor authentication keys. + returned: on success + type: str + lastname: + description: User's last name. + returned: on success + type: str + tokens: + description: List of API tokens associated to the user. + returned: on success + type: list + elements: dict + contains: + comment: + description: Short description of the token. + returned: on success + type: str + expire: + description: Expiration date in seconds since EPOCH. Zero means no expiration. + returned: on success + type: int + privsep: + description: Describe if the API token is further restricted with ACLs or is fully privileged. + returned: on success + type: bool + tokenid: + description: Token name. + returned: on success + type: str + user: + description: User's login name, also the left part of the user ID. + returned: on success + type: str + userid: + description: Proxmox user ID, represented as user@realm. + returned: on success + type: str +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.proxmox import ( + proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool) + + +class ProxmoxUserInfoAnsible(ProxmoxAnsible): + def get_user(self, userid): + try: + user = self.proxmox_api.access.users.get(userid) + except Exception: + self.module.fail_json(msg="User '%s' does not exist" % userid) + user['userid'] = userid + return ProxmoxUser(user) + + def get_users(self, domain=None): + users = self.proxmox_api.access.users.get(full=1) + users = [ProxmoxUser(user) for user in users] + if domain: + return [user for user in users if user.user['domain'] == domain] + return users + + +class ProxmoxUser: + def __init__(self, user): + self.user = dict() + # Data representation is not the same depending on API calls + for k, v in user.items(): + if k == 'enable': + self.user['enabled'] = proxmox_to_ansible_bool(user['enable']) + elif k == 'userid': + self.user['user'] = user['userid'].split('@')[0] + self.user['domain'] = user['userid'].split('@')[1] + self.user[k] = v + elif k in ['groups', 'tokens'] and (v == '' or v is None): + self.user[k] = [] + elif k == 'groups' and type(v) == str: + self.user['groups'] = v.split(',') + elif k == 'tokens' and type(v) == list: + for token in v: + if 'privsep' in token: + token['privsep'] = proxmox_to_ansible_bool(token['privsep']) + self.user['tokens'] = v + elif k == 'tokens' and type(v) == dict: + self.user['tokens'] = list() + for tokenid, tokenvalues in v.items(): + t = tokenvalues + t['tokenid'] = tokenid + if 'privsep' in tokenvalues: + t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep']) + self.user['tokens'].append(t) + else: + self.user[k] = v + + +def proxmox_user_info_argument_spec(): + return dict( + domain=dict(type='str', aliases=['realm']), + user=dict(type='str', aliases=['name']), + 
userid=dict(type='str'), + ) + + +def main(): + module_args = proxmox_auth_argument_spec() + user_info_args = proxmox_user_info_argument_spec() + module_args.update(user_info_args) + + module = AnsibleModule( + argument_spec=module_args, + required_one_of=[('api_password', 'api_token_id')], + required_together=[('api_token_id', 'api_token_secret')], + mutually_exclusive=[('user', 'userid'), ('domain', 'userid')], + supports_check_mode=True + ) + result = dict( + changed=False + ) + + proxmox = ProxmoxUserInfoAnsible(module) + domain = module.params['domain'] + user = module.params['user'] + if user and domain: + userid = user + '@' + domain + else: + userid = module.params['userid'] + + if userid: + users = [proxmox.get_user(userid=userid)] + else: + users = proxmox.get_users(domain=domain) + result['proxmox_users'] = [user.user for user in users] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pubnub_blocks.py b/ansible_collections/community/general/plugins/modules/pubnub_blocks.py new file mode 100644 index 000000000..a03553c5c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pubnub_blocks.py @@ -0,0 +1,639 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# PubNub Real-time Cloud-Hosted Push API and Push Notification Client +# Frameworks +# Copyright (C) 2016 PubNub Inc. +# http://www.pubnub.com/ +# http://www.pubnub.com/terms +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: pubnub_blocks +short_description: PubNub blocks management module +description: + - "This module allows Ansible to interface with the PubNub BLOCKS + infrastructure by providing the following operations: create / remove, + start / stop and rename for blocks and create / modify / remove for event + handlers." +author: + - PubNub (@pubnub) + - Sergey Mamontov (@parfeon) +requirements: + - "python >= 2.7" + - "pubnub_blocks_client >= 1.0" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + email: + description: + - Email from account for which new session should be started. + - "Not required if C(cache) contains result of previous module call (in + same play)." + required: false + type: str + default: '' + password: + description: + - Password which match to account to which specified C(email) belong. + - "Not required if C(cache) contains result of previous module call (in + same play)." + required: false + type: str + default: '' + cache: + description: > + In case if single play use blocks management module few times it is + preferred to enabled 'caching' by making previous module to share + gathered artifacts and pass them to this parameter. + required: false + type: dict + default: {} + account: + description: + - "Name of PubNub account for from which C(application) will be used to + manage blocks." + - "User's account will be used if value not set or empty." + type: str + default: '' + application: + description: + - "Name of target PubNub application for which blocks configuration on + specific C(keyset) will be done." + type: str + required: true + keyset: + description: + - Name of application's keys set which is bound to managed blocks. 
+ type: str + required: true + state: + description: + - "Intended block state after event handlers creation / update process + will be completed." + required: false + default: 'present' + choices: ['started', 'stopped', 'present', 'absent'] + type: str + name: + description: + - Name of managed block which will be later visible on admin.pubnub.com. + required: true + type: str + description: + description: + - Short block description which will be later visible on + admin.pubnub.com. Used only if block doesn't exists and won't change + description for existing block. + required: false + type: str + event_handlers: + description: + - "List of event handlers which should be updated for specified block + C(name)." + - "Each entry for new event handler should contain: C(name), C(src), + C(channels), C(event). C(name) used as event handler name which can be + used later to make changes to it." + - C(src) is full path to file with event handler code. + - "C(channels) is name of channel from which event handler is waiting + for events." + - "C(event) is type of event which is able to trigger event handler: + I(js-before-publish), I(js-after-publish), I(js-after-presence)." + - "Each entry for existing handlers should contain C(name) (so target + handler can be identified). Rest parameters (C(src), C(channels) and + C(event)) can be added if changes required for them." + - "It is possible to rename event handler by adding C(changes) key to + event handler payload and pass dictionary, which will contain single key + C(name), where new name should be passed." + - "To remove particular event handler it is possible to set C(state) for + it to C(absent) and it will be removed." + required: false + default: [] + type: list + elements: dict + changes: + description: + - "List of fields which should be changed by block itself (doesn't + affect any event handlers)." + - "Possible options for change is: C(name)." + required: false + default: {} + type: dict + validate_certs: + description: + - "This key allow to try skip certificates check when performing REST API + calls. Sometimes host may have issues with certificates on it and this + will cause problems to call PubNub REST API." + - If check should be ignored C(False) should be passed to this parameter. + required: false + default: true + type: bool +''' + +EXAMPLES = ''' +# Event handler create example. +- name: Create single event handler + community.general.pubnub_blocks: + email: '{{ email }}' + password: '{{ password }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + event_handlers: + - + src: '{{ path_to_handler_source }}' + name: '{{ handler_name }}' + event: 'js-before-publish' + channels: '{{ handler_channel }}' + +# Change event handler trigger event type. +- name: Change event handler 'event' + community.general.pubnub_blocks: + email: '{{ email }}' + password: '{{ password }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + event_handlers: + - + name: '{{ handler_name }}' + event: 'js-after-publish' + +# Stop block and event handlers. 
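Spelled out as a plain mapping, one C(event_handlers) entry from these examples has the following shape; values are illustrative, C(changes) renames an existing handler, and C(state: absent) would remove it:

handler_entry = {
    'name': 'route_new_messages',                  # identifies the handler
    'src': '/tmp/handlers/route_new_messages.js',  # path to the handler code
    'channels': 'messages_in',
    'event': 'js-before-publish',
    # 'changes': {'name': 'renamed_handler'},
    # 'state': 'absent',
}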
+- name: Stopping block + community.general.pubnub_blocks: + email: '{{ email }}' + password: '{{ password }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: stop + +# Multiple module calls with cached result passing +- name: Create '{{ block_name }}' block + register: module_cache + community.general.pubnub_blocks: + email: '{{ email }}' + password: '{{ password }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: present +- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}' + register: module_cache + community.general.pubnub_blocks: + cache: '{{ module_cache }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: present + event_handlers: + - + src: '{{ path_to_handler_1_source }}' + name: '{{ event_handler_1_name }}' + channels: '{{ event_handler_1_channel }}' + event: 'js-before-publish' +- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}' + register: module_cache + community.general.pubnub_blocks: + cache: '{{ module_cache }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: present + event_handlers: + - + src: '{{ path_to_handler_2_source }}' + name: '{{ event_handler_2_name }}' + channels: '{{ event_handler_2_channel }}' + event: 'js-before-publish' +- name: Start '{{ block_name }}' block + register: module_cache + community.general.pubnub_blocks: + cache: '{{ module_cache }}' + application: '{{ app_name }}' + keyset: '{{ keyset_name }}' + name: '{{ block_name }}' + state: started +''' + +RETURN = ''' +module_cache: + description: + - Cached account information. In case if with single play module + used few times it is better to pass cached data to next module calls to speed + up process. + type: dict + returned: always +''' +import copy +import os + +try: + # Import PubNub BLOCKS client. + from pubnub_blocks_client import User, Account, Owner, Application, Keyset # noqa: F401, pylint: disable=unused-import + from pubnub_blocks_client import Block, EventHandler + from pubnub_blocks_client import exceptions + HAS_PUBNUB_BLOCKS_CLIENT = True +except ImportError: + HAS_PUBNUB_BLOCKS_CLIENT = False + User = None + Account = None + Owner = None + Application = None + Keyset = None + Block = None + EventHandler = None + exceptions = None + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text + + +def pubnub_user(module): + """Create and configure user model if it possible. + + :type module: AnsibleModule + :param module: Reference on module which contain module launch + information and status report methods. + + :rtype: User + :return: Reference on initialized and ready to use user or 'None' in + case if not all required information has been passed to block. + """ + user = None + params = module.params + + if params.get('cache') and params['cache'].get('module_cache'): + cache = params['cache']['module_cache'] + user = User() + user.restore(cache=copy.deepcopy(cache['pnm_user'])) + elif params.get('email') and params.get('password'): + user = User(email=params.get('email'), password=params.get('password')) + else: + err_msg = 'It looks like not account credentials has been passed or ' \ + '\'cache\' field doesn\'t have result of previous module ' \ + 'call.' 
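This cache fast-path is what the C(cache) option and the C(module_cache) return value exist for: only the first play logs in with email and password, and later plays rebuild the session from the registered result. The same logic as a standalone sketch (the helper name is illustrative):

import copy

from pubnub_blocks_client import User

def user_from_params(params):
    cache = (params.get('cache') or {}).get('module_cache')
    if cache:
        # Later plays: restore the authorized session from the cached result.
        user = User()
        user.restore(cache=copy.deepcopy(cache['pnm_user']))
        return user
    # First play: full credential-based login.
    return User(email=params['email'], password=params['password'])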
+ module.fail_json(msg='Missing account credentials.', + description=err_msg, changed=False) + + return user + + +def pubnub_account(module, user): + """Create and configure account if it is possible. + + :type module: AnsibleModule + :param module: Reference on module which contain module launch + information and status report methods. + :type user: User + :param user: Reference on authorized user for which one of accounts + should be used during manipulations with block. + + :rtype: Account + :return: Reference on initialized and ready to use account or 'None' in + case if not all required information has been passed to block. + """ + params = module.params + if params.get('account'): + account_name = params.get('account') + account = user.account(name=params.get('account')) + if account is None: + err_frmt = 'It looks like there is no \'{0}\' account for ' \ + 'authorized user. Please make sure what correct ' \ + 'name has been passed during module configuration.' + module.fail_json(msg='Missing account.', + description=err_frmt.format(account_name), + changed=False) + else: + account = user.accounts()[0] + + return account + + +def pubnub_application(module, account): + """Retrieve reference on target application from account model. + + NOTE: In case if account authorization will fail or there is no + application with specified name, module will exit with error. + :type module: AnsibleModule + :param module: Reference on module which contain module launch + information and status report methods. + :type account: Account + :param account: Reference on PubNub account model from which reference + on application should be fetched. + + :rtype: Application + :return: Reference on initialized and ready to use application model. + """ + application = None + params = module.params + try: + application = account.application(params['application']) + except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc: + exc_msg = _failure_title_from_exception(exc) + exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, + changed=account.changed, + module_cache=dict(account)) + + if application is None: + err_fmt = 'There is no \'{0}\' application for {1}. Make sure what ' \ + 'correct application name has been passed. If application ' \ + 'doesn\'t exist you can create it on admin.pubnub.com.' + email = account.owner.email + module.fail_json(msg=err_fmt.format(params['application'], email), + changed=account.changed, module_cache=dict(account)) + + return application + + +def pubnub_keyset(module, account, application): + """Retrieve reference on target keyset from application model. + + NOTE: In case if there is no keyset with specified name, module will + exit with error. + :type module: AnsibleModule + :param module: Reference on module which contain module launch + information and status report methods. + :type account: Account + :param account: Reference on PubNub account model which will be + used in case of error to export cached data. + :type application: Application + :param application: Reference on PubNub application model from which + reference on keyset should be fetched. + + :rtype: Keyset + :return: Reference on initialized and ready to use keyset model. + """ + params = module.params + keyset = application.keyset(params['keyset']) + if keyset is None: + err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \ + 'sure what correct keyset name has been passed. 
If keyset ' \ + 'doesn\'t exist you can create it on admin.pubnub.com.' + module.fail_json(msg=err_fmt.format(params['keyset'], + application.name), + changed=account.changed, module_cache=dict(account)) + + return keyset + + +def pubnub_block(module, account, keyset): + """Retrieve reference on target keyset from application model. + + NOTE: In case if there is no block with specified name and module + configured to start/stop it, module will exit with error. + :type module: AnsibleModule + :param module: Reference on module which contain module launch + information and status report methods. + :type account: Account + :param account: Reference on PubNub account model which will be used in + case of error to export cached data. + :type keyset: Keyset + :param keyset: Reference on keyset model from which reference on block + should be fetched. + + :rtype: Block + :return: Reference on initialized and ready to use keyset model. + """ + block = None + params = module.params + try: + block = keyset.block(params['name']) + except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc: + exc_msg = _failure_title_from_exception(exc) + exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, + changed=account.changed, module_cache=dict(account)) + + # Report error because block doesn't exists and at the same time + # requested to start/stop. + if block is None and params['state'] in ['started', 'stopped']: + block_name = params.get('name') + module.fail_json(msg="'{0}' block doesn't exists.".format(block_name), + changed=account.changed, module_cache=dict(account)) + + if block is None and params['state'] == 'present': + block = Block(name=params.get('name'), + description=params.get('description')) + keyset.add_block(block) + + if block: + # Update block information if required. + if params.get('changes') and params['changes'].get('name'): + block.name = params['changes']['name'] + if params.get('description'): + block.description = params.get('description') + + return block + + +def pubnub_event_handler(block, data): + """Retrieve reference on target event handler from application model. + + :type block: Block + :param block: Reference on block model from which reference on event + handlers should be fetched. + :type data: dict + :param data: Reference on dictionary which contain information about + event handler and whether it should be created or not. + + :rtype: EventHandler + :return: Reference on initialized and ready to use event handler model. + 'None' will be returned in case if there is no handler with + specified name and no request to create it. + """ + event_handler = block.event_handler(data['name']) + + # Prepare payload for event handler update. + changed_name = (data.pop('changes').get('name') + if 'changes' in data else None) + name = data.get('name') or changed_name + channels = data.get('channels') + event = data.get('event') + code = _content_of_file_at_path(data.get('src')) + state = data.get('state') or 'present' + + # Create event handler if required. + if event_handler is None and state == 'present': + event_handler = EventHandler(name=name, channels=channels, event=event, + code=code) + block.add_event_handler(event_handler) + + # Update event handler if required. 
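pubnub_block() above is a small upsert: look the block up by name, refuse a start/stop request for a missing block, create the model for state=present, then apply a pending rename. The same control flow, reduced to a plain-dict sketch (this is not the pubnub_blocks_client API):

def upsert_block(blocks, name, state, description=None, new_name=None):
    block = blocks.get(name)
    if block is None and state in ('started', 'stopped'):
        raise LookupError("'%s' block doesn't exist" % name)
    if block is None and state == 'present':
        block = blocks[name] = {'name': name, 'description': description}
    if block and new_name:
        block['name'] = new_name
    return block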
+ if event_handler is not None and state == 'present': + if name is not None: + event_handler.name = name + if channels is not None: + event_handler.channels = channels + if event is not None: + event_handler.event = event + if code is not None: + event_handler.code = code + + return event_handler + + +def _failure_title_from_exception(exception): + """Compose human-readable title for module error title. + + Title will be based on status codes if they has been provided. + :type exception: exceptions.GeneralPubNubError + :param exception: Reference on exception for which title should be + composed. + + :rtype: str + :return: Reference on error tile which should be shown on module + failure. + """ + title = 'General REST API access error.' + if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS: + title = 'Authorization error: missing credentials.' + elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS: + title = 'Authorization error: wrong credentials.' + elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS: + title = 'API access error: insufficient access rights.' + elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED: + title = 'API access error: time token expired.' + elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS: + title = 'Block create did fail: block with same name already exists).' + elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL: + title = 'Unable fetch list of blocks for keyset.' + elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL: + title = 'Block creation did fail.' + elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL: + title = 'Block update did fail.' + elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL: + title = 'Block removal did fail.' + elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL: + title = 'Block start/stop did fail.' + elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS: + title = 'Event handler creation did fail: missing fields.' + elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS: + title = 'Event handler creation did fail: missing fields.' + elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL: + title = 'Event handler creation did fail.' + elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL: + title = 'Event handler update did fail.' + elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL: + title = 'Event handler removal did fail.' + + return title + + +def _content_of_file_at_path(path): + """Read file content. + + Try read content of file at specified path. + :type path: str + :param path: Full path to location of file which should be read'ed. 
+ :rtype: content + :return: File content or 'None' + """ + content = None + if path and os.path.exists(path): + with open(path, mode="rt") as opened_file: + b_content = opened_file.read() + try: + content = to_text(b_content, errors='surrogate_or_strict') + except UnicodeError: + pass + + return content + + +def main(): + fields = dict( + email=dict(default='', required=False, type='str'), + password=dict(default='', required=False, type='str', no_log=True), + account=dict(default='', required=False, type='str'), + application=dict(required=True, type='str'), + keyset=dict(required=True, type='str', no_log=False), + state=dict(default='present', type='str', + choices=['started', 'stopped', 'present', 'absent']), + name=dict(required=True, type='str'), description=dict(type='str'), + event_handlers=dict(default=list(), type='list', elements='dict'), + changes=dict(default=dict(), type='dict'), + cache=dict(default=dict(), type='dict'), + validate_certs=dict(default=True, type='bool')) + module = AnsibleModule(argument_spec=fields, supports_check_mode=True) + + if not HAS_PUBNUB_BLOCKS_CLIENT: + module.fail_json(msg='pubnub_blocks_client required for this module.') + + params = module.params + + # Authorize user. + user = pubnub_user(module) + # Initialize PubNub account instance. + account = pubnub_account(module, user=user) + # Try fetch application with which module should work. + application = pubnub_application(module, account=account) + # Try fetch keyset with which module should work. + keyset = pubnub_keyset(module, account=account, application=application) + # Try fetch block with which module should work. + block = pubnub_block(module, account=account, keyset=keyset) + is_new_block = block is not None and block.uid == -1 + + # Check whether block should be removed or not. + if block is not None and params['state'] == 'absent': + keyset.remove_block(block) + block = None + + if block is not None: + # Update block information if required. + if params.get('changes') and params['changes'].get('name'): + block.name = params['changes']['name'] + + # Process event changes to event handlers. + for event_handler_data in params.get('event_handlers') or list(): + state = event_handler_data.get('state') or 'present' + event_handler = pubnub_event_handler(data=event_handler_data, + block=block) + if state == 'absent' and event_handler: + block.delete_event_handler(event_handler) + + # Update block operation state if required. + if block and not is_new_block: + if params['state'] == 'started': + block.start() + elif params['state'] == 'stopped': + block.stop() + + # Save current account state. + if not module.check_mode: + try: + account.save() + except (exceptions.APIAccessError, exceptions.KeysetError, + exceptions.BlockError, exceptions.EventHandlerError, + exceptions.GeneralPubNubError) as exc: + module_cache = dict(account) + module_cache.update(dict(pnm_user=dict(user))) + exc_msg = _failure_title_from_exception(exc) + exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0] + module.fail_json(msg=exc_msg, description=exc_descr, + changed=account.changed, + module_cache=module_cache) + + # Report module execution results. 
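The C(force_basic_auth) note above describes a real pitfall: by default the underlying HTTP client only sends credentials after a 401 challenge, which some Pulp deployments never issue. A sketch of what forcing the header means, using only the standard library (Python 3 shown; names are illustrative):

import base64
from urllib.request import Request

def preauthorized_request(url, username, password):
    credentials = '%s:%s' % (username, password)
    token = base64.b64encode(credentials.encode('utf-8')).decode('ascii')
    request = Request(url)
    # Send the Authorization header on the first request instead of
    # waiting for a 401 challenge that may never come.
    request.add_header('Authorization', 'Basic ' + token)
    return request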
+ module_cache = dict(account) + module_cache.update(dict(pnm_user=dict(user))) + changed_will_change = account.changed or account.will_change + module.exit_json(changed=changed_will_change, module_cache=module_cache) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pulp_repo.py b/ansible_collections/community/general/plugins/modules/pulp_repo.py new file mode 100644 index 000000000..d7333f89e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pulp_repo.py @@ -0,0 +1,743 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Joe Adams <@sysadmind> +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: pulp_repo +author: "Joe Adams (@sysadmind)" +short_description: Add or remove Pulp repos from a remote host +description: + - Add or remove Pulp repos from a remote host. + - Note, this is for Pulp 2 only. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + add_export_distributor: + description: + - Whether or not to add the export distributor to new C(rpm) repositories. + type: bool + default: false + feed: + description: + - Upstream feed URL to receive updates from. + type: str + force_basic_auth: + description: + - httplib2, the library used by the M(ansible.builtin.uri) module only sends + authentication information when a webservice responds to an initial + request with a 401 status. Since some basic auth services do not + properly send a 401, logins will fail. This option forces the sending of + the Basic authentication header upon initial request. + type: bool + default: false + generate_sqlite: + description: + - Boolean flag to indicate whether sqlite files should be generated during + a repository publish. + required: false + type: bool + default: false + feed_ca_cert: + description: + - CA certificate string used to validate the feed source SSL certificate. + This can be the file content or the path to the file. + type: str + aliases: [ importer_ssl_ca_cert ] + feed_client_cert: + description: + - Certificate used as the client certificate when synchronizing the + repository. This is used to communicate authentication information to + the feed source. The value to this option must be the full path to the + certificate. The specified file may be the certificate itself or a + single file containing both the certificate and private key. This can be + the file content or the path to the file. + type: str + aliases: [ importer_ssl_client_cert ] + feed_client_key: + description: + - Private key to the certificate specified in I(importer_ssl_client_cert), + assuming it is not included in the certificate file itself. This can be + the file content or the path to the file. + type: str + aliases: [ importer_ssl_client_key ] + name: + description: + - Name of the repo to add or remove. This correlates to repo-id in Pulp. + required: true + type: str + aliases: [ repo ] + proxy_host: + description: + - Proxy url setting for the pulp repository importer. This is in the + format scheme://host. + required: false + default: null + type: str + proxy_port: + description: + - Proxy port setting for the pulp repository importer. 
+ required: false + default: null + type: str + proxy_username: + description: + - Proxy username for the pulp repository importer. + required: false + default: null + type: str + proxy_password: + description: + - Proxy password for the pulp repository importer. + required: false + default: null + type: str + publish_distributor: + description: + - Distributor to use when state is C(publish). The default is to + publish all distributors. + type: str + pulp_host: + description: + - URL of the pulp server to connect to. + default: https://127.0.0.1 + type: str + relative_url: + description: + - Relative URL for the local repository. It's required when state=present. + type: str + repo_type: + description: + - Repo plugin type to use (i.e. C(rpm), C(docker)). + default: rpm + type: str + repoview: + description: + - Whether to generate repoview files for a published repository. Setting + this to C(true) automatically activates C(generate_sqlite). + required: false + type: bool + default: false + serve_http: + description: + - Make the repo available over HTTP. + type: bool + default: false + serve_https: + description: + - Make the repo available over HTTPS. + type: bool + default: true + state: + description: + - The repo state. A state of C(sync) will queue a sync of the repo. + This is asynchronous but not delayed like a scheduled sync. A state of + C(publish) will use the repository's distributor to publish the content. + default: present + choices: [ "present", "absent", "sync", "publish" ] + type: str + url_password: + description: + - The password for use in HTTP basic authentication to the pulp API. + If the I(url_username) parameter is not specified, the I(url_password) + parameter will not be used. + url_username: + description: + - The username for use in HTTP basic authentication to the pulp API. + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be + used on personally controlled sites using self-signed certificates. + type: bool + default: true + wait_for_completion: + description: + - Wait for asynchronous tasks to complete before returning. + type: bool + default: false +notes: + - This module can currently only create distributors and importers on rpm + repositories. Contributions to support other repo types are welcome. +extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Create a new repo with name 'my_repo' + community.general.pulp_repo: + name: my_repo + relative_url: my/repo + state: present + +- name: Create a repo with a feed and a relative URL + community.general.pulp_repo: + name: my_centos_updates + repo_type: rpm + feed: http://mirror.centos.org/centos/6/updates/x86_64/ + relative_url: centos/6/updates + url_username: admin + url_password: admin + force_basic_auth: true + state: present + +- name: Remove a repo from the pulp server + community.general.pulp_repo: + name: my_old_repo + repo_type: rpm + state: absent +''' + +RETURN = ''' +repo: + description: Name of the repo that the action was performed on. 
+ returned: success + type: str + sample: my_repo +''' + +import json +import os +from time import sleep + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.urls import url_argument_spec + + +class pulp_server(object): + """ + Class to interact with a Pulp server + """ + + def __init__(self, module, pulp_host, repo_type, wait_for_completion=False): + self.module = module + self.host = pulp_host + self.repo_type = repo_type + self.repo_cache = dict() + self.wait_for_completion = wait_for_completion + + def check_repo_exists(self, repo_id): + try: + self.get_repo_config_by_id(repo_id) + except IndexError: + return False + else: + return True + + def compare_repo_distributor_config(self, repo_id, **kwargs): + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + for key, value in kwargs.items(): + if key not in distributor['config'].keys(): + return False + + if not distributor['config'][key] == value: + return False + + return True + + def compare_repo_importer_config(self, repo_id, **kwargs): + repo_config = self.get_repo_config_by_id(repo_id) + + for importer in repo_config['importers']: + for key, value in kwargs.items(): + if value is not None: + if key not in importer['config'].keys(): + return False + + if not importer['config'][key] == value: + return False + + return True + + def create_repo( + self, + repo_id, + relative_url, + feed=None, + generate_sqlite=False, + serve_http=False, + serve_https=True, + proxy_host=None, + proxy_port=None, + proxy_username=None, + proxy_password=None, + repoview=False, + ssl_ca_cert=None, + ssl_client_cert=None, + ssl_client_key=None, + add_export_distributor=False + ): + url = "%s/pulp/api/v2/repositories/" % self.host + data = dict() + data['id'] = repo_id + data['distributors'] = [] + + if self.repo_type == 'rpm': + yum_distributor = dict() + yum_distributor['distributor_id'] = "yum_distributor" + yum_distributor['distributor_type_id'] = "yum_distributor" + yum_distributor['auto_publish'] = True + yum_distributor['distributor_config'] = dict() + yum_distributor['distributor_config']['http'] = serve_http + yum_distributor['distributor_config']['https'] = serve_https + yum_distributor['distributor_config']['relative_url'] = relative_url + yum_distributor['distributor_config']['repoview'] = repoview + yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview + data['distributors'].append(yum_distributor) + + if add_export_distributor: + export_distributor = dict() + export_distributor['distributor_id'] = "export_distributor" + export_distributor['distributor_type_id'] = "export_distributor" + export_distributor['auto_publish'] = False + export_distributor['distributor_config'] = dict() + export_distributor['distributor_config']['http'] = serve_http + export_distributor['distributor_config']['https'] = serve_https + export_distributor['distributor_config']['relative_url'] = relative_url + export_distributor['distributor_config']['repoview'] = repoview + export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview + data['distributors'].append(export_distributor) + + data['importer_type_id'] = "yum_importer" + data['importer_config'] = dict() + + if feed: + data['importer_config']['feed'] = feed + + if proxy_host: + data['importer_config']['proxy_host'] = proxy_host + + if proxy_port: + data['importer_config']['proxy_port'] = proxy_port + + 
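+ # For orientation: the POST body assembled in this function ends up
+ # shaped roughly like the following (values are illustrative only):
+ #
+ # {
+ #     "id": "my_repo",
+ #     "distributors": [{"distributor_id": "yum_distributor", ...}],
+ #     "importer_type_id": "yum_importer",
+ #     "importer_config": {"feed": "http://mirror.example.com/repo/"}
+ # }
+ #
+ # The branches below fill in the remaining optional importer_config keys.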
if proxy_username: + data['importer_config']['proxy_username'] = proxy_username + + if proxy_password: + data['importer_config']['proxy_password'] = proxy_password + + if ssl_ca_cert: + data['importer_config']['ssl_ca_cert'] = ssl_ca_cert + + if ssl_client_cert: + data['importer_config']['ssl_client_cert'] = ssl_client_cert + + if ssl_client_key: + data['importer_config']['ssl_client_key'] = ssl_client_key + + data['notes'] = { + "_repo-type": "rpm-repo" + } + + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 201: + self.module.fail_json( + msg="Failed to create repo.", + status_code=info['status'], + response=info['msg'], + url=url) + else: + return True + + def delete_repo(self, repo_id): + url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id) + response, info = fetch_url(self.module, url, data='', method='DELETE') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to delete repo.", + status_code=info['status'], + response=info['msg'], + url=url) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def get_repo_config_by_id(self, repo_id): + if repo_id not in self.repo_cache.keys(): + repo_array = [x for x in self.repo_list if x['id'] == repo_id] + self.repo_cache[repo_id] = repo_array[0] + + return self.repo_cache[repo_id] + + def publish_repo(self, repo_id, publish_distributor): + url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id) + + # If there's no distributor specified, we will publish them all + if publish_distributor is None: + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + data = dict() + data['id'] = distributor['id'] + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to publish the repo.", + status_code=info['status'], + response=info['msg'], + url=url, + distributor=distributor['id']) + else: + data = dict() + data['id'] = publish_distributor + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to publish the repo", + status_code=info['status'], + response=info['msg'], + url=url, + distributor=publish_distributor) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def sync_repo(self, repo_id): + url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id) + response, info = fetch_url(self.module, url, data='', method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to schedule a sync of the repo.", + status_code=info['status'], + response=info['msg'], + url=url) + + if self.wait_for_completion: + self.verify_tasks_completed(json.load(response)) + + return True + + def update_repo_distributor_config(self, repo_id, **kwargs): + url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id) + repo_config = self.get_repo_config_by_id(repo_id) + + for distributor in repo_config['distributors']: + distributor_url = "%s%s/" % (url, distributor['id']) + data = dict() + data['distributor_config'] = dict() + + for key, value in kwargs.items(): + data['distributor_config'][key] = value + + response, info = fetch_url( + self.module, + distributor_url, + data=json.dumps(data), + method='PUT') + + if info['status'] != 202: + 
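+ # Pulp 2 acknowledges accepted (asynchronous) write operations with
+ # HTTP 202 and a call report; any other status means this distributor
+ # update was rejected, so fail below.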
self.module.fail_json( + msg="Failed to set the relative url for the repository.", + status_code=info['status'], + response=info['msg'], + url=url) + + def update_repo_importer_config(self, repo_id, **kwargs): + url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id) + data = dict() + importer_config = dict() + + for key, value in kwargs.items(): + if value is not None: + importer_config[key] = value + + data['importer_config'] = importer_config + + if self.repo_type == 'rpm': + data['importer_type_id'] = "yum_importer" + + response, info = fetch_url( + self.module, + url, + data=json.dumps(data), + method='POST') + + if info['status'] != 202: + self.module.fail_json( + msg="Failed to set the repo importer configuration", + status_code=info['status'], + response=info['msg'], + importer_config=importer_config, + url=url) + + def set_repo_list(self): + url = "%s/pulp/api/v2/repositories/?details=true" % self.host + response, info = fetch_url(self.module, url, method='GET') + + if info['status'] != 200: + self.module.fail_json( + msg="Request failed", + status_code=info['status'], + response=info['msg'], + url=url) + + self.repo_list = json.load(response) + + def verify_tasks_completed(self, response_dict): + for task in response_dict['spawned_tasks']: + task_url = "%s%s" % (self.host, task['_href']) + + while True: + response, info = fetch_url( + self.module, + task_url, + data='', + method='GET') + + if info['status'] != 200: + self.module.fail_json( + msg="Failed to check async task status.", + status_code=info['status'], + response=info['msg'], + url=task_url) + + task_dict = json.load(response) + + if task_dict['state'] == 'finished': + return True + + if task_dict['state'] == 'error': + self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error']) + + sleep(2) + + +def main(): + argument_spec = url_argument_spec() + argument_spec.update( + add_export_distributor=dict(default=False, type='bool'), + feed=dict(), + generate_sqlite=dict(default=False, type='bool'), + feed_ca_cert=dict(aliases=['importer_ssl_ca_cert']), + feed_client_cert=dict(aliases=['importer_ssl_client_cert']), + feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True), + name=dict(required=True, aliases=['repo']), + proxy_host=dict(), + proxy_port=dict(), + proxy_username=dict(), + proxy_password=dict(no_log=True), + publish_distributor=dict(), + pulp_host=dict(default="https://127.0.0.1"), + relative_url=dict(), + repo_type=dict(default="rpm"), + repoview=dict(default=False, type='bool'), + serve_http=dict(default=False, type='bool'), + serve_https=dict(default=True, type='bool'), + state=dict( + default="present", + choices=['absent', 'present', 'sync', 'publish']), + wait_for_completion=dict(default=False, type="bool")) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + add_export_distributor = module.params['add_export_distributor'] + feed = module.params['feed'] + generate_sqlite = module.params['generate_sqlite'] + importer_ssl_ca_cert = module.params['feed_ca_cert'] + importer_ssl_client_cert = module.params['feed_client_cert'] + importer_ssl_client_key = module.params['feed_client_key'] + proxy_host = module.params['proxy_host'] + proxy_port = module.params['proxy_port'] + proxy_username = module.params['proxy_username'] + proxy_password = module.params['proxy_password'] + publish_distributor = module.params['publish_distributor'] + pulp_host = module.params['pulp_host'] + relative_url = 
module.params['relative_url'] + repo = module.params['name'] + repo_type = module.params['repo_type'] + repoview = module.params['repoview'] + serve_http = module.params['serve_http'] + serve_https = module.params['serve_https'] + state = module.params['state'] + wait_for_completion = module.params['wait_for_completion'] + + if (state == 'present') and (not relative_url): + module.fail_json(msg="When state is present, relative_url is required.") + + # Ensure that the importer_ssl_* is the content and not a file path + if importer_ssl_ca_cert is not None: + importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert) + if os.path.isfile(importer_ssl_ca_cert_file_path): + importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r') + try: + importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read() + finally: + importer_ssl_ca_cert_file_object.close() + + if importer_ssl_client_cert is not None: + importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert) + if os.path.isfile(importer_ssl_client_cert_file_path): + importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r') + try: + importer_ssl_client_cert = importer_ssl_client_cert_file_object.read() + finally: + importer_ssl_client_cert_file_object.close() + + if importer_ssl_client_key is not None: + importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key) + if os.path.isfile(importer_ssl_client_key_file_path): + importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r') + try: + importer_ssl_client_key = importer_ssl_client_key_file_object.read() + finally: + importer_ssl_client_key_file_object.close() + + server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion) + server.set_repo_list() + repo_exists = server.check_repo_exists(repo) + + changed = False + + if state == 'absent' and repo_exists: + if not module.check_mode: + server.delete_repo(repo) + + changed = True + + if state == 'sync': + if not repo_exists: + module.fail_json(msg="Repository was not found. The repository can not be synced.") + + if not module.check_mode: + server.sync_repo(repo) + + changed = True + + if state == 'publish': + if not repo_exists: + module.fail_json(msg="Repository was not found. The repository can not be published.") + + if not module.check_mode: + server.publish_repo(repo, publish_distributor) + + changed = True + + if state == 'present': + if not repo_exists: + if not module.check_mode: + server.create_repo( + repo_id=repo, + relative_url=relative_url, + feed=feed, + generate_sqlite=generate_sqlite, + serve_http=serve_http, + serve_https=serve_https, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + repoview=repoview, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key, + add_export_distributor=add_export_distributor) + + changed = True + + else: + # Check to make sure all the settings are correct + # The importer config gets overwritten on set and not updated, so + # we set the whole config at the same time. 
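+ # The distributor side is handled differently below:
+ # update_repo_distributor_config() sends a separate PUT per setting, so
+ # relative_url, generate_sqlite, repoview, http and https are each
+ # compared and updated in their own round trip.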
+ if not server.compare_repo_importer_config( + repo, + feed=feed, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key + ): + if not module.check_mode: + server.update_repo_importer_config( + repo, + feed=feed, + proxy_host=proxy_host, + proxy_port=proxy_port, + proxy_username=proxy_username, + proxy_password=proxy_password, + ssl_ca_cert=importer_ssl_ca_cert, + ssl_client_cert=importer_ssl_client_cert, + ssl_client_key=importer_ssl_client_key) + + changed = True + + if relative_url is not None: + if not server.compare_repo_distributor_config( + repo, + relative_url=relative_url + ): + if not module.check_mode: + server.update_repo_distributor_config( + repo, + relative_url=relative_url) + + changed = True + + if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite): + if not module.check_mode: + server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite) + + changed = True + + if not server.compare_repo_distributor_config(repo, repoview=repoview): + if not module.check_mode: + server.update_repo_distributor_config(repo, repoview=repoview) + + changed = True + + if not server.compare_repo_distributor_config(repo, http=serve_http): + if not module.check_mode: + server.update_repo_distributor_config(repo, http=serve_http) + + changed = True + + if not server.compare_repo_distributor_config(repo, https=serve_https): + if not module.check_mode: + server.update_repo_distributor_config(repo, https=serve_https) + + changed = True + + module.exit_json(changed=changed, repo=repo) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/puppet.py b/ansible_collections/community/general/plugins/modules/puppet.py new file mode 100644 index 000000000..cd580791b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/puppet.py @@ -0,0 +1,281 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: puppet +short_description: Runs puppet +description: + - Runs I(puppet) agent or apply in a reliable manner. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + timeout: + description: + - How long to wait for I(puppet) to finish. + type: str + default: 30m + puppetmaster: + description: + - The hostname of the puppetmaster to contact. + type: str + modulepath: + description: + - Path to an alternate location for puppet modules. + type: str + manifest: + description: + - Path to the manifest file to run puppet apply on. + type: str + noop: + description: + - Override puppet.conf noop mode. + - When C(true), run Puppet agent with C(--noop) switch set. + - When C(false), run Puppet agent with C(--no-noop) switch set. + - When unset (default), use default or puppet.conf value if defined. + type: bool + facts: + description: + - A dict of values to pass in as persistent external facter facts. + type: dict + facter_basename: + description: + - Basename of the facter output file. 
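+      # Note: together with I(facts), this determines the file the module
+      # writes; the facts dict is serialized as <facter_basename>.json into
+      # facter's external facts directory (typically /etc/facter/facts.d
+      # when running as root), where facter exposes it as external facts.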
+ type: str + default: ansible + environment: + description: + - Puppet environment to be used. + type: str + confdir: + description: + - Path to the directory containing the puppet.conf file. + type: str + version_added: 5.1.0 + logdest: + description: + - Where the puppet logs should go, if puppet apply is being used. + - C(all) will go to both C(console) and C(syslog). + - C(stdout) will be deprecated and replaced by C(console). + type: str + choices: [ all, stdout, syslog ] + default: stdout + certname: + description: + - The name to use when handling certificates. + type: str + tags: + description: + - A list of puppet tags to be used. + type: list + elements: str + skip_tags: + description: + - A list of puppet tags to be excluded. + type: list + elements: str + version_added: 6.6.0 + execute: + description: + - Execute a specific piece of Puppet code. + - It has no effect with a puppetmaster. + type: str + use_srv_records: + description: + - Toggles use_srv_records flag + type: bool + summarize: + description: + - Whether to print a transaction summary. + type: bool + default: false + verbose: + description: + - Print extra information. + type: bool + default: false + debug: + description: + - Enable full debugging. + type: bool + default: false + show_diff: + description: + - Whether to print file changes details + - Alias C(show-diff) has been deprecated and will be removed in community.general 7.0.0. + aliases: ['show-diff'] + type: bool + default: false +requirements: +- puppet +author: +- Monty Taylor (@emonty) +''' + +EXAMPLES = r''' +- name: Run puppet agent and fail if anything goes wrong + community.general.puppet: + +- name: Run puppet and timeout in 5 minutes + community.general.puppet: + timeout: 5m + +- name: Run puppet using a different environment + community.general.puppet: + environment: testing + +- name: Run puppet using a specific certname + community.general.puppet: + certname: agent01.example.com + +- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster + community.general.puppet: + execute: include ::mymodule + +- name: Run puppet using a specific tags + community.general.puppet: + tags: + - update + - nginx + skip_tags: + - service + +- name: Run puppet agent in noop mode + community.general.puppet: + noop: true + +- name: Run a manifest with debug, log to both syslog and console, specify module path + community.general.puppet: + modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules + logdest: all + manifest: /var/lib/example/puppet_step_config.pp +''' + +import json +import os +import stat + +import ansible_collections.community.general.plugins.module_utils.puppet as puppet_utils + +from ansible.module_utils.basic import AnsibleModule + + +def _write_structured_data(basedir, basename, data): + if not os.path.exists(basedir): + os.makedirs(basedir) + file_path = os.path.join(basedir, "{0}.json".format(basename)) + # This is more complex than you might normally expect because we want to + # open the file with only u+rw set. 
Also, we use the stat constants + because ansible still supports python 2.4 and the octal syntax changed + out_file = os.fdopen( + os.open( + file_path, os.O_CREAT | os.O_WRONLY, + stat.S_IRUSR | stat.S_IWUSR), 'wb') + out_file.write(json.dumps(data).encode('utf8')) + out_file.close() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + timeout=dict(type='str', default='30m'), + puppetmaster=dict(type='str'), + modulepath=dict(type='str'), + manifest=dict(type='str'), + confdir=dict(type='str'), + noop=dict(type='bool'), + logdest=dict(type='str', default='stdout', choices=['all', 'stdout', 'syslog']), + # The following is not related to Ansible's diff; see https://github.com/ansible-collections/community.general/pull/3980#issuecomment-1005666154 + show_diff=dict( + type='bool', default=False, aliases=['show-diff'], + deprecated_aliases=[dict(name='show-diff', version='7.0.0', collection_name='community.general')]), + facts=dict(type='dict'), + facter_basename=dict(type='str', default='ansible'), + environment=dict(type='str'), + certname=dict(type='str'), + tags=dict(type='list', elements='str'), + skip_tags=dict(type='list', elements='str'), + execute=dict(type='str'), + summarize=dict(type='bool', default=False), + debug=dict(type='bool', default=False), + verbose=dict(type='bool', default=False), + use_srv_records=dict(type='bool'), + ), + supports_check_mode=True, + mutually_exclusive=[ + ('puppetmaster', 'manifest'), + ('puppetmaster', 'manifest', 'execute'), + ('puppetmaster', 'modulepath'), + ], + ) + p = module.params + + if p['manifest']: + if not os.path.exists(p['manifest']): + module.fail_json( + msg="Manifest file %(manifest)s not found." % dict( + manifest=p['manifest'])) + + # Check if puppet is disabled here + if not p['manifest']: + puppet_utils.ensure_agent_enabled(module) + + if module.params['facts'] and not module.check_mode: + _write_structured_data( + puppet_utils.get_facter_dir(), + module.params['facter_basename'], + module.params['facts']) + + runner = puppet_utils.puppet_runner(module) + + if not p['manifest'] and not p['execute']: + args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records" + with runner(args_order) as ctx: + rc, stdout, stderr = ctx.run() + else: + args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose" + with runner(args_order) as ctx: + rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']]) + + if rc == 0: + # success + module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr) + elif rc == 1: + # rc==1 could be because it's disabled + # rc==1 could also mean there was a compilation failure + disabled = "administratively disabled" in stdout + if disabled: + msg = "puppet is disabled" + else: + msg = "puppet did not run" + module.exit_json( + rc=rc, disabled=disabled, msg=msg, + error=True, stdout=stdout, stderr=stderr) + elif rc == 2: + # success with changes + module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr) + elif rc == 124: + # timeout + module.exit_json( + rc=rc, msg="%s timed out" % ctx.cmd, stdout=stdout, stderr=stderr) + else: + # failure + module.fail_json( + rc=rc, msg="%s failed with return code: %d" % (ctx.cmd, rc), + stdout=stdout, stderr=stderr) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pushbullet.py b/ansible_collections/community/general/plugins/modules/pushbullet.py new file mode
100644 index 000000000..c7e20c373 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pushbullet.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +author: "Willy Barro (@willybarro)" +requirements: [ pushbullet.py ] +module: pushbullet +short_description: Sends notifications to Pushbullet +description: + - This module sends push notifications via Pushbullet to channels or devices. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + api_key: + type: str + description: + - Pushbullet API token. + required: true + channel: + type: str + description: + - The channel TAG to which you wish to broadcast a push notification, + as seen on the "My Channels" > "Edit your channel" page at + Pushbullet. + device: + type: str + description: + - The device NAME to which you wish to send a push notification, + as seen on the Pushbullet main page. + push_type: + type: str + description: + - The type of push to send. + default: note + choices: [ "note", "link" ] + title: + type: str + description: + - Title of the notification. + required: true + body: + type: str + description: + - Body of the notification, e.g. details of the fault you are alerting about. + url: + type: str + description: + - URL field, used when I(push_type) is C(link). + +notes: + - Requires the pushbullet.py Python package on the remote host. + You can install it via pip with C(pip install pushbullet.py). + See U(https://github.com/randomchars/pushbullet.py). +''' + +EXAMPLES = ''' +- name: Sends a push notification to a device + community.general.pushbullet: + api_key: "ABC123abc123ABC123abc123ABC123ab" + device: "Chrome" + title: "You may see this on Google Chrome" + +- name: Sends a link to a device + community.general.pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + device: Chrome + push_type: link + title: Ansible Documentation + body: https://docs.ansible.com/ + +- name: Sends a push notification to a channel + community.general.pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + channel: my-awesome-channel + title: "Broadcasting a message to the #my-awesome-channel folks" + +- name: Sends a push notification with title and body to a channel + community.general.pushbullet: + api_key: ABC123abc123ABC123abc123ABC123ab + channel: my-awesome-channel + title: ALERT!
Signup service is down + body: Error rate on signup service is over 90% for more than 2 minutes +''' + +import traceback + +PUSHBULLET_IMP_ERR = None +try: + from pushbullet import PushBullet + from pushbullet.errors import InvalidKeyError, PushError +except ImportError: + PUSHBULLET_IMP_ERR = traceback.format_exc() + pushbullet_found = False +else: + pushbullet_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + + +# =========================================== +# Main +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(type='str', required=True, no_log=True), + channel=dict(type='str', default=None), + device=dict(type='str', default=None), + push_type=dict(type='str', default="note", choices=['note', 'link']), + title=dict(type='str', required=True), + body=dict(type='str', default=None), + url=dict(type='str', default=None), + ), + mutually_exclusive=( + ['channel', 'device'], + ), + supports_check_mode=True + ) + + api_key = module.params['api_key'] + channel = module.params['channel'] + device = module.params['device'] + push_type = module.params['push_type'] + title = module.params['title'] + body = module.params['body'] + url = module.params['url'] + + if not pushbullet_found: + module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR) + + # Init pushbullet + try: + pb = PushBullet(api_key) + target = None + except InvalidKeyError: + module.fail_json(msg="Invalid api_key") + + # Checks for channel/device + if device is None and channel is None: + module.fail_json(msg="You need to provide a channel or a device.") + + # Search for given device + if device is not None: + devices_by_nickname = {} + for d in pb.devices: + devices_by_nickname[d.nickname] = d + + if device in devices_by_nickname: + target = devices_by_nickname[device] + else: + module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys()))) + + # Search for given channel + if channel is not None: + channels_by_tag = {} + for c in pb.channels: + channels_by_tag[c.channel_tag] = c + + if channel in channels_by_tag: + target = channels_by_tag[channel] + else: + module.fail_json(msg="Channel '%s' not found. 
Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys()))) + + # If in check mode, exit saying that we succeeded + if module.check_mode: + module.exit_json(changed=False, msg="OK") + + # Send push notification + try: + if push_type == "link": + target.push_link(title, url, body) + else: + target.push_note(title, body) + module.exit_json(changed=False, msg="OK") + except PushError as e: + module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e)) + + module.fail_json(msg="An unknown error has occurred") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/pushover.py b/ansible_collections/community/general/plugins/modules/pushover.py new file mode 100644 index 000000000..f5493731f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/pushover.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2012, Jim Richardson +# Copyright (c) 2019, Bernd Arnold +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: pushover +short_description: Send notifications via U(https://pushover.net) +description: + - Send notifications via Pushover to a subscriber list of devices and email + addresses. Requires the Pushover app on the devices. +notes: + - You will require a pushover.net account to use this module, but no account + is required to receive messages. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + msg: + type: str + description: + - The message you wish to send. + required: true + app_token: + type: str + description: + - Pushover-issued token identifying your Pushover app. + required: true + user_key: + type: str + description: + - Pushover-issued authentication key for your user. + required: true + title: + type: str + description: + - Message title. + required: false + pri: + type: str + description: + - Message priority (see U(https://pushover.net) for details). + required: false + default: '0' + choices: [ '-2', '-1', '0', '1', '2' ] + device: + type: str + description: + - A device the message should be sent to. Multiple devices can be specified, separated by a comma. + required: false + version_added: 1.2.0 + +author: + - "Jim Richardson (@weaselkeeper)" + - "Bernd Arnold (@wopfel)" +''' + +EXAMPLES = ''' +- name: Send notifications via pushover.net + community.general.pushover: + msg: '{{ inventory_hostname }} is acting strange ...' + app_token: wxfdksl + user_key: baa5fe97f2c5ab3ca8f0bb59 + delegate_to: localhost + +- name: Send a titled notification with high priority via pushover.net + community.general.pushover: + title: 'Alert!'
+ msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic' + pri: 1 + app_token: wxfdksl + user_key: baa5fe97f2c5ab3ca8f0bb59 + delegate_to: localhost + +- name: Send notifications via pushover.net to a specific device + community.general.pushover: + msg: '{{ inventory_hostname }} has been lost somewhere' + app_token: wxfdksl + user_key: baa5fe97f2c5ab3ca8f0bb59 + device: admins-iPhone + delegate_to: localhost +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + + +class Pushover(object): + ''' Instantiates a pushover object, use it to send notifications ''' + base_uri = 'https://api.pushover.net' + + def __init__(self, module, user, token): + self.module = module + self.user = user + self.token = token + + def run(self, priority, msg, title, device): + ''' Do, whatever it is, we do. ''' + + url = '%s/1/messages.json' % (self.base_uri) + + # parse config + options = dict(user=self.user, + token=self.token, + priority=priority, + message=msg) + + if title is not None: + options = dict(options, + title=title) + + if device is not None: + options = dict(options, + device=device) + + data = urlencode(options) + + headers = {"Content-type": "application/x-www-form-urlencoded"} + r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers) + if info['status'] != 200: + raise Exception(info) + + return r.read() + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + title=dict(type='str'), + msg=dict(required=True), + app_token=dict(required=True, no_log=True), + user_key=dict(required=True, no_log=True), + pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']), + device=dict(type='str'), + ), + ) + + msg_object = Pushover(module, module.params['user_key'], module.params['app_token']) + try: + response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device']) + except Exception: + module.fail_json(msg='Unable to send msg via pushover') + + module.exit_json(msg='message sent successfully: %s' % response, changed=False) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/python_requirements_info.py b/ansible_collections/community/general/plugins/modules/python_requirements_info.py new file mode 100644 index 000000000..231114a1d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/python_requirements_info.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +module: python_requirements_info +short_description: Show python path and assert dependency versions +description: + - Get info about available Python requirements on the target host, including listing required libraries and gathering versions. + - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + dependencies: + type: list + elements: str + description: > + A list of version-likes or module names to check for installation. 
+ Supported operators: <, >, <=, >=, or ==. The bare module name like + I(ansible), the module with a specific version like I(boto3==1.6.1), or a + partial version like I(requests>2) are all valid specifications. + default: [] +author: + - Will Thames (@willthames) + - Ryan Scott Brown (@ryansb) +''' + +EXAMPLES = ''' +- name: Show python lib/site paths + community.general.python_requirements_info: + +- name: Check for modern boto3 and botocore versions + community.general.python_requirements_info: + dependencies: + - boto3>1.6 + - botocore<2 +''' + +RETURN = ''' +python: + description: path to python version used + returned: always + type: str + sample: /usr/local/opt/python@2/bin/python2.7 +python_version: + description: version of python + returned: always + type: str + sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]" +python_version_info: + description: breakdown version of python + returned: always + type: dict + contains: + major: + description: The C(major) component of the python interpreter version. + returned: always + type: int + sample: 3 + minor: + description: The C(minor) component of the python interpreter version. + returned: always + type: int + sample: 8 + micro: + description: The C(micro) component of the python interpreter version. + returned: always + type: int + sample: 10 + releaselevel: + description: The C(releaselevel) component of the python interpreter version. + returned: always + type: str + sample: final + serial: + description: The C(serial) component of the python interpreter version. + returned: always + type: int + sample: 0 + version_added: 4.2.0 +python_system_path: + description: List of paths python is looking for modules in + returned: always + type: list + sample: + - /usr/local/opt/python@2/site-packages/ + - /usr/lib/python/site-packages/ +valid: + description: A dictionary of dependencies that matched their desired versions. 
If no version was specified, then I(desired) will be null + returned: always + type: dict + sample: + boto3: + desired: null + installed: 1.7.60 + botocore: + desired: botocore<2 + installed: 1.10.60 +mismatched: + description: A dictionary of dependencies that did not satisfy the desired version + returned: always + type: dict + sample: + botocore: + desired: botocore>2 + installed: 1.10.60 +not_found: + description: A list of packages that could not be imported at all, and are not installed + returned: always + type: list + sample: + - boto4 + - requests +''' + +import re +import sys +import operator + +HAS_DISTUTILS = False +try: + import pkg_resources + from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + HAS_DISTUTILS = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule + +operations = { + '<=': operator.le, + '>=': operator.ge, + '<': operator.lt, + '>': operator.gt, + '==': operator.eq, +} + +python_version_info = dict( + major=sys.version_info[0], + minor=sys.version_info[1], + micro=sys.version_info[2], + releaselevel=sys.version_info[3], + serial=sys.version_info[4], +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dependencies=dict(type='list', elements='str', default=[]) + ), + supports_check_mode=True, + ) + if not HAS_DISTUTILS: + module.fail_json( + msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.', + python=sys.executable, + python_version=sys.version, + python_version_info=python_version_info, + python_system_path=sys.path, + ) + pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(?:(==|[><]=?)([0-9.]+))?$') + + results = dict( + not_found=[], + mismatched={}, + valid={}, + ) + + for dep in module.params['dependencies']: + match = pkg_dep_re.match(dep) + if not match: + module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep)) + pkg, op, version = match.groups() + if op is not None and op not in operations: + module.fail_json(msg="Failed to parse version requirement '{0}'. 
Operator must be one of >, <, <=, >=, or ==".format(dep)) + try: + existing = pkg_resources.get_distribution(pkg).version + except pkg_resources.DistributionNotFound: + # not there + results['not_found'].append(pkg) + continue + if op is None and version is None: + results['valid'][pkg] = { + 'installed': existing, + 'desired': None, + } + elif operations[op](LooseVersion(existing), LooseVersion(version)): + results['valid'][pkg] = { + 'installed': existing, + 'desired': dep, + } + else: + results['mismatched'][pkg] = { + 'installed': existing, + 'desired': dep, + } + + module.exit_json( + python=sys.executable, + python_version=sys.version, + python_version_info=python_version_info, + python_system_path=sys.path, + **results + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax.py b/ansible_collections/community/general/plugins/modules/rax.py new file mode 100644 index 000000000..47c0a6d1b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax.py @@ -0,0 +1,903 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax +short_description: Create / delete an instance in Rackspace Public Cloud +description: + - creates / deletes a Rackspace Public Cloud instance and optionally + waits for it to be 'running'. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + auto_increment: + description: + - Whether or not to increment a single number with the name of the + created servers. Only applicable when used with the I(group) attribute + or meta key. + type: bool + default: true + boot_from_volume: + description: + - Whether or not to boot the instance from a Cloud Block Storage volume. + If C(true) and I(image) is specified a new volume will be created at + boot time. I(boot_volume_size) is required with I(image) to create a + new volume at boot time. + type: bool + default: false + boot_volume: + type: str + description: + - Cloud Block Storage ID or Name to use as the boot volume of the + instance + boot_volume_size: + type: int + description: + - Size of the volume to create in Gigabytes. This is only required with + I(image) and I(boot_from_volume). + default: 100 + boot_volume_terminate: + description: + - Whether the I(boot_volume) or newly created volume from I(image) will + be terminated when the server is terminated + type: bool + default: false + config_drive: + description: + - Attach read-only configuration drive to server as label config-2 + type: bool + default: false + count: + type: int + description: + - number of instances to launch + default: 1 + count_offset: + type: int + description: + - number count to start at + default: 1 + disk_config: + type: str + description: + - Disk partitioning strategy + - If not specified it will assume the value C(auto). + choices: + - auto + - manual + exact_count: + description: + - Explicitly ensure an exact count of instances, used with + state=active/present. If specified as C(true) and I(count) is less than + the servers matched, servers will be deleted to match the count. If + the number of matched servers is fewer than specified in I(count) + additional servers will be added. 
+ type: bool + default: false + extra_client_args: + type: dict + default: {} + description: + - A hash of key/value pairs to be used when creating the cloudservers + client. This is considered an advanced option, use it wisely and + with caution. + extra_create_args: + type: dict + default: {} + description: + - A hash of key/value pairs to be used when creating a new server. + This is considered an advanced option, use it wisely and with caution. + files: + type: dict + default: {} + description: + - Files to insert into the instance. remotefilename:localcontent + flavor: + type: str + description: + - flavor to use for the instance + group: + type: str + description: + - host group to assign to server, is also used for idempotent operations + to ensure a specific number of instances + image: + type: str + description: + - image to use for the instance. Can be an C(id), C(human_id) or C(name). + With I(boot_from_volume), a Cloud Block Storage volume will be created + with this image + instance_ids: + type: list + elements: str + description: + - list of instance ids, currently only used when state='absent' to + remove instances + key_name: + type: str + description: + - key pair to use on the instance + aliases: + - keypair + meta: + type: dict + default: {} + description: + - A hash of metadata to associate with the instance + name: + type: str + description: + - Name to give the instance + networks: + type: list + elements: str + description: + - The network to attach to the instances. If specified, you must include + ALL networks including the public and private interfaces. Can be C(id) + or C(label). + default: + - public + - private + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + user_data: + type: str + description: + - Data to be uploaded to the servers config drive. This option implies + I(config_drive). Can be a file path or a string + wait: + description: + - wait for the instance to be in state 'running' before returning + type: bool + default: false + wait_timeout: + type: int + description: + - how long before wait gives up, in seconds + default: 300 +author: + - "Jesse Keating (@omgjlk)" + - "Matt Martz (@sivel)" +notes: + - I(exact_count) can be "destructive" if the number of running servers in + the I(group) is larger than that specified in I(count). In such a case, the + I(state) is effectively set to C(absent) and the extra servers are deleted. + In the case of deletion, the returned data structure will have C(action) + set to C(delete), and the oldest servers in the group will be deleted. 
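+# As a consequence of the note above, combining I(state=absent) with
+# I(group) routes through the same exact_count machinery: I(count) of the
+# group's servers are deleted (per the note above, the oldest ones). A
+# hypothetical task, for illustration only:
+#
+# - name: Remove two servers from the test group
+#   local_action:
+#     module: rax
+#     credentials: ~/.raxpub
+#     group: test
+#     count: 2
+#     state: absent
+#     wait: true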
+extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Build a Cloud Server + gather_facts: false + tasks: + - name: Server build request + local_action: + module: rax + credentials: ~/.raxpub + name: rax-test1 + flavor: 5 + image: b11d9567-e412-4255-96b9-bd63ab23bcfe + key_name: my_rackspace_key + files: + /root/test.txt: /home/localuser/test.txt + wait: true + state: present + networks: + - private + - public + register: rax + +- name: Build an exact count of cloud servers with incremented names + hosts: local + gather_facts: false + tasks: + - name: Server build requests + local_action: + module: rax + credentials: ~/.raxpub + name: test%03d.example.org + flavor: performance1-1 + image: ubuntu-1204-lts-precise-pangolin + state: present + count: 10 + count_offset: 10 + exact_count: true + group: test + wait: true + register: rax +''' + +import json +import os +import re +import time + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume, + rax_find_image, rax_find_network, rax_find_volume, + rax_required_together, rax_to_dict, setup_rax_module) +from ansible.module_utils.six.moves import xrange +from ansible.module_utils.six import string_types + + +def rax_find_server_image(module, server, image, boot_volume): + if not image and boot_volume: + vol = rax_find_bootable_volume(module, pyrax, server, + exit=False) + if not vol: + return None + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if vol_image_id: + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if server_image: + server.image = dict(id=server_image) + + # Match image IDs taking care of boot from volume + if image and not server.image: + vol = rax_find_bootable_volume(module, pyrax, server) + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if not vol_image_id: + return None + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if image != server_image: + return None + + server.image = dict(id=server_image) + elif image and server.image['id'] != image: + return None + + return server.image + + +def create(module, names=None, flavor=None, image=None, meta=None, key_name=None, + files=None, wait=True, wait_timeout=300, disk_config=None, + group=None, nics=None, extra_create_args=None, user_data=None, + config_drive=False, existing=None, block_device_mapping_v2=None): + names = [] if names is None else names + meta = {} if meta is None else meta + files = {} if files is None else files + nics = [] if nics is None else nics + extra_create_args = {} if extra_create_args is None else extra_create_args + existing = [] if existing is None else existing + block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2 + + cs = pyrax.cloudservers + changed = False + + if user_data: + config_drive = True + + if user_data and os.path.isfile(os.path.expanduser(user_data)): + try: + user_data = os.path.expanduser(user_data) + f = open(user_data) + user_data = f.read() + f.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % user_data) + + # Handle the file contents + for rpath in files.keys(): + lpath = 
os.path.expanduser(files[rpath]) + try: + fileobj = open(lpath, 'r') + files[rpath] = fileobj.read() + fileobj.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % lpath) + try: + servers = [] + bdmv2 = block_device_mapping_v2 + for name in names: + servers.append(cs.servers.create(name=name, image=image, + flavor=flavor, meta=meta, + key_name=key_name, + files=files, nics=nics, + disk_config=disk_config, + config_drive=config_drive, + userdata=user_data, + block_device_mapping_v2=bdmv2, + **extra_create_args)) + except Exception as e: + if e.message: + msg = str(e.message) + else: + msg = repr(e) + module.fail_json(msg=msg) + else: + changed = True + + if wait: + end_time = time.time() + wait_timeout + infinite = wait_timeout == 0 + while infinite or time.time() < end_time: + for server in servers: + try: + server.get() + except Exception: + server.status = 'ERROR' + + if not filter(lambda s: s.status not in FINAL_STATUSES, + servers): + break + time.sleep(5) + + success = [] + error = [] + timeout = [] + for server in servers: + try: + server.get() + except Exception: + server.status = 'ERROR' + instance = rax_to_dict(server, 'server') + if server.status == 'ACTIVE' or not wait: + success.append(instance) + elif server.status == 'ERROR': + error.append(instance) + elif wait: + timeout.append(instance) + + untouched = [rax_to_dict(s, 'server') for s in existing] + instances = success + untouched + + results = { + 'changed': changed, + 'action': 'create', + 'instances': instances, + 'success': success, + 'error': error, + 'timeout': timeout, + 'instance_ids': { + 'instances': [i['id'] for i in instances], + 'success': [i['id'] for i in success], + 'error': [i['id'] for i in error], + 'timeout': [i['id'] for i in timeout] + } + } + + if timeout: + results['msg'] = 'Timeout waiting for all servers to build' + elif error: + results['msg'] = 'Failed to build all servers' + + if 'msg' in results: + module.fail_json(**results) + else: + module.exit_json(**results) + + +def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None): + instance_ids = [] if instance_ids is None else instance_ids + kept = [] if kept is None else kept + + cs = pyrax.cloudservers + + changed = False + instances = {} + servers = [] + + for instance_id in instance_ids: + servers.append(cs.servers.get(instance_id)) + + for server in servers: + try: + server.delete() + except Exception as e: + module.fail_json(msg=e.message) + else: + changed = True + + instance = rax_to_dict(server, 'server') + instances[instance['id']] = instance + + # If requested, wait for server deletion + if wait: + end_time = time.time() + wait_timeout + infinite = wait_timeout == 0 + while infinite or time.time() < end_time: + for server in servers: + instance_id = server.id + try: + server.get() + except Exception: + instances[instance_id]['status'] = 'DELETED' + instances[instance_id]['rax_status'] = 'DELETED' + + if not filter(lambda s: s['status'] not in ('', 'DELETED', + 'ERROR'), + instances.values()): + break + + time.sleep(5) + + timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'), + instances.values()) + error = filter(lambda s: s['status'] in ('ERROR'), + instances.values()) + success = filter(lambda s: s['status'] in ('', 'DELETED'), + instances.values()) + + instances = [rax_to_dict(s, 'server') for s in kept] + + results = { + 'changed': changed, + 'action': 'delete', + 'instances': instances, + 'success': success, + 'error': error, + 'timeout': timeout, + 'instance_ids': { + 
'instances': [i['id'] for i in instances], + 'success': [i['id'] for i in success], + 'error': [i['id'] for i in error], + 'timeout': [i['id'] for i in timeout] + } + } + + if timeout: + results['msg'] = 'Timeout waiting for all servers to delete' + elif error: + results['msg'] = 'Failed to delete all servers' + + if 'msg' in results: + module.fail_json(**results) + else: + module.exit_json(**results) + + +def cloudservers(module, state=None, name=None, flavor=None, image=None, + meta=None, key_name=None, files=None, wait=True, wait_timeout=300, + disk_config=None, count=1, group=None, instance_ids=None, + exact_count=False, networks=None, count_offset=0, + auto_increment=False, extra_create_args=None, user_data=None, + config_drive=False, boot_from_volume=False, + boot_volume=None, boot_volume_size=None, + boot_volume_terminate=False): + meta = {} if meta is None else meta + files = {} if files is None else files + instance_ids = [] if instance_ids is None else instance_ids + networks = [] if networks is None else networks + extra_create_args = {} if extra_create_args is None else extra_create_args + + cs = pyrax.cloudservers + cnw = pyrax.cloud_networks + if not cnw: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if state == 'present' or (state == 'absent' and instance_ids is None): + if not boot_from_volume and not boot_volume and not image: + module.fail_json(msg='image is required for the "rax" module') + + for arg, value in dict(name=name, flavor=flavor).items(): + if not value: + module.fail_json(msg='%s is required for the "rax" module' % + arg) + + if boot_from_volume and not image and not boot_volume: + module.fail_json(msg='image or boot_volume are required for the ' + '"rax" with boot_from_volume') + + if boot_from_volume and image and not boot_volume_size: + module.fail_json(msg='boot_volume_size is required for the "rax" ' + 'module with boot_from_volume and image') + + if boot_from_volume and image and boot_volume: + image = None + + servers = [] + + # Add the group meta key + if group and 'group' not in meta: + meta['group'] = group + elif 'group' in meta and group is None: + group = meta['group'] + + # Normalize and ensure all metadata values are strings + for k, v in meta.items(): + if isinstance(v, list): + meta[k] = ','.join(['%s' % i for i in v]) + elif isinstance(v, dict): + meta[k] = json.dumps(v) + elif not isinstance(v, string_types): + meta[k] = '%s' % v + + # When using state=absent with group, the absent block won't match the + # names properly. 
Use the exact_count functionality to decrease the count + # to the desired level + was_absent = False + if group is not None and state == 'absent': + exact_count = True + state = 'present' + was_absent = True + + if image: + image = rax_find_image(module, pyrax, image) + + nics = [] + if networks: + for network in networks: + nics.extend(rax_find_network(module, pyrax, network)) + + # act on the state + if state == 'present': + # Idempotent ensurance of a specific count of servers + if exact_count is not False: + # See if we can find servers that match our options + if group is None: + module.fail_json(msg='"group" must be provided when using ' + '"exact_count"') + + if auto_increment: + numbers = set() + + # See if the name is a printf like string, if not append + # %d to the end + try: + name % 0 + except TypeError as e: + if e.message.startswith('not all'): + name = '%s%%d' % name + else: + module.fail_json(msg=e.message) + + # regex pattern to match printf formatting + pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) + for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + if server.metadata.get('group') == group: + servers.append(server) + match = re.search(pattern, server.name) + if match: + number = int(match.group(1)) + numbers.add(number) + + number_range = xrange(count_offset, count_offset + count) + available_numbers = list(set(number_range) + .difference(numbers)) + else: # Not auto incrementing + for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + if server.metadata.get('group') == group: + servers.append(server) + # available_numbers not needed here, we inspect auto_increment + # again later + + # If state was absent but the count was changed, + # assume we only wanted to remove that number of instances + if was_absent: + diff = len(servers) - count + if diff < 0: + count = 0 + else: + count = diff + + if len(servers) > count: + # We have more servers than we need, set state='absent' + # and delete the extras, this should delete the oldest + state = 'absent' + kept = servers[:count] + del servers[:count] + instance_ids = [] + for server in servers: + instance_ids.append(server.id) + delete(module, instance_ids=instance_ids, wait=wait, + wait_timeout=wait_timeout, kept=kept) + elif len(servers) < count: + # we have fewer servers than we need + if auto_increment: + # auto incrementing server numbers + names = [] + name_slice = count - len(servers) + numbers_to_use = available_numbers[:name_slice] + for number in numbers_to_use: + names.append(name % number) + else: + # We are not auto incrementing server numbers, + # create a list of 'name' that matches how many we need + names = [name] * (count - len(servers)) + else: + # we have the right number of servers, just return info + # about all of the matched servers + instances = [] + instance_ids = [] + for server in servers: + instances.append(rax_to_dict(server, 'server')) + instance_ids.append(server.id) + module.exit_json(changed=False, action=None, + instances=instances, + success=[], error=[], timeout=[], + instance_ids={'instances': instance_ids, + 'success': [], 'error': [], + 'timeout': []}) + else: # not called with exact_count=True + if group is not None: + if auto_increment: + # we are auto incrementing server numbers, but not with + # exact_count + numbers = set() + + # See if the name is a printf like string, if not append + # %d to the end + try: + name % 0 + except TypeError as e: + if e.message.startswith('not 
all'): + name = '%s%%d' % name + else: + module.fail_json(msg=e.message) + + # regex pattern to match printf formatting + pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) + for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + if server.metadata.get('group') == group: + servers.append(server) + match = re.search(pattern, server.name) + if match: + number = int(match.group(1)) + numbers.add(number) + + number_range = xrange(count_offset, + count_offset + count + len(numbers)) + available_numbers = list(set(number_range) + .difference(numbers)) + names = [] + numbers_to_use = available_numbers[:count] + for number in numbers_to_use: + names.append(name % number) + else: + # Not auto incrementing + names = [name] * count + else: + # No group was specified, and not using exact_count + # Perform more simplistic matching + search_opts = { + 'name': '^%s$' % name, + 'flavor': flavor + } + servers = [] + for server in cs.servers.list(search_opts=search_opts): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + + if not rax_find_server_image(module, server, image, + boot_volume): + continue + + # Ignore servers with non matching metadata + if server.metadata != meta: + continue + servers.append(server) + + if len(servers) >= count: + # We have more servers than were requested, don't do + # anything. Not running with exact_count=True, so we assume + # more is OK + instances = [] + for server in servers: + instances.append(rax_to_dict(server, 'server')) + + instance_ids = [i['id'] for i in instances] + module.exit_json(changed=False, action=None, + instances=instances, success=[], error=[], + timeout=[], + instance_ids={'instances': instance_ids, + 'success': [], 'error': [], + 'timeout': []}) + + # We need more servers to reach out target, create names for + # them, we aren't performing auto_increment here + names = [name] * (count - len(servers)) + + block_device_mapping_v2 = [] + if boot_from_volume: + mapping = { + 'boot_index': '0', + 'delete_on_termination': boot_volume_terminate, + 'destination_type': 'volume', + } + if image: + mapping.update({ + 'uuid': image, + 'source_type': 'image', + 'volume_size': boot_volume_size, + }) + image = None + elif boot_volume: + volume = rax_find_volume(module, pyrax, boot_volume) + mapping.update({ + 'uuid': pyrax.utils.get_id(volume), + 'source_type': 'volume', + }) + block_device_mapping_v2.append(mapping) + + create(module, names=names, flavor=flavor, image=image, + meta=meta, key_name=key_name, files=files, wait=wait, + wait_timeout=wait_timeout, disk_config=disk_config, group=group, + nics=nics, extra_create_args=extra_create_args, + user_data=user_data, config_drive=config_drive, + existing=servers, + block_device_mapping_v2=block_device_mapping_v2) + + elif state == 'absent': + if instance_ids is None: + # We weren't given an explicit list of server IDs to delete + # Let's match instead + search_opts = { + 'name': '^%s$' % name, + 'flavor': flavor + } + for server in cs.servers.list(search_opts=search_opts): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + + if not rax_find_server_image(module, server, image, + boot_volume): + continue + + # Ignore servers with non matching metadata + if meta != server.metadata: + continue + + servers.append(server) + + # Build a list of server IDs to delete + instance_ids = [] + for server in servers: + if len(instance_ids) < count: + instance_ids.append(server.id) + else: + break + + if not instance_ids: + # No server IDs 
were matched for deletion, or no IDs were + # explicitly provided, just exit and don't do anything + module.exit_json(changed=False, action=None, instances=[], + success=[], error=[], timeout=[], + instance_ids={'instances': [], + 'success': [], 'error': [], + 'timeout': []}) + + delete(module, instance_ids=instance_ids, wait=wait, + wait_timeout=wait_timeout) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + auto_increment=dict(default=True, type='bool'), + boot_from_volume=dict(default=False, type='bool'), + boot_volume=dict(type='str'), + boot_volume_size=dict(type='int', default=100), + boot_volume_terminate=dict(type='bool', default=False), + config_drive=dict(default=False, type='bool'), + count=dict(default=1, type='int'), + count_offset=dict(default=1, type='int'), + disk_config=dict(choices=['auto', 'manual']), + exact_count=dict(default=False, type='bool'), + extra_client_args=dict(type='dict', default={}), + extra_create_args=dict(type='dict', default={}), + files=dict(type='dict', default={}), + flavor=dict(), + group=dict(), + image=dict(), + instance_ids=dict(type='list', elements='str'), + key_name=dict(aliases=['keypair']), + meta=dict(type='dict', default={}), + name=dict(), + networks=dict(type='list', elements='str', default=['public', 'private']), + state=dict(default='present', choices=['present', 'absent']), + user_data=dict(no_log=True), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=300, type='int'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + auto_increment = module.params.get('auto_increment') + boot_from_volume = module.params.get('boot_from_volume') + boot_volume = module.params.get('boot_volume') + boot_volume_size = module.params.get('boot_volume_size') + boot_volume_terminate = module.params.get('boot_volume_terminate') + config_drive = module.params.get('config_drive') + count = module.params.get('count') + count_offset = module.params.get('count_offset') + disk_config = module.params.get('disk_config') + if disk_config: + disk_config = disk_config.upper() + exact_count = module.params.get('exact_count', False) + extra_client_args = module.params.get('extra_client_args') + extra_create_args = module.params.get('extra_create_args') + files = module.params.get('files') + flavor = module.params.get('flavor') + group = module.params.get('group') + image = module.params.get('image') + instance_ids = module.params.get('instance_ids') + key_name = module.params.get('key_name') + meta = module.params.get('meta') + name = module.params.get('name') + networks = module.params.get('networks') + state = module.params.get('state') + user_data = module.params.get('user_data') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + setup_rax_module(module, pyrax) + + if extra_client_args: + pyrax.cloudservers = pyrax.connect_to_cloudservers( + region=pyrax.cloudservers.client.region_name, + **extra_client_args) + client = pyrax.cloudservers.client + if 'bypass_url' in extra_client_args: + client.management_url = extra_client_args['bypass_url'] + + if pyrax.cloudservers is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + cloudservers(module, state=state, name=name, flavor=flavor, + image=image, meta=meta, key_name=key_name, files=files, + wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, + count=count, group=group, instance_ids=instance_ids, + exact_count=exact_count, networks=networks, + count_offset=count_offset, auto_increment=auto_increment, + extra_create_args=extra_create_args, user_data=user_data, + config_drive=config_drive, boot_from_volume=boot_from_volume, + boot_volume=boot_volume, boot_volume_size=boot_volume_size, + boot_volume_terminate=boot_volume_terminate) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs.py b/ansible_collections/community/general/plugins/modules/rax_cbs.py new file mode 100644 index 000000000..c99626904 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_cbs.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_cbs +short_description: Manipulate Rackspace Cloud Block Storage Volumes +description: + - Manipulate Rackspace Cloud Block Storage Volumes + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + description: + type: str + description: + - Description to give the volume being created. + image: + type: str + description: + - Image to use for bootable volumes. Can be an C(id), C(human_id) or + C(name). This option requires C(pyrax>=1.9.3). + meta: + type: dict + default: {} + description: + - A hash of metadata to associate with the volume. + name: + type: str + description: + - Name to give the volume being created. + required: true + size: + type: int + description: + - Size of the volume to create in Gigabytes. + default: 100 + snapshot_id: + type: str + description: + - The id of the snapshot to create the volume from. + state: + type: str + description: + - Indicate desired state of the resource. + choices: + - present + - absent + default: present + volume_type: + type: str + description: + - Type of the volume being created. + choices: + - SATA + - SSD + default: SATA + wait: + description: + - Wait for the volume to be in state C(available) before returning. + type: bool + default: false + wait_timeout: + type: int + description: + - how long before wait gives up, in seconds. + default: 300 +author: + - "Christopher H. 
Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Build a Block Storage Volume + gather_facts: false + hosts: local + connection: local + tasks: + - name: Storage volume create request + local_action: + module: rax_cbs + credentials: ~/.raxpub + name: my-volume + description: My Volume + volume_type: SSD + size: 150 + region: DFW + wait: true + state: present + meta: + app: my-cool-app + register: my_volume +''' + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume, + rax_required_together, rax_to_dict, setup_rax_module) + + +def cloud_block_storage(module, state, name, description, meta, size, + snapshot_id, volume_type, wait, wait_timeout, + image): + changed = False + volume = None + instance = {} + + cbs = pyrax.cloud_blockstorage + + if cbs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if image: + # pyrax<1.9.3 did not have support for specifying an image when + # creating a volume which is required for bootable volumes + if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): + module.fail_json(msg='Creating a bootable volume requires ' + 'pyrax>=1.9.3') + image = rax_find_image(module, pyrax, image) + + volume = rax_find_volume(module, pyrax, name) + + if state == 'present': + if not volume: + kwargs = dict() + if image: + kwargs['image'] = image + try: + volume = cbs.create(name, size=size, volume_type=volume_type, + description=description, + metadata=meta, + snapshot_id=snapshot_id, **kwargs) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + if wait: + attempts = wait_timeout // 5 + pyrax.utils.wait_for_build(volume, interval=5, + attempts=attempts) + + volume.get() + instance = rax_to_dict(volume) + + result = dict(changed=changed, volume=instance) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + elif wait and volume.status not in VOLUME_STATUS: + result['msg'] = 'Timeout waiting on %s' % volume.id + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + elif state == 'absent': + if volume: + instance = rax_to_dict(volume) + try: + volume.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, volume=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + description=dict(type='str'), + image=dict(type='str'), + meta=dict(type='dict', default={}), + name=dict(required=True), + size=dict(type='int', default=100), + snapshot_id=dict(), + state=dict(default='present', choices=['present', 'absent']), + volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + description = 
module.params.get('description') + image = module.params.get('image') + meta = module.params.get('meta') + name = module.params.get('name') + size = module.params.get('size') + snapshot_id = module.params.get('snapshot_id') + state = module.params.get('state') + volume_type = module.params.get('volume_type') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + + cloud_block_storage(module, state, name, description, meta, size, + snapshot_id, volume_type, wait, wait_timeout, + image) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py b/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py new file mode 100644 index 000000000..8f540fa0f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py @@ -0,0 +1,228 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_cbs_attachments +short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments +description: + - Manipulate Rackspace Cloud Block Storage Volume Attachments + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + device: + type: str + description: + - The device path to attach the volume to, e.g. /dev/xvde. + - Before 2.4 this was a required field. Now it can be left to null to auto assign the device name. + volume: + type: str + description: + - Name or id of the volume to attach/detach + required: true + server: + type: str + description: + - Name or id of the server to attach/detach + required: true + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + wait: + description: + - wait for the volume to be in 'in-use'/'available' state before returning + type: bool + default: false + wait_timeout: + type: int + description: + - how long before wait gives up, in seconds + default: 300 +author: + - "Christopher H. 
Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Attach a Block Storage Volume + gather_facts: false + hosts: local + connection: local + tasks: + - name: Storage volume attach request + local_action: + module: rax_cbs_attachments + credentials: ~/.raxpub + volume: my-volume + server: my-server + device: /dev/xvdd + region: DFW + wait: true + state: present + register: my_volume +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES, + rax_argument_spec, + rax_find_server, + rax_find_volume, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def cloud_block_storage_attachments(module, state, volume, server, device, + wait, wait_timeout): + cbs = pyrax.cloud_blockstorage + cs = pyrax.cloudservers + + if cbs is None or cs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + changed = False + instance = {} + + volume = rax_find_volume(module, pyrax, volume) + + if not volume: + module.fail_json(msg='No matching storage volumes were found') + + if state == 'present': + server = rax_find_server(module, pyrax, server) + + if (volume.attachments and + volume.attachments[0]['server_id'] == server.id): + changed = False + elif volume.attachments: + module.fail_json(msg='Volume is attached to another server') + else: + try: + volume.attach_to_instance(server, mountpoint=device) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + volume.get() + + for key, value in vars(volume).items(): + if (isinstance(value, NON_CALLABLES) and + not key.startswith('_')): + instance[key] = value + + result = dict(changed=changed) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + elif wait: + attempts = wait_timeout // 5 + pyrax.utils.wait_until(volume, 'status', 'in-use', + interval=5, attempts=attempts) + + volume.get() + result['volume'] = rax_to_dict(volume) + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + elif state == 'absent': + server = rax_find_server(module, pyrax, server) + + if (volume.attachments and + volume.attachments[0]['server_id'] == server.id): + try: + volume.detach() + if wait: + pyrax.utils.wait_until(volume, 'status', 'available', + interval=3, attempts=0, + verbose=False) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + volume.get() + changed = True + elif volume.attachments: + module.fail_json(msg='Volume is attached to another server') + + result = dict(changed=changed, volume=rax_to_dict(volume)) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + module.exit_json(changed=changed, volume=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + device=dict(required=False), + volume=dict(required=True), + server=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300) + ) + ) + + module = AnsibleModule( + 
argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + device = module.params.get('device') + volume = module.params.get('volume') + server = module.params.get('server') + state = module.params.get('state') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + + cloud_block_storage_attachments(module, state, volume, server, device, + wait, wait_timeout) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb.py b/ansible_collections/community/general/plugins/modules/rax_cdb.py new file mode 100644 index 000000000..cf0366d3b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_cdb.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_cdb +short_description: Create/delete or resize a Rackspace Cloud Databases instance +description: + - creates / deletes or resize a Rackspace Cloud Databases instance + and optionally waits for it to be 'running'. The name option needs to be + unique since it's used to identify the instance. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + description: + - Name of the databases server instance + required: true + flavor: + type: int + description: + - flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB) + default: 1 + volume: + type: int + description: + - Volume size of the database 1-150GB + default: 2 + cdb_type: + type: str + description: + - type of instance (i.e. MySQL, MariaDB, Percona) + default: MySQL + aliases: ['type'] + cdb_version: + type: str + description: + - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6) + - "The available choices are: C(5.1), C(5.6) and C(10)." 
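+# Which value is valid depends on cdb_type: 5.1 and 5.6 apply to MySQL,
+# 5.6 to Percona, and 10 to MariaDB (see the description above).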
+ default: '5.6' + aliases: ['version'] + state: + type: str + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + wait: + description: + - wait for the instance to be in state 'running' before returning + type: bool + default: false + wait_timeout: + type: int + description: + - how long before wait gives up, in seconds + default: 300 +author: "Simon JAILLET (@jails)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Build a Cloud Databases + gather_facts: false + tasks: + - name: Server build request + local_action: + module: rax_cdb + credentials: ~/.raxpub + region: IAD + name: db-server1 + flavor: 1 + volume: 2 + cdb_type: MySQL + cdb_version: 5.6 + wait: true + state: present + register: rax_db_server +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module + + +def find_instance(name): + + cdb = pyrax.cloud_databases + instances = cdb.list() + if instances: + for instance in instances: + if instance.name == name: + return instance + return False + + +def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, + wait_timeout): + + for arg, value in dict(name=name, flavor=flavor, + volume=volume, type=cdb_type, version=cdb_version + ).items(): + if not value: + module.fail_json(msg='%s is required for the "rax_cdb"' + ' module' % arg) + + if not (volume >= 1 and volume <= 150): + module.fail_json(msg='volume is required to be between 1 and 150') + + cdb = pyrax.cloud_databases + + flavors = [] + for item in cdb.list_flavors(): + flavors.append(item.id) + + if not (flavor in flavors): + module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor)) + + changed = False + + instance = find_instance(name) + + if not instance: + action = 'create' + try: + instance = cdb.create(name=name, flavor=flavor, volume=volume, + type=cdb_type, version=cdb_version) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + else: + action = None + + if instance.volume.size != volume: + action = 'resize' + if instance.volume.size > volume: + module.fail_json(changed=False, action=action, + msg='The new volume size must be larger than ' + 'the current volume size', + cdb=rax_to_dict(instance)) + instance.resize_volume(volume) + changed = True + + if int(instance.flavor.id) != flavor: + action = 'resize' + pyrax.utils.wait_until(instance, 'status', 'ACTIVE', + attempts=wait_timeout) + instance.resize(flavor) + changed = True + + if wait: + pyrax.utils.wait_until(instance, 'status', 'ACTIVE', + attempts=wait_timeout) + + if wait and instance.status != 'ACTIVE': + module.fail_json(changed=changed, action=action, + cdb=rax_to_dict(instance), + msg='Timeout waiting for "%s" databases instance to ' + 'be created' % name) + + module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance)) + + +def delete_instance(module, name, wait, wait_timeout): + + if not name: + module.fail_json(msg='name is required for the "rax_cdb" module') + + changed = False + + instance = find_instance(name) + if not instance: + module.exit_json(changed=False, action='delete') + + try: + instance.delete() + except Exception as e: + 
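+            # pyrax raises Python 2-style exceptions that expose a .message
+            # attribute; the handler below reports it via fail_json rather
+            # than letting the exception propagate.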
module.fail_json(msg='%s' % e.message) + else: + changed = True + + if wait: + pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN', + attempts=wait_timeout) + + if wait and instance.status != 'SHUTDOWN': + module.fail_json(changed=changed, action='delete', + cdb=rax_to_dict(instance), + msg='Timeout waiting for "%s" databases instance to ' + 'be deleted' % name) + + module.exit_json(changed=changed, action='delete', + cdb=rax_to_dict(instance)) + + +def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, + wait_timeout): + + # act on the state + if state == 'present': + save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, + wait_timeout) + elif state == 'absent': + delete_instance(module, name, wait, wait_timeout) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + name=dict(type='str', required=True), + flavor=dict(type='int', default=1), + volume=dict(type='int', default=2), + cdb_type=dict(type='str', default='MySQL', aliases=['type']), + cdb_version=dict(type='str', default='5.6', aliases=['version']), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + name = module.params.get('name') + flavor = module.params.get('flavor') + volume = module.params.get('volume') + cdb_type = module.params.get('cdb_type') + cdb_version = module.params.get('cdb_version') + state = module.params.get('state') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_database.py b/ansible_collections/community/general/plugins/modules/rax_cdb_database.py new file mode 100644 index 000000000..35b076aad --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_cdb_database.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: rax_cdb_database +short_description: Create / delete a database in the Cloud Databases +description: + - create / delete a database in the Cloud Databases. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. 
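+# As with the other rax modules in this patch, check mode and diff mode are
+# not supported; the attributes block below records that explicitly.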
+attributes: + check_mode: + support: none + diff_mode: + support: none +options: + cdb_id: + type: str + description: + - The databases server UUID + required: true + name: + type: str + description: + - Name to give to the database + required: true + character_set: + type: str + description: + - Set of symbols and encodings + default: 'utf8' + collate: + type: str + description: + - Set of rules for comparing characters in a character set + default: 'utf8_general_ci' + state: + type: str + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present +author: "Simon JAILLET (@jails)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Build a database in Cloud Databases + tasks: + - name: Database build request + local_action: + module: rax_cdb_database + credentials: ~/.raxpub + region: IAD + cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 + name: db1 + state: present + register: rax_db_database +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module + + +def find_database(instance, name): + try: + database = instance.get_database(name) + except Exception: + return False + + return database + + +def save_database(module, cdb_id, name, character_set, collate): + cdb = pyrax.cloud_databases + + try: + instance = cdb.get(cdb_id) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + changed = False + + database = find_database(instance, name) + + if not database: + try: + database = instance.create_database(name=name, + character_set=character_set, + collate=collate) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + module.exit_json(changed=changed, action='create', + database=rax_to_dict(database)) + + +def delete_database(module, cdb_id, name): + cdb = pyrax.cloud_databases + + try: + instance = cdb.get(cdb_id) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + changed = False + + database = find_database(instance, name) + + if database: + try: + database.delete() + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + module.exit_json(changed=changed, action='delete', + database=rax_to_dict(database)) + + +def rax_cdb_database(module, state, cdb_id, name, character_set, collate): + + # act on the state + if state == 'present': + save_database(module, cdb_id, name, character_set, collate) + elif state == 'absent': + delete_database(module, cdb_id, name) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + cdb_id=dict(type='str', required=True), + name=dict(type='str', required=True), + character_set=dict(type='str', default='utf8'), + collate=dict(type='str', default='utf8_general_ci'), + state=dict(default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + cdb_id = module.params.get('cdb_id') + name = module.params.get('name') + character_set = module.params.get('character_set') + collate = module.params.get('collate') + state = 
module.params.get('state') + + setup_rax_module(module, pyrax) + rax_cdb_database(module, state, cdb_id, name, character_set, collate) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_user.py b/ansible_collections/community/general/plugins/modules/rax_cdb_user.py new file mode 100644 index 000000000..a2cd675d9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_cdb_user.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_cdb_user +short_description: Create / delete a Rackspace Cloud Database +description: + - create / delete a database in the Cloud Databases. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + cdb_id: + type: str + description: + - The databases server UUID + required: true + db_username: + type: str + description: + - Name of the database user + required: true + db_password: + type: str + description: + - Database user password + required: true + databases: + type: list + elements: str + description: + - Name of the databases that the user can access + default: [] + host: + type: str + description: + - Specifies the host from which a user is allowed to connect to + the database. 
Possible values are a string containing an IPv4 address + or "%" to allow connecting from any host + default: '%' + state: + type: str + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present +author: "Simon JAILLET (@jails)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Build a user in Cloud Databases + tasks: + - name: User build request + local_action: + module: rax_cdb_user + credentials: ~/.raxpub + region: IAD + cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 + db_username: user1 + db_password: user1 + databases: ['db1'] + state: present + register: rax_db_user +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module + + +def find_user(instance, name): + try: + user = instance.get_user(name) + except Exception: + return False + + return user + + +def save_user(module, cdb_id, name, password, databases, host): + + for arg, value in dict(cdb_id=cdb_id, name=name).items(): + if not value: + module.fail_json(msg='%s is required for the "rax_cdb_user" ' + 'module' % arg) + + cdb = pyrax.cloud_databases + + try: + instance = cdb.get(cdb_id) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + changed = False + + user = find_user(instance, name) + + if not user: + action = 'create' + try: + user = instance.create_user(name=name, + password=password, + database_names=databases, + host=host) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + else: + action = 'update' + + if user.host != host: + changed = True + + user.update(password=password, host=host) + + former_dbs = set([item.name for item in user.list_user_access()]) + databases = set(databases) + + if databases != former_dbs: + try: + revoke_dbs = [db for db in former_dbs if db not in databases] + user.revoke_user_access(db_names=revoke_dbs) + + new_dbs = [db for db in databases if db not in former_dbs] + user.grant_user_access(db_names=new_dbs) + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + module.exit_json(changed=changed, action=action, user=rax_to_dict(user)) + + +def delete_user(module, cdb_id, name): + + for arg, value in dict(cdb_id=cdb_id, name=name).items(): + if not value: + module.fail_json(msg='%s is required for the "rax_cdb_user"' + ' module' % arg) + + cdb = pyrax.cloud_databases + + try: + instance = cdb.get(cdb_id) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + changed = False + + user = find_user(instance, name) + + if user: + try: + user.delete() + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + changed = True + + module.exit_json(changed=changed, action='delete') + + +def rax_cdb_user(module, state, cdb_id, name, password, databases, host): + + # act on the state + if state == 'present': + save_user(module, cdb_id, name, password, databases, host) + elif state == 'absent': + delete_user(module, cdb_id, name) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + cdb_id=dict(type='str', required=True), + db_username=dict(type='str', required=True), + 
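+            # db_password is declared no_log below so the secret is never
+            # echoed in task output or logs.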
db_password=dict(type='str', required=True, no_log=True), + databases=dict(type='list', elements='str', default=[]), + host=dict(type='str', default='%'), + state=dict(default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + cdb_id = module.params.get('cdb_id') + name = module.params.get('db_username') + password = module.params.get('db_password') + databases = module.params.get('databases') + host = to_text(module.params.get('host'), errors='surrogate_or_strict') + state = module.params.get('state') + + setup_rax_module(module, pyrax) + rax_cdb_user(module, state, cdb_id, name, password, databases, host) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_clb.py b/ansible_collections/community/general/plugins/modules/rax_clb.py new file mode 100644 index 000000000..9a4ca4f89 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_clb.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_clb +short_description: Create / delete a load balancer in Rackspace Public Cloud +description: + - creates / deletes a Rackspace Public Cloud load balancer. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. 
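+# The algorithm and protocol choices documented below correspond to the
+# CLB_ALGORITHMS and CLB_PROTOCOLS lists imported from module_utils.rax,
+# which the argument spec reuses further down.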
+attributes: + check_mode: + support: none + diff_mode: + support: none +options: + algorithm: + type: str + description: + - algorithm for the balancer being created + choices: + - RANDOM + - LEAST_CONNECTIONS + - ROUND_ROBIN + - WEIGHTED_LEAST_CONNECTIONS + - WEIGHTED_ROUND_ROBIN + default: LEAST_CONNECTIONS + meta: + type: dict + default: {} + description: + - A hash of metadata to associate with the instance + name: + type: str + description: + - Name to give the load balancer + required: true + port: + type: int + description: + - Port for the balancer being created + default: 80 + protocol: + type: str + description: + - Protocol for the balancer being created + choices: + - DNS_TCP + - DNS_UDP + - FTP + - HTTP + - HTTPS + - IMAPS + - IMAPv4 + - LDAP + - LDAPS + - MYSQL + - POP3 + - POP3S + - SMTP + - TCP + - TCP_CLIENT_FIRST + - UDP + - UDP_STREAM + - SFTP + default: HTTP + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + timeout: + type: int + description: + - timeout for communication between the balancer and the node + default: 30 + type: + type: str + description: + - type of interface for the balancer being created + choices: + - PUBLIC + - SERVICENET + default: PUBLIC + vip_id: + type: str + description: + - Virtual IP ID to use when creating the load balancer for purposes of + sharing an IP with another load balancer of another protocol + wait: + description: + - wait for the balancer to be in state 'running' before returning + type: bool + default: false + wait_timeout: + type: int + description: + - how long before wait gives up, in seconds + default: 300 +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Build a Load Balancer + gather_facts: false + hosts: local + connection: local + tasks: + - name: Load Balancer create request + local_action: + module: rax_clb + credentials: ~/.raxpub + name: my-lb + port: 8080 + protocol: HTTP + type: SERVICENET + timeout: 30 + region: DFW + wait: true + state: present + meta: + app: my-cool-app + register: my_lb +''' + + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS, + CLB_PROTOCOLS, + rax_argument_spec, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, + vip_type, timeout, wait, wait_timeout, vip_id): + if int(timeout) < 30: + module.fail_json(msg='"timeout" must be greater than or equal to 30') + + changed = False + balancers = [] + + clb = pyrax.cloud_loadbalancers + if not clb: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + balancer_list = clb.list() + while balancer_list: + retrieved = clb.list(marker=balancer_list.pop().id) + balancer_list.extend(retrieved) + if len(retrieved) < 2: + break + + for balancer in balancer_list: + if name != balancer.name and name != balancer.id: + continue + + balancers.append(balancer) + + if len(balancers) > 1: + module.fail_json(msg='Multiple Load Balancers were matched by name, ' + 'try using the Load Balancer ID instead') + + if state == 'present': + if isinstance(meta, dict): + metadata = [dict(key=k, value=v) for k, v in meta.items()] + + if not balancers: + try: + virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)] + balancer = clb.create(name, metadata=metadata, port=port, + algorithm=algorithm, protocol=protocol, + timeout=timeout, virtual_ips=virtual_ips) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + balancer = balancers[0] + setattr(balancer, 'metadata', + [dict(key=k, value=v) for k, v in + balancer.get_metadata().items()]) + atts = { + 'name': name, + 'algorithm': algorithm, + 'port': port, + 'protocol': protocol, + 'timeout': timeout + } + for att, value in atts.items(): + current = getattr(balancer, att) + if current != value: + changed = True + + if changed: + balancer.update(**atts) + + if balancer.metadata != metadata: + balancer.set_metadata(meta) + changed = True + + virtual_ips = [clb.VirtualIP(type=vip_type)] + current_vip_types = set([v.type for v in balancer.virtual_ips]) + vip_types = set([v.type for v in virtual_ips]) + if current_vip_types != vip_types: + module.fail_json(msg='Load balancer Virtual IP type cannot ' + 'be changed') + + if wait: + attempts = wait_timeout // 5 + pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) + + balancer.get() + instance = rax_to_dict(balancer, 'clb') + + result = dict(changed=changed, balancer=instance) + + if balancer.status == 'ERROR': + result['msg'] = '%s failed to build' % balancer.id + elif wait and balancer.status not in ('ACTIVE', 'ERROR'): + result['msg'] = 'Timeout waiting on %s' % balancer.id + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + elif state == 'absent': + if balancers: + balancer = balancers[0] + try: + balancer.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + instance = rax_to_dict(balancer, 'clb') + + if wait: + attempts = wait_timeout // 5 + pyrax.utils.wait_until(balancer, 'status', ('DELETED'), + interval=5, attempts=attempts) + else: + instance = {} + + module.exit_json(changed=changed, balancer=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + algorithm=dict(choices=CLB_ALGORITHMS, + default='LEAST_CONNECTIONS'), + meta=dict(type='dict', default={}), + name=dict(required=True), + port=dict(type='int', default=80), + protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), + state=dict(default='present', choices=['present', 'absent']), + timeout=dict(type='int', default=30), + type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'), + vip_id=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + algorithm = module.params.get('algorithm') + 
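+    # Plain lookups follow; note that timeout and wait_timeout are still
+    # re-cast with int() below even though the argument spec already
+    # declares them as type='int'.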
meta = module.params.get('meta') + name = module.params.get('name') + port = module.params.get('port') + protocol = module.params.get('protocol') + state = module.params.get('state') + timeout = int(module.params.get('timeout')) + vip_id = module.params.get('vip_id') + vip_type = module.params.get('type') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + setup_rax_module(module, pyrax) + + cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, + vip_type, timeout, wait, wait_timeout, vip_id) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py b/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py new file mode 100644 index 000000000..219f0c2ba --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py @@ -0,0 +1,293 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_clb_nodes +short_description: Add, modify and remove nodes from a Rackspace Cloud Load Balancer +description: + - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + address: + type: str + required: false + description: + - IP address or domain name of the node + condition: + type: str + required: false + choices: + - enabled + - disabled + - draining + description: + - Condition for the node, which determines its role within the load + balancer + load_balancer_id: + type: int + required: true + description: + - Load balancer id + node_id: + type: int + required: false + description: + - Node id + port: + type: int + required: false + description: + - Port number of the load balanced service on the node + state: + type: str + required: false + default: "present" + choices: + - present + - absent + description: + - Indicate desired state of the node + type: + type: str + required: false + choices: + - primary + - secondary + description: + - Type of node + wait: + required: false + default: false + type: bool + description: + - Wait for the load balancer to become active before returning + wait_timeout: + type: int + required: false + default: 30 + description: + - How long to wait before giving up and returning an error + weight: + type: int + required: false + description: + - Weight of node + virtualenv: + type: path + description: + - Virtualenv to execute this module in +author: "Lukasz Kawczynski (@neuroid)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Add a new node to the load balancer + local_action: + module: rax_clb_nodes + load_balancer_id: 71 + address: 10.2.2.3 + port: 80 + condition: enabled + type: primary + wait: true + credentials: /path/to/credentials + +- name: Drain connections from a node + local_action: + module: rax_clb_nodes + 
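+      # 71 is the load balancer's numeric ID, for example as returned in a
+      # prior rax_clb result (balancer.id).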
load_balancer_id: 71 + node_id: 410 + condition: draining + wait: true + credentials: /path/to/credentials + +- name: Remove a node from the load balancer + local_action: + module: rax_clb_nodes + load_balancer_id: 71 + node_id: 410 + state: absent + wait: true + credentials: /path/to/credentials +''' + +import os + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module + + +def _activate_virtualenv(path): + activate_this = os.path.join(path, 'bin', 'activate_this.py') + with open(activate_this) as f: + code = compile(f.read(), activate_this, 'exec') + exec(code) + + +def _get_node(lb, node_id=None, address=None, port=None): + """Return a matching node""" + for node in getattr(lb, 'nodes', []): + match_list = [] + if node_id is not None: + match_list.append(getattr(node, 'id', None) == node_id) + if address is not None: + match_list.append(getattr(node, 'address', None) == address) + if port is not None: + match_list.append(getattr(node, 'port', None) == port) + + if match_list and all(match_list): + return node + + return None + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + address=dict(), + condition=dict(choices=['enabled', 'disabled', 'draining']), + load_balancer_id=dict(required=True, type='int'), + node_id=dict(type='int'), + port=dict(type='int'), + state=dict(default='present', choices=['present', 'absent']), + type=dict(choices=['primary', 'secondary']), + virtualenv=dict(type='path'), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=30, type='int'), + weight=dict(type='int'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + address = module.params['address'] + condition = (module.params['condition'] and + module.params['condition'].upper()) + load_balancer_id = module.params['load_balancer_id'] + node_id = module.params['node_id'] + port = module.params['port'] + state = module.params['state'] + typ = module.params['type'] and module.params['type'].upper() + virtualenv = module.params['virtualenv'] + wait = module.params['wait'] + wait_timeout = module.params['wait_timeout'] or 1 + weight = module.params['weight'] + + if virtualenv: + try: + _activate_virtualenv(virtualenv) + except IOError as e: + module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( + virtualenv, e)) + + setup_rax_module(module, pyrax) + + if not pyrax.cloud_loadbalancers: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + try: + lb = pyrax.cloud_loadbalancers.get(load_balancer_id) + except pyrax.exc.PyraxException as e: + module.fail_json(msg='%s' % e.message) + + node = _get_node(lb, node_id, address, port) + + result = rax_clb_node_to_dict(node) + + if state == 'absent': + if not node: # Removing a non-existent node + module.exit_json(changed=False, state=state) + try: + lb.delete_node(node) + result = {} + except pyrax.exc.NotFound: + module.exit_json(changed=False, state=state) + except pyrax.exc.PyraxException as e: + module.fail_json(msg='%s' % e.message) + else: # present + if not node: + if node_id: # Updating a non-existent node + msg = 'Node %d not found' % node_id + if lb.nodes: + msg += (' (available nodes: %s)' % + ', '.join([str(x.id) for x in lb.nodes])) + module.fail_json(msg=msg) + else: # Creating a new node + try: + node = pyrax.cloudloadbalancers.Node( + address=address, port=port, condition=condition, + weight=weight, type=typ) + resp, body = lb.add_nodes([node]) + result.update(body['nodes'][0]) + except pyrax.exc.PyraxException as e: + module.fail_json(msg='%s' % e.message) + else: # Updating an existing node + mutable = { + 'condition': condition, + 'type': typ, + 'weight': weight, + } + + for name in list(mutable): + value = mutable[name] + if value is None or value == getattr(node, name): + mutable.pop(name) + + if not mutable: + module.exit_json(changed=False, state=state, node=result) + + try: + # The diff has to be set explicitly to update node's weight and + # type; this should probably be fixed in pyrax + lb.update_node(node, diff=mutable) + result.update(mutable) + except pyrax.exc.PyraxException as e: + module.fail_json(msg='%s' % e.message) + + if wait: + pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, + attempts=wait_timeout) + if lb.status != 'ACTIVE': + module.fail_json( + msg='Load balancer not active after %ds (current status: %s)' % + (wait_timeout, lb.status.lower())) + + kwargs = {'node': result} if result else {} + module.exit_json(changed=True, state=state, **kwargs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py b/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py new file mode 100644 index 000000000..5dca9d3ec --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: rax_clb_ssl +short_description: Manage SSL termination for a Rackspace Cloud Load Balancer +description: + - Set up, reconfigure, or remove SSL termination for an existing load balancer. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + loadbalancer: + type: str + description: + - Name or ID of the load balancer on which to manage SSL termination. 
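+# Either the load balancer's numeric ID or its name is accepted here; the
+# module resolves the value through rax_find_loadbalancer.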
+ required: true + state: + type: str + description: + - If set to "present", SSL termination will be added to this load balancer. + - If "absent", SSL termination will be removed instead. + choices: + - present + - absent + default: present + enabled: + description: + - If set to "false", temporarily disable SSL termination without discarding + - existing credentials. + default: true + type: bool + private_key: + type: str + description: + - The private SSL key as a string in PEM format. + certificate: + type: str + description: + - The public SSL certificates as a string in PEM format. + intermediate_certificate: + type: str + description: + - One or more intermediate certificate authorities as a string in PEM + - format, concatenated into a single string. + secure_port: + type: int + description: + - The port to listen for secure traffic. + default: 443 + secure_traffic_only: + description: + - If "true", the load balancer will *only* accept secure traffic. + default: false + type: bool + https_redirect: + description: + - If "true", the load balancer will redirect HTTP traffic to HTTPS. + - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL + - termination is also applied or removed. + type: bool + wait: + description: + - Wait for the balancer to be in state "running" before turning. + default: false + type: bool + wait_timeout: + type: int + description: + - How long before "wait" gives up, in seconds. + default: 300 +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Enable SSL termination on a load balancer + community.general.rax_clb_ssl: + loadbalancer: the_loadbalancer + state: present + private_key: "{{ lookup('file', 'credentials/server.key' ) }}" + certificate: "{{ lookup('file', 'credentials/server.crt' ) }}" + intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}" + secure_traffic_only: true + wait: true + +- name: Disable SSL termination + community.general.rax_clb_ssl: + loadbalancer: "{{ registered_lb.balancer.id }}" + state: absent + wait: true +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_find_loadbalancer, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, + certificate, intermediate_certificate, secure_port, + secure_traffic_only, https_redirect, + wait, wait_timeout): + # Validate arguments. + + if state == 'present': + if not private_key: + module.fail_json(msg="private_key must be provided.") + else: + private_key = private_key.strip() + + if not certificate: + module.fail_json(msg="certificate must be provided.") + else: + certificate = certificate.strip() + + attempts = wait_timeout // 5 + + # Locate the load balancer. + + balancer = rax_find_loadbalancer(module, pyrax, loadbalancer) + existing_ssl = balancer.get_ssl_termination() + + changed = False + + if state == 'present': + # Apply or reconfigure SSL termination on the load balancer. 
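+        # The camelCase keys below mirror the Cloud Load Balancers SSL
+        # termination API payload; the dict is passed straight through to
+        # pyrax's add_ssl_termination() call.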
+ ssl_attrs = dict( + securePort=secure_port, + privatekey=private_key, + certificate=certificate, + intermediateCertificate=intermediate_certificate, + enabled=enabled, + secureTrafficOnly=secure_traffic_only + ) + + needs_change = False + + if existing_ssl: + for ssl_attr, value in ssl_attrs.items(): + if ssl_attr == 'privatekey': + # The private key is not included in get_ssl_termination's + # output (as it shouldn't be). Also, if you're changing the + # private key, you'll also be changing the certificate, + # so we don't lose anything by not checking it. + continue + + if value is not None and existing_ssl.get(ssl_attr) != value: + # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr)) + needs_change = True + else: + needs_change = True + + if needs_change: + try: + balancer.add_ssl_termination(**ssl_attrs) + except pyrax.exceptions.PyraxException as e: + module.fail_json(msg='%s' % e.message) + changed = True + elif state == 'absent': + # Remove SSL termination if it's already configured. + if existing_ssl: + try: + balancer.delete_ssl_termination() + except pyrax.exceptions.PyraxException as e: + module.fail_json(msg='%s' % e.message) + changed = True + + if https_redirect is not None and balancer.httpsRedirect != https_redirect: + if changed: + # This wait is unavoidable because load balancers are immutable + # while the SSL termination changes above are being applied. + pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) + + try: + balancer.update(httpsRedirect=https_redirect) + except pyrax.exceptions.PyraxException as e: + module.fail_json(msg='%s' % e.message) + changed = True + + if changed and wait: + pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) + + balancer.get() + new_ssl_termination = balancer.get_ssl_termination() + + # Intentionally omit the private key from the module output, so you don't + # accidentally echo it with `ansible-playbook -v` or `debug`, and the + # certificate, which is just long. Convert other attributes to snake_case + # and include https_redirect at the top-level. 
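# For illustration, an assumed (not captured) result shape: a listener
# terminated on port 443 that still accepts plain HTTP would come back
# from the block below as
#   ssl_termination: {'enabled': True, 'secure_port': 443,
#                     'secure_traffic_only': False}
# with https_redirect reported alongside it at the top level.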
+ if new_ssl_termination: + new_ssl = dict( + enabled=new_ssl_termination['enabled'], + secure_port=new_ssl_termination['securePort'], + secure_traffic_only=new_ssl_termination['secureTrafficOnly'] + ) + else: + new_ssl = None + + result = dict( + changed=changed, + https_redirect=balancer.httpsRedirect, + ssl_termination=new_ssl, + balancer=rax_to_dict(balancer, 'clb') + ) + success = True + + if balancer.status == 'ERROR': + result['msg'] = '%s failed to build' % balancer.id + success = False + elif wait and balancer.status not in ('ACTIVE', 'ERROR'): + result['msg'] = 'Timeout waiting on %s' % balancer.id + success = False + + if success: + module.exit_json(**result) + else: + module.fail_json(**result) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update(dict( + loadbalancer=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + enabled=dict(type='bool', default=True), + private_key=dict(no_log=True), + certificate=dict(), + intermediate_certificate=dict(), + secure_port=dict(type='int', default=443), + secure_traffic_only=dict(type='bool', default=False), + https_redirect=dict(type='bool'), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module.') + + loadbalancer = module.params.get('loadbalancer') + state = module.params.get('state') + enabled = module.boolean(module.params.get('enabled')) + private_key = module.params.get('private_key') + certificate = module.params.get('certificate') + intermediate_certificate = module.params.get('intermediate_certificate') + secure_port = module.params.get('secure_port') + secure_traffic_only = module.boolean(module.params.get('secure_traffic_only')) + https_redirect = module.boolean(module.params.get('https_redirect')) + wait = module.boolean(module.params.get('wait')) + wait_timeout = module.params.get('wait_timeout') + + setup_rax_module(module, pyrax) + + cloud_load_balancer_ssl( + module, loadbalancer, state, enabled, private_key, certificate, + intermediate_certificate, secure_port, secure_traffic_only, + https_redirect, wait, wait_timeout + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_dns.py b/ansible_collections/community/general/plugins/modules/rax_dns.py new file mode 100644 index 000000000..e70b76914 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_dns.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_dns +short_description: Manage domains on Rackspace Cloud DNS +description: + - Manage domains on Rackspace Cloud DNS. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + comment: + type: str + description: + - Brief description of the domain. 
Maximum length of 160 characters + email: + type: str + description: + - Email address of the domain administrator + name: + type: str + description: + - Domain name to create + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + ttl: + type: int + description: + - Time to live of domain in seconds + default: 3600 +notes: + - "It is recommended that plays utilizing this module be run with + C(serial: 1) to avoid exceeding the API request limit imposed by + the Rackspace CloudDNS API" +author: "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Create domain + hosts: all + gather_facts: false + tasks: + - name: Domain create request + local_action: + module: rax_dns + credentials: ~/.raxpub + name: example.org + email: admin@example.org + register: rax_dns +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def rax_dns(module, comment, email, name, state, ttl): + changed = False + + dns = pyrax.cloud_dns + if not dns: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if state == 'present': + if not email: + module.fail_json(msg='An "email" attribute is required for ' + 'creating a domain') + + try: + domain = dns.find(name=name) + except pyrax.exceptions.NoUniqueMatch as e: + module.fail_json(msg='%s' % e.message) + except pyrax.exceptions.NotFound: + try: + domain = dns.create(name=name, emailAddress=email, ttl=ttl, + comment=comment) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + update = {} + if comment != getattr(domain, 'comment', None): + update['comment'] = comment + if ttl != getattr(domain, 'ttl', None): + update['ttl'] = ttl + if email != getattr(domain, 'emailAddress', None): + update['emailAddress'] = email + + if update: + try: + domain.update(**update) + changed = True + domain.get() + except Exception as e: + module.fail_json(msg='%s' % e.message) + + elif state == 'absent': + try: + domain = dns.find(name=name) + except pyrax.exceptions.NotFound: + domain = {} + except Exception as e: + module.fail_json(msg='%s' % e.message) + + if domain: + try: + domain.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, domain=rax_to_dict(domain)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + comment=dict(), + email=dict(), + name=dict(), + state=dict(default='present', choices=['present', 'absent']), + ttl=dict(type='int', default=3600), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + comment = module.params.get('comment') + email = module.params.get('email') + name = module.params.get('name') + state = module.params.get('state') + ttl = module.params.get('ttl') + + setup_rax_module(module, pyrax, False) + + rax_dns(module, comment, email, name, state, ttl) + + +if __name__ == '__main__': + main() diff 
--git a/ansible_collections/community/general/plugins/modules/rax_dns_record.py b/ansible_collections/community/general/plugins/modules/rax_dns_record.py new file mode 100644 index 000000000..fd3ad47ce --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_dns_record.py @@ -0,0 +1,362 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_dns_record +short_description: Manage DNS records on Rackspace Cloud DNS +description: + - Manage DNS records on Rackspace Cloud DNS. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + comment: + type: str + description: + - Brief description of the domain. Maximum length of 160 characters + data: + type: str + description: + - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for + SRV/TXT + required: true + domain: + type: str + description: + - Domain name to create the record in. This is an invalid option when + type=PTR + loadbalancer: + type: str + description: + - Load Balancer ID to create a PTR record for. Only used with type=PTR + name: + type: str + description: + - FQDN record name to create + required: true + overwrite: + description: + - Add new records if data doesn't match, instead of updating existing + record with matching name. If there are already multiple records with + matching name and overwrite=true, this module will fail. + default: true + type: bool + priority: + type: int + description: + - Required for MX and SRV records, but forbidden for other record types. + If specified, must be an integer from 0 to 65535. + server: + type: str + description: + - Server ID to create a PTR record for. Only used with type=PTR + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + ttl: + type: int + description: + - Time to live of record in seconds + default: 3600 + type: + type: str + description: + - DNS record type + choices: + - A + - AAAA + - CNAME + - MX + - NS + - SRV + - TXT + - PTR + required: true +notes: + - "It is recommended that plays utilizing this module be run with + C(serial: 1) to avoid exceeding the API request limit imposed by + the Rackspace CloudDNS API" + - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be + supplied + - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. 
+ - C(PTR) record support was added in version 1.7 +author: "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Create DNS Records + hosts: all + gather_facts: false + tasks: + - name: Create A record + local_action: + module: rax_dns_record + credentials: ~/.raxpub + domain: example.org + name: www.example.org + data: "{{ rax_accessipv4 }}" + type: A + register: a_record + + - name: Create PTR record + local_action: + module: rax_dns_record + credentials: ~/.raxpub + server: "{{ rax_id }}" + name: "{{ inventory_hostname }}" + region: DFW + register: ptr_record +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_find_loadbalancer, + rax_find_server, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, + name=None, server=None, state='present', ttl=7200): + changed = False + results = [] + + dns = pyrax.cloud_dns + + if not dns: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if loadbalancer: + item = rax_find_loadbalancer(module, pyrax, loadbalancer) + elif server: + item = rax_find_server(module, pyrax, server) + + if state == 'present': + current = dns.list_ptr_records(item) + for record in current: + if record.data == data: + if record.ttl != ttl or record.name != name: + try: + dns.update_ptr_record(item, record, name, data, ttl) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + record.ttl = ttl + record.name = name + results.append(rax_to_dict(record)) + break + else: + results.append(rax_to_dict(record)) + break + + if not results: + record = dict(name=name, type='PTR', data=data, ttl=ttl, + comment=comment) + try: + results = dns.add_ptr_records(item, [record]) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, records=results) + + elif state == 'absent': + current = dns.list_ptr_records(item) + for record in current: + if record.data == data: + results.append(rax_to_dict(record)) + break + + if results: + try: + dns.delete_ptr_records(item, data) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, records=results) + + +def rax_dns_record(module, comment=None, data=None, domain=None, name=None, + overwrite=True, priority=None, record_type='A', + state='present', ttl=7200): + """Function for manipulating record types other than PTR""" + + changed = False + + dns = pyrax.cloud_dns + if not dns: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if state == 'present': + if not priority and record_type in ['MX', 'SRV']: + module.fail_json(msg='A "priority" attribute is required for ' + 'creating a MX or SRV record') + + try: + domain = dns.find(name=domain) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + try: + if overwrite: + record = domain.find_record(record_type, name=name) + else: + record = domain.find_record(record_type, name=name, data=data) + except pyrax.exceptions.DomainRecordNotUnique as e: + module.fail_json(msg='overwrite=true and there are multiple matching records') + except pyrax.exceptions.DomainRecordNotFound as e: + try: + record_data = { + 'type': record_type, + 'name': name, + 'data': data, + 'ttl': ttl + } + if comment: + record_data.update(dict(comment=comment)) + if priority and record_type.upper() in ['MX', 'SRV']: + record_data.update(dict(priority=priority)) + + record = domain.add_records([record_data])[0] + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + update = {} + if comment != getattr(record, 'comment', None): + update['comment'] = comment + if ttl != getattr(record, 'ttl', None): + update['ttl'] = ttl + if priority != getattr(record, 'priority', None): + update['priority'] = priority + if data != getattr(record, 'data', None): + update['data'] = data + + if update: + try: + record.update(**update) + changed = True + record.get() + except Exception as e: + module.fail_json(msg='%s' % e.message) + + elif state == 'absent': + try: + domain = dns.find(name=domain) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + try: + record = domain.find_record(record_type, name=name, data=data) + except pyrax.exceptions.DomainRecordNotFound as e: + record = {} + except pyrax.exceptions.DomainRecordNotUnique as e: + module.fail_json(msg='%s' % e.message) + + if record: + try: + record.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, record=rax_to_dict(record)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + comment=dict(), + data=dict(required=True), + domain=dict(), + loadbalancer=dict(), + name=dict(required=True), + overwrite=dict(type='bool', default=True), + priority=dict(type='int'), + server=dict(), + state=dict(default='present', choices=['present', 'absent']), + ttl=dict(type='int', default=3600), + type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', + 'SRV', 'TXT', 'PTR']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + mutually_exclusive=[ + ['server', 'loadbalancer', 'domain'], + ], + required_one_of=[ + ['server', 'loadbalancer', 'domain'], + ], + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + comment = module.params.get('comment') + data = module.params.get('data') + domain = module.params.get('domain') + loadbalancer = module.params.get('loadbalancer') + name = module.params.get('name') + overwrite = module.params.get('overwrite') + priority = module.params.get('priority') + server = module.params.get('server') + state = module.params.get('state') + ttl = module.params.get('ttl') + record_type = module.params.get('type') + + setup_rax_module(module, pyrax, False) + + if record_type.upper() == 'PTR': + if not server and not loadbalancer: + module.fail_json(msg='one of the following 
is required: ' + 'server,loadbalancer') + rax_dns_record_ptr(module, data=data, comment=comment, + loadbalancer=loadbalancer, name=name, server=server, + state=state, ttl=ttl) + else: + rax_dns_record(module, comment=comment, data=data, domain=domain, + name=name, overwrite=overwrite, priority=priority, + record_type=record_type, state=state, ttl=ttl) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_facts.py b/ansible_collections/community/general/plugins/modules/rax_facts.py new file mode 100644 index 000000000..9e63fec38 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_facts.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_facts +short_description: Gather facts for Rackspace Cloud Servers +description: + - Gather facts for Rackspace Cloud Servers. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + address: + type: str + description: + - Server IP address to retrieve facts for, will match any IP assigned to + the server + id: + type: str + description: + - Server ID to retrieve facts for + name: + type: str + description: + - Server name to retrieve facts for +author: "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module + +''' + +EXAMPLES = ''' +- name: Gather info about servers + hosts: all + gather_facts: false + tasks: + - name: Get facts about servers + local_action: + module: rax_facts + credentials: ~/.raxpub + name: "{{ inventory_hostname }}" + region: DFW + - name: Map some facts + ansible.builtin.set_fact: + ansible_ssh_host: "{{ rax_accessipv4 }}" +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def rax_facts(module, address, name, server_id): + changed = False + + cs = pyrax.cloudservers + + if cs is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + ansible_facts = {} + + search_opts = {} + if name: + search_opts = dict(name='^%s$' % name) + try: + servers = cs.servers.list(search_opts=search_opts) + except Exception as e: + module.fail_json(msg='%s' % e.message) + elif address: + servers = [] + try: + for server in cs.servers.list(): + for addresses in server.networks.values(): + if address in addresses: + servers.append(server) + break + except Exception as e: + module.fail_json(msg='%s' % e.message) + elif server_id: + servers = [] + try: + servers.append(cs.servers.get(server_id)) + except Exception as e: + pass + + servers[:] = [server for server in servers if server.status != "DELETED"] + + if len(servers) > 1: + module.fail_json(msg='Multiple servers found matching provided ' + 'search parameters') + elif len(servers) == 1: + ansible_facts = rax_to_dict(servers[0], 'server') + + module.exit_json(changed=changed, ansible_facts=ansible_facts) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + address=dict(), + id=dict(), + name=dict(), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + mutually_exclusive=[['address', 'id', 'name']], + required_one_of=[['address', 'id', 'name']], + supports_check_mode=True, + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + address = module.params.get('address') + server_id = module.params.get('id') + name = module.params.get('name') + + setup_rax_module(module, pyrax) + + rax_facts(module, address, name, server_id) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_files.py b/ansible_collections/community/general/plugins/modules/rax_files.py new file mode 100644 index 000000000..2d52ebc0f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_files.py @@ -0,0 +1,402 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2013, Paul Durivage +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_files +short_description: Manipulate Rackspace Cloud Files Containers +description: + - Manipulate Rackspace Cloud Files Containers. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + clear_meta: + description: + - Optionally clear existing metadata when applying metadata to existing containers. + Selecting this option is only appropriate when setting type=meta + type: bool + default: false + container: + type: str + description: + - The container to use for container or metadata operations. + meta: + type: dict + default: {} + description: + - A hash of items to set as metadata values on a container + private: + description: + - Used to set a container as private, removing it from the CDN. B(Warning!) 
+ Private containers, if previously made public, can have live objects + available until the TTL on cached objects expires + type: bool + default: false + public: + description: + - Used to set a container as public, available via the Cloud Files CDN + type: bool + default: false + region: + type: str + description: + - Region to create an instance in + state: + type: str + description: + - Indicate desired state of the resource + choices: ['present', 'absent', 'list'] + default: present + ttl: + type: int + description: + - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. + Setting a TTL is only appropriate for containers that are public + type: + type: str + description: + - Type of object to do work on, i.e. metadata object or a container object + choices: + - container + - meta + default: container + web_error: + type: str + description: + - Sets an object to be presented as the HTTP error page when accessed by the CDN URL + web_index: + type: str + description: + - Sets an object to be presented as the HTTP index page when accessed by the CDN URL +author: "Paul Durivage (@angstwad)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: "Test Cloud Files Containers" + hosts: local + gather_facts: false + tasks: + - name: "List all containers" + community.general.rax_files: + state: list + + - name: "Create container called 'mycontainer'" + community.general.rax_files: + container: mycontainer + + - name: "Create container 'mycontainer2' with metadata" + community.general.rax_files: + container: mycontainer2 + meta: + key: value + file_for: someuser@example.com + + - name: "Set a container's web index page" + community.general.rax_files: + container: mycontainer + web_index: index.html + + - name: "Set a container's web error page" + community.general.rax_files: + container: mycontainer + web_error: error.html + + - name: "Make container public" + community.general.rax_files: + container: mycontainer + public: true + + - name: "Make container public with a 24 hour TTL" + community.general.rax_files: + container: mycontainer + public: true + ttl: 86400 + + - name: "Make container private" + community.general.rax_files: + container: mycontainer + private: true + +- name: "Test Cloud Files Containers Metadata Storage" + hosts: local + gather_facts: false + tasks: + - name: "Get mycontainer2 metadata" + community.general.rax_files: + container: mycontainer2 + type: meta + + - name: "Set mycontainer2 metadata" + community.general.rax_files: + container: mycontainer2 + type: meta + meta: + uploaded_by: someuser@example.com + + - name: "Remove mycontainer2 metadata" + community.general.rax_files: + container: "mycontainer2" + type: meta + state: absent + meta: + key: "" + file_for: "" +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError as e: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +EXIT_DICT = dict(success=True) +META_PREFIX = 'x-container-meta-' + + +def _get_container(module, cf, container): + try: + return cf.get_container(container) + except pyrax.exc.NoSuchContainer as e: + module.fail_json(msg=e.message) + + +def _fetch_meta(module, container): + EXIT_DICT['meta'] = dict() + try: + for k, v in container.get_metadata().items(): + split_key = 
k.split(META_PREFIX)[-1] + EXIT_DICT['meta'][split_key] = v + except Exception as e: + module.fail_json(msg=e.message) + + +def meta(cf, module, container_, state, meta_, clear_meta): + c = _get_container(module, cf, container_) + + if meta_ and state == 'present': + try: + meta_set = c.set_metadata(meta_, clear=clear_meta) + except Exception as e: + module.fail_json(msg=e.message) + elif meta_ and state == 'absent': + remove_results = [] + for k, v in meta_.items(): + c.remove_metadata_key(k) + remove_results.append(k) + EXIT_DICT['deleted_meta_keys'] = remove_results + elif state == 'absent': + remove_results = [] + for k, v in c.get_metadata().items(): + c.remove_metadata_key(k) + remove_results.append(k) + EXIT_DICT['deleted_meta_keys'] = remove_results + + _fetch_meta(module, c) + _locals = locals().keys() + + EXIT_DICT['container'] = c.name + if 'meta_set' in _locals or 'remove_results' in _locals: + EXIT_DICT['changed'] = True + + module.exit_json(**EXIT_DICT) + + +def container(cf, module, container_, state, meta_, clear_meta, ttl, public, + private, web_index, web_error): + if public and private: + module.fail_json(msg='container cannot be simultaneously ' + 'set to public and private') + + if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error): + module.fail_json(msg='state cannot be omitted when setting/removing ' + 'attributes on a container') + + if state == 'list': + # We don't care if attributes are specified, let's list containers + EXIT_DICT['containers'] = cf.list_containers() + module.exit_json(**EXIT_DICT) + + try: + c = cf.get_container(container_) + except pyrax.exc.NoSuchContainer as e: + # Make the container if state=present, otherwise bomb out + if state == 'present': + try: + c = cf.create_container(container_) + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['changed'] = True + EXIT_DICT['created'] = True + else: + module.fail_json(msg=e.message) + else: + # Successfully grabbed a container object + # Delete if state is absent + if state == 'absent': + try: + cont_deleted = c.delete() + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['deleted'] = True + + if meta_: + try: + meta_set = c.set_metadata(meta_, clear=clear_meta) + except Exception as e: + module.fail_json(msg=e.message) + finally: + _fetch_meta(module, c) + + if ttl: + try: + c.cdn_ttl = ttl + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['ttl'] = c.cdn_ttl + + if public: + try: + cont_public = c.make_public() + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, + ssl_url=c.cdn_ssl_uri, + streaming_url=c.cdn_streaming_uri, + ios_uri=c.cdn_ios_uri) + + if private: + try: + cont_private = c.make_private() + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['set_private'] = True + + if web_index: + try: + cont_web_index = c.set_web_index_page(web_index) + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['set_index'] = True + finally: + _fetch_meta(module, c) + + if web_error: + try: + cont_err_index = c.set_web_error_page(web_error) + except Exception as e: + module.fail_json(msg=e.message) + else: + EXIT_DICT['set_error'] = True + finally: + _fetch_meta(module, c) + + EXIT_DICT['container'] = c.name + EXIT_DICT['objs_in_container'] = c.object_count + EXIT_DICT['total_bytes'] = c.total_bytes + + _locals = locals().keys() + if ('cont_deleted' in _locals + or 
'meta_set' in _locals + or 'cont_public' in _locals + or 'cont_private' in _locals + or 'cont_web_index' in _locals + or 'cont_err_index' in _locals): + EXIT_DICT['changed'] = True + + module.exit_json(**EXIT_DICT) + + +def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, + private, web_index, web_error): + """ Dispatch from here to work with metadata or file objects """ + cf = pyrax.cloudfiles + + if cf is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if typ == "container": + container(cf, module, container_, state, meta_, clear_meta, ttl, + public, private, web_index, web_error) + else: + meta(cf, module, container_, state, meta_, clear_meta) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + container=dict(), + state=dict(choices=['present', 'absent', 'list'], + default='present'), + meta=dict(type='dict', default=dict()), + clear_meta=dict(default=False, type='bool'), + type=dict(choices=['container', 'meta'], default='container'), + ttl=dict(type='int'), + public=dict(default=False, type='bool'), + private=dict(default=False, type='bool'), + web_index=dict(), + web_error=dict() + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + container_ = module.params.get('container') + state = module.params.get('state') + meta_ = module.params.get('meta') + clear_meta = module.params.get('clear_meta') + typ = module.params.get('type') + ttl = module.params.get('ttl') + public = module.params.get('public') + private = module.params.get('private') + web_index = module.params.get('web_index') + web_error = module.params.get('web_error') + + if state in ['present', 'absent'] and not container_: + module.fail_json(msg='please specify a container name') + if clear_meta and not typ == 'meta': + module.fail_json(msg='clear_meta can only be used when setting ' + 'metadata') + + setup_rax_module(module, pyrax) + cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, + private, web_index, web_error) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_files_objects.py b/ansible_collections/community/general/plugins/modules/rax_files_objects.py new file mode 100644 index 000000000..08a5cd4e2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_files_objects.py @@ -0,0 +1,558 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2013, Paul Durivage +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_files_objects +short_description: Upload, download, and delete objects in Rackspace Cloud Files +description: + - Upload, download, and delete objects in Rackspace Cloud Files. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. 
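# Note: for the remote-only operations (method=delete and type=meta),
# objects may be named through either src or dest, but not both at once;
# the module fails with an "ambiguous instructions" error when both are
# supplied (see the src and dest descriptions below).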
+attributes: + check_mode: + support: none + diff_mode: + support: none +options: + clear_meta: + description: + - Optionally clear existing metadata when applying metadata to existing objects. + Selecting this option is only appropriate when setting I(type=meta). + type: bool + default: false + container: + type: str + description: + - The container to use for file object operations. + required: true + dest: + type: str + description: + - The destination of a C(get) operation; i.e. a local directory, C(/home/user/myfolder). + Used to specify the destination of an operation on a remote object; i.e. a file name, + C(file1), or a comma-separated list of remote objects, C(file1,file2,file17). + expires: + type: int + description: + - Used to set an expiration in seconds on an uploaded file or folder. + meta: + type: dict + default: {} + description: + - Items to set as metadata values on an uploaded file or folder. + method: + type: str + description: + - > + The method of operation to be performed: C(put) to upload files, C(get) to download files or + C(delete) to remove remote objects in Cloud Files. + choices: + - get + - put + - delete + default: get + src: + type: str + description: + - Source from which to upload files. Used to specify a remote object as a source for + an operation, i.e. a file name, C(file1), or a comma-separated list of remote objects, + C(file1,file2,file17). Parameters I(src) and I(dest) are mutually exclusive on remote-only object operations + structure: + description: + - Used to specify whether to maintain nested directory structure when downloading objects + from Cloud Files. Setting to false downloads the contents of a container to a single, + flat directory + type: bool + default: true + type: + type: str + description: + - Type of object to do work on + - Metadata object or a file object + choices: + - file + - meta + default: file +author: "Paul Durivage (@angstwad)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: "Test Cloud Files Objects" + hosts: local + gather_facts: false + tasks: + - name: "Get objects from test container" + community.general.rax_files_objects: + container: testcont + dest: ~/Downloads/testcont + + - name: "Get single object from test container" + community.general.rax_files_objects: + container: testcont + src: file1 + dest: ~/Downloads/testcont + + - name: "Get several objects from test container" + community.general.rax_files_objects: + container: testcont + src: file1,file2,file3 + dest: ~/Downloads/testcont + + - name: "Delete one object in test container" + community.general.rax_files_objects: + container: testcont + method: delete + dest: file1 + + - name: "Delete several objects in test container" + community.general.rax_files_objects: + container: testcont + method: delete + dest: file2,file3,file4 + + - name: "Delete all objects in test container" + community.general.rax_files_objects: + container: testcont + method: delete + + - name: "Upload all files to test container" + community.general.rax_files_objects: + container: testcont + method: put + src: ~/Downloads/onehundred + + - name: "Upload one file to test container" + community.general.rax_files_objects: + container: testcont + method: put + src: ~/Downloads/testcont/file1 + + - name: "Upload one file to test container with metadata" + community.general.rax_files_objects: + container: testcont + src: ~/Downloads/testcont/file2 + method: put + 
meta: + testkey: testdata + who_uploaded_this: someuser@example.com + + - name: "Upload one file to test container with TTL of 60 seconds" + community.general.rax_files_objects: + container: testcont + method: put + src: ~/Downloads/testcont/file3 + expires: 60 + + - name: "Attempt to get remote object that does not exist" + community.general.rax_files_objects: + container: testcont + method: get + src: FileThatDoesNotExist.jpg + dest: ~/Downloads/testcont + ignore_errors: true + + - name: "Attempt to delete remote object that does not exist" + community.general.rax_files_objects: + container: testcont + method: delete + dest: FileThatDoesNotExist.jpg + ignore_errors: true + +- name: "Test Cloud Files Objects Metadata" + hosts: local + gather_facts: false + tasks: + - name: "Get metadata on one object" + community.general.rax_files_objects: + container: testcont + type: meta + dest: file2 + + - name: "Get metadata on several objects" + community.general.rax_files_objects: + container: testcont + type: meta + src: file2,file1 + + - name: "Set metadata on an object" + community.general.rax_files_objects: + container: testcont + type: meta + dest: file17 + method: put + meta: + key1: value1 + key2: value2 + clear_meta: true + + - name: "Verify metadata is set" + community.general.rax_files_objects: + container: testcont + type: meta + src: file17 + + - name: "Delete metadata" + community.general.rax_files_objects: + container: testcont + type: meta + dest: file17 + method: delete + meta: + key1: '' + key2: '' + + - name: "Get metadata on all objects" + community.general.rax_files_objects: + container: testcont + type: meta +''' + +import os + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +EXIT_DICT = dict(success=False) +META_PREFIX = 'x-object-meta-' + + +def _get_container(module, cf, container): + try: + return cf.get_container(container) + except pyrax.exc.NoSuchContainer as e: + module.fail_json(msg=e.message) + + +def _upload_folder(cf, folder, container, ttl=None, headers=None): + """ Uploads a folder to Cloud Files. + """ + total_bytes = 0 + for root, dummy, files in os.walk(folder): + for fname in files: + full_path = os.path.join(root, fname) + obj_name = os.path.relpath(full_path, folder) + obj_size = os.path.getsize(full_path) + cf.upload_file(container, full_path, obj_name=obj_name, return_none=True, ttl=ttl, headers=headers) + total_bytes += obj_size + return total_bytes + + +def upload(module, cf, container, src, dest, meta, expires): + """ Uploads a single object or a folder to Cloud Files. Optionally sets + metadata, a TTL value (expires), or Content-Disposition and Content-Encoding + headers.
+ """ + if not src: + module.fail_json(msg='src must be specified when uploading') + + c = _get_container(module, cf, container) + src = os.path.abspath(os.path.expanduser(src)) + is_dir = os.path.isdir(src) + + if not is_dir and not os.path.isfile(src) or not os.path.exists(src): + module.fail_json(msg='src must be a file or a directory') + if dest and is_dir: + module.fail_json(msg='dest cannot be set when whole ' + 'directories are uploaded') + + cont_obj = None + total_bytes = 0 + try: + if dest and not is_dir: + cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta) + elif is_dir: + total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta) + else: + cont_obj = c.upload_file(src, ttl=expires, headers=meta) + except Exception as e: + module.fail_json(msg=e.message) + + EXIT_DICT['success'] = True + EXIT_DICT['container'] = c.name + EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name) + if cont_obj or total_bytes > 0: + EXIT_DICT['changed'] = True + if meta: + EXIT_DICT['meta'] = dict(updated=True) + + if cont_obj: + EXIT_DICT['bytes'] = cont_obj.total_bytes + EXIT_DICT['etag'] = cont_obj.etag + else: + EXIT_DICT['bytes'] = total_bytes + + module.exit_json(**EXIT_DICT) + + +def download(module, cf, container, src, dest, structure): + """ Download objects from Cloud Files to a local path specified by "dest". + Optionally disable maintaining a directory structure by by passing a + false value to "structure". + """ + # Looking for an explicit destination + if not dest: + module.fail_json(msg='dest is a required argument when ' + 'downloading from Cloud Files') + + # Attempt to fetch the container by name + c = _get_container(module, cf, container) + + # Accept a single object name or a comma-separated list of objs + # If not specified, get the entire container + if src: + objs = map(str.strip, src.split(',')) + else: + objs = c.get_object_names() + + dest = os.path.abspath(os.path.expanduser(dest)) + is_dir = os.path.isdir(dest) + + if not is_dir: + module.fail_json(msg='dest must be a directory') + + try: + results = [c.download_object(obj, dest, structure=structure) for obj in objs] + except Exception as e: + module.fail_json(msg=e.message) + + len_results = len(results) + len_objs = len(objs) + + EXIT_DICT['container'] = c.name + EXIT_DICT['requested_downloaded'] = results + if results: + EXIT_DICT['changed'] = True + if len_results == len_objs: + EXIT_DICT['success'] = True + EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest) + else: + EXIT_DICT['msg'] = "Error: only %s of %s objects were " \ + "downloaded" % (len_results, len_objs) + module.exit_json(**EXIT_DICT) + + +def delete(module, cf, container, src, dest): + """ Delete specific objects by proving a single file name or a + comma-separated list to src OR dest (but not both). Omitting file name(s) + assumes the entire container is to be deleted. 
+ """ + if src and dest: + module.fail_json(msg="Error: ambiguous instructions; files to be deleted " + "have been specified on both src and dest args") + + c = _get_container(module, cf, container) + + objs = dest or src + if objs: + objs = map(str.strip, objs.split(',')) + else: + objs = c.get_object_names() + + num_objs = len(objs) + + try: + results = [c.delete_object(obj) for obj in objs] + except Exception as e: + module.fail_json(msg=e.message) + + num_deleted = results.count(True) + + EXIT_DICT['container'] = c.name + EXIT_DICT['deleted'] = num_deleted + EXIT_DICT['requested_deleted'] = objs + + if num_deleted: + EXIT_DICT['changed'] = True + + if num_objs == num_deleted: + EXIT_DICT['success'] = True + EXIT_DICT['msg'] = "%s objects deleted" % num_deleted + else: + EXIT_DICT['msg'] = ("Error: only %s of %s objects " + "deleted" % (num_deleted, num_objs)) + module.exit_json(**EXIT_DICT) + + +def get_meta(module, cf, container, src, dest): + """ Get metadata for a single file, comma-separated list, or entire + container + """ + if src and dest: + module.fail_json(msg="Error: ambiguous instructions; files to be deleted " + "have been specified on both src and dest args") + + c = _get_container(module, cf, container) + + objs = dest or src + if objs: + objs = map(str.strip, objs.split(',')) + else: + objs = c.get_object_names() + + try: + results = dict() + for obj in objs: + meta = c.get_object(obj).get_metadata() + results[obj] = dict((k.split(META_PREFIX)[-1], v) for k, v in meta.items()) + except Exception as e: + module.fail_json(msg=e.message) + + EXIT_DICT['container'] = c.name + if results: + EXIT_DICT['meta_results'] = results + EXIT_DICT['success'] = True + module.exit_json(**EXIT_DICT) + + +def put_meta(module, cf, container, src, dest, meta, clear_meta): + """ Set metadata on a container, single file, or comma-separated list. + Passing a true value to clear_meta clears the metadata stored in Cloud + Files before setting the new metadata to the value of "meta". + """ + if src and dest: + module.fail_json(msg="Error: ambiguous instructions; files to set meta" + " have been specified on both src and dest args") + objs = dest or src + objs = map(str.strip, objs.split(',')) + + c = _get_container(module, cf, container) + + try: + results = [c.get_object(obj).set_metadata(meta, clear=clear_meta) for obj in objs] + except Exception as e: + module.fail_json(msg=e.message) + + EXIT_DICT['container'] = c.name + EXIT_DICT['success'] = True + if results: + EXIT_DICT['changed'] = True + EXIT_DICT['num_changed'] = True + module.exit_json(**EXIT_DICT) + + +def delete_meta(module, cf, container, src, dest, meta): + """ Removes metadata keys and values specified in meta, if any. 
Deletes on + all objects specified by src or dest (but not both), if any; otherwise it + deletes keys on all objects in the container + """ + if src and dest: + module.fail_json(msg="Error: ambiguous instructions; meta keys to be " + "deleted have been specified on both src and dest" + " args") + objs = dest or src + objs = map(str.strip, objs.split(',')) + + c = _get_container(module, cf, container) + + try: + for obj in objs: + o = c.get_object(obj) + results = [ + o.remove_metadata_key(k) + for k in (meta or o.get_metadata()) + ] + except Exception as e: + module.fail_json(msg=e.message) + + EXIT_DICT['container'] = c.name + EXIT_DICT['success'] = True + if results: + EXIT_DICT['changed'] = True + EXIT_DICT['num_deleted'] = len(results) + module.exit_json(**EXIT_DICT) + + +def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, + structure, expires): + """ Dispatch from here to work with metadata or file objects """ + cf = pyrax.cloudfiles + + if cf is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if typ == "file": + if method == 'get': + download(module, cf, container, src, dest, structure) + + if method == 'put': + upload(module, cf, container, src, dest, meta, expires) + + if method == 'delete': + delete(module, cf, container, src, dest) + + else: + if method == 'get': + get_meta(module, cf, container, src, dest) + + if method == 'put': + put_meta(module, cf, container, src, dest, meta, clear_meta) + + if method == 'delete': + delete_meta(module, cf, container, src, dest, meta) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + container=dict(required=True), + src=dict(), + dest=dict(), + method=dict(default='get', choices=['put', 'get', 'delete']), + type=dict(default='file', choices=['file', 'meta']), + meta=dict(type='dict', default=dict()), + clear_meta=dict(default=False, type='bool'), + structure=dict(default=True, type='bool'), + expires=dict(type='int'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + container = module.params.get('container') + src = module.params.get('src') + dest = module.params.get('dest') + method = module.params.get('method') + typ = module.params.get('type') + meta = module.params.get('meta') + clear_meta = module.params.get('clear_meta') + structure = module.params.get('structure') + expires = module.params.get('expires') + + if clear_meta and not typ == 'meta': + module.fail_json(msg='clear_meta can only be used when setting metadata') + + setup_rax_module(module, pyrax) + cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_identity.py b/ansible_collections/community/general/plugins/modules/rax_identity.py new file mode 100644 index 000000000..19f803953 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_identity.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + 
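# Both Cloud Files modules above reduce the API's prefixed metadata keys
# (META_PREFIX, e.g. 'x-object-meta-') to their bare names before
# returning them. A minimal standalone sketch of that idiom; the helper
# name and sample data are illustrative only, not part of the collection:
def _strip_meta_prefix(metadata, prefix='x-object-meta-'):
    # 'x-object-meta-file-for' becomes 'file-for'; keys without the
    # prefix pass through unchanged.
    return dict((k.split(prefix)[-1], v) for k, v in metadata.items())
# e.g. _strip_meta_prefix({'x-object-meta-file-for': 'user@example.com'})
# returns {'file-for': 'user@example.com'}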
+ +DOCUMENTATION = ''' +--- +module: rax_identity +short_description: Load Rackspace Cloud Identity +description: + - Verifies Rackspace Cloud credentials and returns identity information. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Indicate desired state of the resource + choices: ['present'] + default: present + required: false +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Load Rackspace Cloud Identity + gather_facts: false + hosts: local + connection: local + tasks: + - name: Load Identity + local_action: + module: rax_identity + credentials: ~/.raxpub + region: DFW + register: rackspace_identity +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict, + setup_rax_module) + + +def cloud_identity(module, state, identity): + instance = dict( + authenticated=identity.authenticated, + credentials=identity._creds_file + ) + changed = False + + instance.update(rax_to_dict(identity)) + instance['services'] = instance.get('services', {}).keys() + + if state == 'present': + if not identity.authenticated: + module.fail_json(msg='Credentials could not be verified!') + + module.exit_json(changed=changed, identity=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + if not pyrax.identity: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + cloud_identity(module, state, pyrax.identity) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_keypair.py b/ansible_collections/community/general/plugins/modules/rax_keypair.py new file mode 100644 index 000000000..22750f03c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_keypair.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_keypair +short_description: Create a keypair for use with Rackspace Cloud Servers +description: + - Create a keypair for use with Rackspace Cloud Servers. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. 
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + description: + - Name of keypair + required: true + public_key: + type: str + description: + - Public Key string to upload. Can be a file path or string + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present +author: "Matt Martz (@sivel)" +notes: + - Keypairs cannot be manipulated, only created and deleted. To "update" a + keypair you must first delete and then recreate. + - The ability to specify a file path for the public key was added in 1.7 +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Create a keypair + hosts: localhost + gather_facts: false + tasks: + - name: Keypair request + local_action: + module: rax_keypair + credentials: ~/.raxpub + name: my_keypair + region: DFW + register: keypair + - name: Create local public key + local_action: + module: copy + content: "{{ keypair.keypair.public_key }}" + dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub" + - name: Create local private key + local_action: + module: copy + content: "{{ keypair.keypair.private_key }}" + dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" + +- name: Create a keypair + hosts: localhost + gather_facts: false + tasks: + - name: Keypair request + local_action: + module: rax_keypair + credentials: ~/.raxpub + name: my_keypair + public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" + region: DFW + register: keypair +''' +import os + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, + rax_required_together, + rax_to_dict, + setup_rax_module, + ) + + +def rax_keypair(module, name, public_key, state): + changed = False + + cs = pyrax.cloudservers + + if cs is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + keypair = {} + + if state == 'present': + if public_key and os.path.isfile(public_key): + try: + f = open(public_key) + public_key = f.read() + f.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % public_key) + + try: + keypair = cs.keypairs.find(name=name) + except cs.exceptions.NotFound: + try: + keypair = cs.keypairs.create(name, public_key) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + elif state == 'absent': + try: + keypair = cs.keypairs.find(name=name) + except Exception: + pass + + if keypair: + try: + keypair.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + name=dict(required=True), + public_key=dict(), + state=dict(default='present', choices=['absent', 'present']), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + name = module.params.get('name') + public_key = module.params.get('public_key') + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + rax_keypair(module, name, public_key, state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_meta.py b/ansible_collections/community/general/plugins/modules/rax_meta.py new file mode 100644 index 000000000..751300858 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_meta.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_meta +short_description: Manipulate metadata for Rackspace Cloud Servers +description: + - Manipulate metadata for Rackspace Cloud Servers. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. 
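# Note: values in meta are normalized before they are submitted: lists
# are joined into comma-separated strings, dicts are JSON-encoded, and
# any other non-string value is coerced to a string (see the rax_meta()
# function below).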
+attributes: + check_mode: + support: none + diff_mode: + support: none +options: + address: + type: str + description: + - Server IP address to modify metadata for, will match any IP assigned to + the server + id: + type: str + description: + - Server ID to modify metadata for + name: + type: str + description: + - Server name to modify metadata for + meta: + type: dict + default: {} + description: + - A hash of metadata to associate with the instance +author: "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Set metadata for a server + hosts: all + gather_facts: false + tasks: + - name: Set metadata + local_action: + module: rax_meta + credentials: ~/.raxpub + name: "{{ inventory_hostname }}" + region: DFW + meta: + group: primary_group + groups: + - group_two + - group_three + app: my_app + + - name: Clear metadata + local_action: + module: rax_meta + credentials: ~/.raxpub + name: "{{ inventory_hostname }}" + region: DFW +''' + +import json + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module +from ansible.module_utils.six import string_types + + +def rax_meta(module, address, name, server_id, meta): + changed = False + + cs = pyrax.cloudservers + + if cs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + search_opts = {} + if name: + search_opts = dict(name='^%s$' % name) + try: + servers = cs.servers.list(search_opts=search_opts) + except Exception as e: + module.fail_json(msg='%s' % e.message) + elif address: + servers = [] + try: + for server in cs.servers.list(): + for addresses in server.networks.values(): + if address in addresses: + servers.append(server) + break + except Exception as e: + module.fail_json(msg='%s' % e.message) + elif server_id: + servers = [] + try: + servers.append(cs.servers.get(server_id)) + except Exception as e: + pass + + if len(servers) > 1: + module.fail_json(msg='Multiple servers found matching provided ' + 'search parameters') + elif not servers: + module.fail_json(msg='Failed to find a server matching provided ' + 'search parameters') + + # Normalize and ensure all metadata values are strings + for k, v in meta.items(): + if isinstance(v, list): + meta[k] = ','.join(['%s' % i for i in v]) + elif isinstance(v, dict): + meta[k] = json.dumps(v) + elif not isinstance(v, string_types): + meta[k] = '%s' % v + + server = servers[0] + if server.metadata == meta: + changed = False + else: + changed = True + removed = set(server.metadata.keys()).difference(meta.keys()) + cs.servers.delete_meta(server, list(removed)) + cs.servers.set_meta(server, meta) + server.get() + + module.exit_json(changed=changed, meta=server.metadata) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + address=dict(), + id=dict(), + name=dict(), + meta=dict(type='dict', default=dict()), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + mutually_exclusive=[['address', 'id', 'name']], + required_one_of=[['address', 'id', 'name']], + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + address = 
module.params.get('address') + server_id = module.params.get('id') + name = module.params.get('name') + meta = module.params.get('meta') + + setup_rax_module(module, pyrax) + + rax_meta(module, address, name, server_id, meta) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py b/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py new file mode 100644 index 000000000..f6e650ec0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_mon_alarm +short_description: Create or delete a Rackspace Cloud Monitoring alarm +description: + - Create or delete a Rackspace Cloud Monitoring alarm that associates an + existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with + criteria that specify what conditions will trigger which levels of + notifications. Rackspace monitoring module flow | rax_mon_entity -> + rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> + *rax_mon_alarm*. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Ensure that the alarm with this C(label) exists or does not exist. + choices: [ "present", "absent" ] + required: false + default: present + label: + type: str + description: + - Friendly name for this alarm, used to achieve idempotence. Must be a String + between 1 and 255 characters long. + required: true + entity_id: + type: str + description: + - ID of the entity this alarm is attached to. May be acquired by registering + the value of a rax_mon_entity task. + required: true + check_id: + type: str + description: + - ID of the check that should be alerted on. May be acquired by registering + the value of a rax_mon_check task. + required: true + notification_plan_id: + type: str + description: + - ID of the notification plan to trigger if this alarm fires. May be acquired + by registering the value of a rax_mon_notification_plan task. + required: true + criteria: + type: str + description: + - Alarm DSL that describes alerting conditions and their output states. Must + be between 1 and 16384 characters long. See + http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html + for a reference on the alerting language. + disabled: + description: + - If yes, create this alarm, but leave it in an inactive state. Defaults to + no. + type: bool + default: false + metadata: + type: dict + description: + - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String + keys and values between 1 and 255 characters long. 
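Alarm idempotence rests on matching C(label) against the alarms already attached to the entity. A rough sketch of that lookup, reusing the same pyrax calls the module itself makes (the helper name and return convention are illustrative only):

    import pyrax

    def find_alarm_by_label(entity_id, label):
        # List the entity's alarms and keep exact label matches, as the
        # module does before deciding between create, update, and delete.
        cm = pyrax.cloud_monitoring
        matches = [a for a in cm.list_alarms(entity_id) if a.label == label]
        # Zero matches means "create"; exactly one is the alarm to manage;
        # more than one is ambiguous, and the module fails in that case.
        return matches[0] if len(matches) == 1 else None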
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+  - community.general.rackspace.openstack
+  - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+  gather_facts: false
+  hosts: local
+  connection: local
+  tasks:
+    - name: Ensure that a specific alarm exists.
+      community.general.rax_mon_alarm:
+        credentials: ~/.rax_pub
+        state: present
+        label: uhoh
+        entity_id: "{{ the_entity['entity']['id'] }}"
+        check_id: "{{ the_check['check']['id'] }}"
+        notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+        criteria: >
+          if (rate(metric['average']) > 10) {
+            return new AlarmStatus(WARNING);
+          }
+          return new AlarmStatus(OK);
+      register: the_alarm
+'''
+
+try:
+    import pyrax
+    HAS_PYRAX = True
+except ImportError:
+    HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
+          disabled, metadata):
+
+    if len(label) < 1 or len(label) > 255:
+        module.fail_json(msg='label must be between 1 and 255 characters long')
+
+    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
+        module.fail_json(msg='criteria must be between 1 and 16384 characters long')
+
+    changed = False
+    alarm = None
+
+    cm = pyrax.cloud_monitoring
+    if not cm:
+        module.fail_json(msg='Failed to instantiate client. This typically '
+                             'indicates an invalid region or an incorrectly '
+                             'capitalized region name.')
+
+    existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
+
+    if existing:
+        alarm = existing[0]
+
+    if state == 'present':
+        should_create = False
+        should_update = False
+        should_delete = False
+
+        if len(existing) > 1:
+            module.fail_json(msg='%s existing alarms have the label %s.' %
+                                 (len(existing), label))
+
+        if alarm:
+            if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
+                should_delete = should_create = True
+
+            # disabled is always a boolean here, so compare it directly;
+            # metadata and criteria may be omitted and are only compared
+            # when they were actually supplied.
+            should_update = (disabled != alarm.disabled) or \
+                (metadata and metadata != alarm.metadata) or \
+                (criteria and criteria != alarm.criteria)
+
+            if should_update and not should_delete:
+                cm.update_alarm(entity=entity_id, alarm=alarm,
+                                criteria=criteria, disabled=disabled,
+                                label=label, metadata=metadata)
+                changed = True
+
+            if should_delete:
+                alarm.delete()
+                changed = True
+        else:
+            should_create = True
+
+        if should_create:
+            alarm = cm.create_alarm(entity=entity_id, check=check_id,
+                                    notification_plan=notification_plan_id,
+                                    criteria=criteria, disabled=disabled, label=label,
+                                    metadata=metadata)
+            changed = True
+    else:
+        for a in existing:
+            a.delete()
+            changed = True
+
+    if alarm:
+        alarm_dict = {
+            "id": alarm.id,
+            "label": alarm.label,
+            "check_id": alarm.check_id,
+            "notification_plan_id": alarm.notification_plan_id,
+            "criteria": alarm.criteria,
+            "disabled": alarm.disabled,
+            "metadata": alarm.metadata
+        }
+        module.exit_json(changed=changed, alarm=alarm_dict)
+    else:
+        module.exit_json(changed=changed)
+
+
+def main():
+    argument_spec = rax_argument_spec()
+    argument_spec.update(
+        dict(
+            state=dict(default='present', choices=['present', 'absent']),
+            label=dict(required=True),
+            entity_id=dict(required=True),
+            check_id=dict(required=True),
+            notification_plan_id=dict(required=True),
+            criteria=dict(),
+            disabled=dict(type='bool', default=False),
+            metadata=dict(type='dict')
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=rax_required_together()
+    )
+
+    if not HAS_PYRAX:
+        module.fail_json(msg='pyrax is required for this module')
+
+    state = module.params.get('state')
+    label = module.params.get('label')
+    entity_id = module.params.get('entity_id')
+    check_id = module.params.get('check_id')
+    notification_plan_id = module.params.get('notification_plan_id')
+    criteria = module.params.get('criteria')
+    disabled = module.boolean(module.params.get('disabled'))
+    metadata = module.params.get('metadata')
+
+    setup_rax_module(module, pyrax)
+
+    alarm(module, state, label, entity_id, check_id, notification_plan_id,
+          criteria, disabled, metadata)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_check.py b/ansible_collections/community/general/plugins/modules/rax_mon_check.py
new file mode 100644
index 000000000..6a0ad03a3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_check.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+  existing entity.
+description:
+  - Create or delete a Rackspace Cloud Monitoring check associated with an
+    existing rax_mon_entity. A check is a specific test or measurement that is
+    performed, possibly from different monitoring zones, on the systems you
+    monitor.
Rackspace monitoring module flow | rax_mon_entity -> + *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> + rax_mon_alarm + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Ensure that a check with this C(label) exists or does not exist. + choices: ["present", "absent"] + default: present + entity_id: + type: str + description: + - ID of the rax_mon_entity to target with this check. + required: true + label: + type: str + description: + - Defines a label for this check, between 1 and 64 characters long. + required: true + check_type: + type: str + description: + - The type of check to create. C(remote.) checks may be created on any + rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities + that have a non-null C(agent_id). + - | + Choices for this option are: + - C(remote.dns) + - C(remote.ftp-banner) + - C(remote.http) + - C(remote.imap-banner) + - C(remote.mssql-banner) + - C(remote.mysql-banner) + - C(remote.ping) + - C(remote.pop3-banner) + - C(remote.postgresql-banner) + - C(remote.smtp-banner) + - C(remote.smtp) + - C(remote.ssh) + - C(remote.tcp) + - C(remote.telnet-banner) + - C(agent.filesystem) + - C(agent.memory) + - C(agent.load_average) + - C(agent.cpu) + - C(agent.disk) + - C(agent.network) + - C(agent.plugin) + required: true + monitoring_zones_poll: + type: str + description: + - Comma-separated list of the names of the monitoring zones the check should + run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, + mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. + target_hostname: + type: str + description: + - One of I(target_hostname) and I(target_alias) is required for remote.* checks, + but prohibited for agent.* checks. The hostname this check should target. + Must be a valid IPv4, IPv6, or FQDN. + target_alias: + type: str + description: + - One of I(target_alias) and I(target_hostname) is required for remote.* checks, + but prohibited for agent.* checks. Use the corresponding key in the entity's + I(ip_addresses) hash to resolve an IP address to target. + details: + type: dict + default: {} + description: + - Additional details specific to the check type. Must be a hash of strings + between 1 and 255 characters long, or an array or object containing 0 to + 256 items. + disabled: + description: + - If C(true), ensure the check is created, but don't actually use it yet. + type: bool + default: false + metadata: + type: dict + default: {} + description: + - Hash of arbitrary key-value pairs to accompany this check if it fires. + Keys and values must be strings between 1 and 255 characters long. + period: + type: int + description: + - The number of seconds between each time the check is performed. Must be + greater than the minimum period set on your account. + timeout: + type: int + description: + - The number of seconds this check will wait when attempting to collect + results. Must be less than the period. 
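The option notes above describe constraints that this module leaves for the monitoring API to enforce: remote.* checks need monitoring zones and a target, agent.* checks must not set them, and I(timeout) has to stay below I(period). A hypothetical pre-flight validator, not part of the module, makes those rules concrete:

    def validate_check_options(check_type, monitoring_zones_poll,
                               target_hostname, target_alias,
                               period=None, timeout=None):
        # Illustrative only: encode the documented remote./agent. constraints.
        if check_type.startswith('remote.'):
            if not monitoring_zones_poll:
                raise ValueError('remote.* checks require monitoring_zones_poll')
            if not (target_hostname or target_alias):
                raise ValueError('remote.* checks require target_hostname or target_alias')
        elif check_type.startswith('agent.'):
            if monitoring_zones_poll or target_hostname or target_alias:
                raise ValueError('agent.* checks accept neither monitoring zones nor targets')
        else:
            raise ValueError('check_type must begin with remote. or agent.')
        if period is not None and timeout is not None and timeout >= period:
            raise ValueError('timeout must be less than period')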
+author: Ash Wilson (@smashwilson) +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Create a monitoring check + gather_facts: false + hosts: local + connection: local + tasks: + - name: Associate a check with an existing entity. + community.general.rax_mon_check: + credentials: ~/.rax_pub + state: present + entity_id: "{{ the_entity['entity']['id'] }}" + label: the_check + check_type: remote.ping + monitoring_zones_poll: mziad,mzord,mzdfw + details: + count: 10 + meta: + hurf: durf + register: the_check +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout): + + # Coerce attributes. + + if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): + monitoring_zones_poll = [monitoring_zones_poll] + + if period: + period = int(period) + + if timeout: + timeout = int(timeout) + + changed = False + check = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + entity = cm.get_entity(entity_id) + if not entity: + module.fail_json(msg='Failed to instantiate entity. "%s" may not be' + ' a valid entity id.' % entity_id) + + existing = [e for e in entity.list_checks() if e.label == label] + + if existing: + check = existing[0] + + if state == 'present': + if len(existing) > 1: + module.fail_json(msg='%s existing checks have a label of %s.' % + (len(existing), label)) + + should_delete = False + should_create = False + should_update = False + + if check: + # Details may include keys set to default values that are not + # included in the initial creation. + # + # Only force a recreation of the check if one of the *specified* + # keys is missing or has a different value. + if details: + for (key, value) in details.items(): + if key not in check.details: + should_delete = should_create = True + elif value != check.details[key]: + should_delete = should_create = True + + should_update = label != check.label or \ + (target_hostname and target_hostname != check.target_hostname) or \ + (target_alias and target_alias != check.target_alias) or \ + (disabled != check.disabled) or \ + (metadata and metadata != check.metadata) or \ + (period and period != check.period) or \ + (timeout and timeout != check.timeout) or \ + (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) + + if should_update and not should_delete: + check.update(label=label, + disabled=disabled, + metadata=metadata, + monitoring_zones_poll=monitoring_zones_poll, + timeout=timeout, + period=period, + target_alias=target_alias, + target_hostname=target_hostname) + changed = True + else: + # The check doesn't exist yet. 
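+            # Fall through and create it below with all of the requested
+            # attributes; there is no existing check to diff against.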
+ should_create = True + + if should_delete: + check.delete() + + if should_create: + check = cm.create_check(entity, + label=label, + check_type=check_type, + target_hostname=target_hostname, + target_alias=target_alias, + monitoring_zones_poll=monitoring_zones_poll, + details=details, + disabled=disabled, + metadata=metadata, + period=period, + timeout=timeout) + changed = True + elif state == 'absent': + if check: + check.delete() + changed = True + else: + module.fail_json(msg='state must be either present or absent.') + + if check: + check_dict = { + "id": check.id, + "label": check.label, + "type": check.type, + "target_hostname": check.target_hostname, + "target_alias": check.target_alias, + "monitoring_zones_poll": check.monitoring_zones_poll, + "details": check.details, + "disabled": check.disabled, + "metadata": check.metadata, + "period": check.period, + "timeout": check.timeout + } + module.exit_json(changed=changed, check=check_dict) + else: + module.exit_json(changed=changed) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + entity_id=dict(required=True), + label=dict(required=True), + check_type=dict(required=True), + monitoring_zones_poll=dict(), + target_hostname=dict(), + target_alias=dict(), + details=dict(type='dict', default={}), + disabled=dict(type='bool', default=False), + metadata=dict(type='dict', default={}), + period=dict(type='int'), + timeout=dict(type='int'), + state=dict(default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + entity_id = module.params.get('entity_id') + label = module.params.get('label') + check_type = module.params.get('check_type') + monitoring_zones_poll = module.params.get('monitoring_zones_poll') + target_hostname = module.params.get('target_hostname') + target_alias = module.params.get('target_alias') + details = module.params.get('details') + disabled = module.boolean(module.params.get('disabled')) + metadata = module.params.get('metadata') + period = module.params.get('period') + timeout = module.params.get('timeout') + + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + cloud_check(module, state, entity_id, label, check_type, + monitoring_zones_poll, target_hostname, target_alias, details, + disabled, metadata, period, timeout) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_entity.py b/ansible_collections/community/general/plugins/modules/rax_mon_entity.py new file mode 100644 index 000000000..b42bd173b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_mon_entity.py @@ -0,0 +1,203 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_mon_entity +short_description: Create or delete a Rackspace Cloud Monitoring entity +description: + - Create or delete a Rackspace Cloud Monitoring entity, which represents a device + to monitor. Entities associate checks and alarms with a target system and + provide a convenient, centralized place to store IP addresses. 
Rackspace + monitoring module flow | *rax_mon_entity* -> rax_mon_check -> + rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + label: + type: str + description: + - Defines a name for this entity. Must be a non-empty string between 1 and + 255 characters long. + required: true + state: + type: str + description: + - Ensure that an entity with this C(name) exists or does not exist. + choices: ["present", "absent"] + default: present + agent_id: + type: str + description: + - Rackspace monitoring agent on the target device to which this entity is + bound. Necessary to collect C(agent.) rax_mon_checks against this entity. + named_ip_addresses: + type: dict + default: {} + description: + - Hash of IP addresses that may be referenced by name by rax_mon_checks + added to this entity. Must be a dictionary of with keys that are names + between 1 and 64 characters long, and values that are valid IPv4 or IPv6 + addresses. + metadata: + type: dict + default: {} + description: + - Hash of arbitrary C(name), C(value) pairs that are passed to associated + rax_mon_alarms. Names and values must all be between 1 and 255 characters + long. +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Entity example + gather_facts: false + hosts: local + connection: local + tasks: + - name: Ensure an entity exists + community.general.rax_mon_entity: + credentials: ~/.rax_pub + state: present + label: my_entity + named_ip_addresses: + web_box: 192.0.2.4 + db_box: 192.0.2.5 + meta: + hurf: durf + register: the_entity +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, + metadata): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for entity in cm.list_entities(): + if label == entity.label: + existing.append(entity) + + entity = None + + if existing: + entity = existing[0] + + if state == 'present': + should_update = False + should_delete = False + should_create = False + + if len(existing) > 1: + module.fail_json(msg='%s existing entities have the label %s.' % + (len(existing), label)) + + if entity: + if named_ip_addresses and named_ip_addresses != entity.ip_addresses: + should_delete = should_create = True + + # Change an existing Entity, unless there's nothing to do. 
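+            # Only the agent binding and metadata can be updated in place;
+            # a changed named-IP map was handled above by scheduling a
+            # delete-and-recreate instead.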
+ should_update = agent_id and agent_id != entity.agent_id or \ + (metadata and metadata != entity.metadata) + + if should_update and not should_delete: + entity.update(agent_id, metadata) + changed = True + + if should_delete: + entity.delete() + else: + should_create = True + + if should_create: + # Create a new Entity. + entity = cm.create_entity(label=label, agent=agent_id, + ip_addresses=named_ip_addresses, + metadata=metadata) + changed = True + else: + # Delete the existing Entities. + for e in existing: + e.delete() + changed = True + + if entity: + entity_dict = { + "id": entity.id, + "name": entity.name, + "agent_id": entity.agent_id, + } + module.exit_json(changed=changed, entity=entity_dict) + else: + module.exit_json(changed=changed) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + agent_id=dict(), + named_ip_addresses=dict(type='dict', default={}), + metadata=dict(type='dict', default={}) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + agent_id = module.params.get('agent_id') + named_ip_addresses = module.params.get('named_ip_addresses') + metadata = module.params.get('metadata') + + setup_rax_module(module, pyrax) + + cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification.py new file mode 100644 index 000000000..91d079359 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_mon_notification.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_mon_notification +short_description: Create or delete a Rackspace Cloud Monitoring notification +description: +- Create or delete a Rackspace Cloud Monitoring notification that specifies a + channel that can be used to communicate alarms, such as email, webhooks, or + PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> + *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Ensure that the notification with this C(label) exists or does not exist. + choices: ['present', 'absent'] + default: present + label: + type: str + description: + - Defines a friendly name for this notification. String between 1 and 255 + characters long. + required: true + notification_type: + type: str + description: + - A supported notification type. + choices: ["webhook", "email", "pagerduty"] + required: true + details: + type: dict + description: + - Dictionary of key-value pairs used to initialize the notification. + Required keys and meanings vary with notification type. 
See
+    http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
+    service-notification-types-crud.html for details.
+    required: true
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+  - community.general.rackspace.openstack
+  - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+  gather_facts: false
+  hosts: local
+  connection: local
+  tasks:
+    - name: Email me when something goes wrong.
+      community.general.rax_mon_notification:
+        credentials: ~/.rax_pub
+        label: omg
+        notification_type: email
+        details:
+          address: me@mailhost.com
+      register: the_notification
+'''
+
+try:
+    import pyrax
+    HAS_PYRAX = True
+except ImportError:
+    HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification(module, state, label, notification_type, details):
+
+    if len(label) < 1 or len(label) > 255:
+        module.fail_json(msg='label must be between 1 and 255 characters long')
+
+    changed = False
+    notification = None
+
+    cm = pyrax.cloud_monitoring
+    if not cm:
+        module.fail_json(msg='Failed to instantiate client. This typically '
+                             'indicates an invalid region or an incorrectly '
+                             'capitalized region name.')
+
+    existing = []
+    for n in cm.list_notifications():
+        if n.label == label:
+            existing.append(n)
+
+    if existing:
+        notification = existing[0]
+
+    if state == 'present':
+        should_update = False
+        should_delete = False
+        should_create = False
+
+        if len(existing) > 1:
+            module.fail_json(msg='%s existing notifications are labelled %s.' %
+                                 (len(existing), label))
+
+        if notification:
+            # The notification type cannot be changed in place, so a type
+            # change forces a delete followed by a recreate.
+            if notification_type != notification.type:
+                should_delete = should_create = True
+
+            should_update = (details != notification.details)
+
+            if should_update and not should_delete:
+                notification.update(details=details)
+                changed = True
+
+            if should_delete:
+                notification.delete()
+                changed = True
+        else:
+            should_create = True
+
+        if should_create:
+            notification = cm.create_notification(notification_type,
+                                                  label=label, details=details)
+            changed = True
+    else:
+        for n in existing:
+            n.delete()
+            changed = True
+
+    if notification:
+        notification_dict = {
+            "id": notification.id,
+            "type": notification.type,
+            "label": notification.label,
+            "details": notification.details
+        }
+        module.exit_json(changed=changed, notification=notification_dict)
+    else:
+        module.exit_json(changed=changed)
+
+
+def main():
+    argument_spec = rax_argument_spec()
+    argument_spec.update(
+        dict(
+            state=dict(default='present', choices=['present', 'absent']),
+            label=dict(required=True),
+            notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
+            details=dict(required=True, type='dict')
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=rax_required_together()
+    )
+
+    if not HAS_PYRAX:
+        module.fail_json(msg='pyrax is required for this module')
+
+    state = module.params.get('state')
+    label = module.params.get('label')
+    notification_type = module.params.get('notification_type')
+    details = module.params.get('details')
+
+    setup_rax_module(module, pyrax)
+
+    notification(module, state, label, notification_type, details)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
new file mode 100644
index 000000000..ac8b189aa
--- /dev/null
+++
b/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_mon_notification_plan +short_description: Create or delete a Rackspace Cloud Monitoring notification + plan. +description: + - Create or delete a Rackspace Cloud Monitoring notification plan by + associating existing rax_mon_notifications with severity levels. Rackspace + monitoring module flow | rax_mon_entity -> rax_mon_check -> + rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm. + - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API. + - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Ensure that the notification plan with this C(label) exists or does not + exist. + choices: ['present', 'absent'] + default: present + label: + type: str + description: + - Defines a friendly name for this notification plan. String between 1 and + 255 characters long. + required: true + critical_state: + type: list + elements: str + description: + - Notification list to use when the alarm state is CRITICAL. Must be an + array of valid rax_mon_notification ids. + warning_state: + type: list + elements: str + description: + - Notification list to use when the alarm state is WARNING. Must be an array + of valid rax_mon_notification ids. + ok_state: + type: list + elements: str + description: + - Notification list to use when the alarm state is OK. Must be an array of + valid rax_mon_notification ids. +author: Ash Wilson (@smashwilson) +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Example notification plan + gather_facts: false + hosts: local + connection: local + tasks: + - name: Establish who gets called when. + community.general.rax_mon_notification_plan: + credentials: ~/.rax_pub + state: present + label: defcon1 + critical_state: + - "{{ everyone['notification']['id'] }}" + warning_state: + - "{{ opsfloor['notification']['id'] }}" + register: defcon1 +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def notification_plan(module, state, label, critical_state, warning_state, ok_state): + + if len(label) < 1 or len(label) > 255: + module.fail_json(msg='label must be between 1 and 255 characters long') + + changed = False + notification_plan = None + + cm = pyrax.cloud_monitoring + if not cm: + module.fail_json(msg='Failed to instantiate client. 
This typically ' + 'indicates an invalid region or an incorrectly ' + 'capitalized region name.') + + existing = [] + for n in cm.list_notification_plans(): + if n.label == label: + existing.append(n) + + if existing: + notification_plan = existing[0] + + if state == 'present': + should_create = False + should_delete = False + + if len(existing) > 1: + module.fail_json(msg='%s notification plans are labelled %s.' % + (len(existing), label)) + + if notification_plan: + should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ + (warning_state and warning_state != notification_plan.warning_state) or \ + (ok_state and ok_state != notification_plan.ok_state) + + if should_delete: + notification_plan.delete() + should_create = True + else: + should_create = True + + if should_create: + notification_plan = cm.create_notification_plan(label=label, + critical_state=critical_state, + warning_state=warning_state, + ok_state=ok_state) + changed = True + else: + for np in existing: + np.delete() + changed = True + + if notification_plan: + notification_plan_dict = { + "id": notification_plan.id, + "critical_state": notification_plan.critical_state, + "warning_state": notification_plan.warning_state, + "ok_state": notification_plan.ok_state, + "metadata": notification_plan.metadata + } + module.exit_json(changed=changed, notification_plan=notification_plan_dict) + else: + module.exit_json(changed=changed) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']), + label=dict(required=True), + critical_state=dict(type='list', elements='str'), + warning_state=dict(type='list', elements='str'), + ok_state=dict(type='list', elements='str'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + + label = module.params.get('label') + critical_state = module.params.get('critical_state') + warning_state = module.params.get('warning_state') + ok_state = module.params.get('ok_state') + + setup_rax_module(module, pyrax) + + notification_plan(module, state, label, critical_state, warning_state, ok_state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_network.py b/ansible_collections/community/general/plugins/modules/rax_network.py new file mode 100644 index 000000000..22f148366 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_network.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_network +short_description: Create / delete an isolated network in Rackspace Public Cloud +description: + - creates / deletes a Rackspace Public Cloud isolated network. 
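State C(present) below is a find-or-create: the network is looked up by label first, and only created when pyrax raises C(NetworkNotFound). A minimal sketch of that flow with the same calls the module uses (helper name is ours; error handling trimmed):

    import pyrax

    def ensure_network(label, cidr):
        # Return the network plus a changed flag, mirroring the module's
        # state=present logic: reuse an existing label, otherwise create.
        try:
            return pyrax.cloud_networks.find_network_by_label(label), False
        except pyrax.exceptions.NetworkNotFound:
            return pyrax.cloud_networks.create(label, cidr=cidr), True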
+attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + label: + type: str + description: + - Label (name) to give the network + required: true + cidr: + type: str + description: + - cidr of the network being created +author: + - "Christopher H. Laco (@claco)" + - "Jesse Keating (@omgjlk)" +extends_documentation_fragment: + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Build an Isolated Network + gather_facts: false + + tasks: + - name: Network create request + local_action: + module: rax_network + credentials: ~/.raxpub + label: my-net + cidr: 192.168.3.0/24 + state: present +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def cloud_network(module, state, label, cidr): + changed = False + network = None + networks = [] + + if not pyrax.cloud_networks: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if state == 'present': + if not cidr: + module.fail_json(msg='missing required arguments: cidr') + + try: + network = pyrax.cloud_networks.find_network_by_label(label) + except pyrax.exceptions.NetworkNotFound: + try: + network = pyrax.cloud_networks.create(label, cidr=cidr) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + except Exception as e: + module.fail_json(msg='%s' % e.message) + + elif state == 'absent': + try: + network = pyrax.cloud_networks.find_network_by_label(label) + network.delete() + changed = True + except pyrax.exceptions.NetworkNotFound: + pass + except Exception as e: + module.fail_json(msg='%s' % e.message) + + if network: + instance = dict(id=network.id, + label=network.label, + cidr=network.cidr) + networks.append(instance) + + module.exit_json(changed=changed, networks=networks) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', + choices=['present', 'absent']), + label=dict(required=True), + cidr=dict() + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + state = module.params.get('state') + label = module.params.get('label') + cidr = module.params.get('cidr') + + setup_rax_module(module, pyrax) + + cloud_network(module, state, label, cidr) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_queue.py b/ansible_collections/community/general/plugins/modules/rax_queue.py new file mode 100644 index 000000000..00f730b27 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_queue.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_queue +short_description: 
Create / delete a queue in Rackspace Public Cloud +description: + - creates / deletes a Rackspace Public Cloud queue. +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + type: str + description: + - Name to give the queue + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +- name: Build a Queue + gather_facts: false + hosts: local + connection: local + tasks: + - name: Queue create request + local_action: + module: rax_queue + credentials: ~/.raxpub + name: my-queue + region: DFW + state: present + register: my_queue +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module + + +def cloud_queue(module, state, name): + for arg in (state, name): + if not arg: + module.fail_json(msg='%s is required for rax_queue' % arg) + + changed = False + queues = [] + instance = {} + + cq = pyrax.queues + if not cq: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + for queue in cq.list(): + if name != queue.name: + continue + + queues.append(queue) + + if len(queues) > 1: + module.fail_json(msg='Multiple Queues were matched by name') + + if state == 'present': + if not queues: + try: + queue = cq.create(name) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + else: + queue = queues[0] + + instance = dict(name=queue.name) + result = dict(changed=changed, queue=instance) + module.exit_json(**result) + + elif state == 'absent': + if queues: + queue = queues[0] + try: + queue.delete() + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, queue=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + name=dict(), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + name = module.params.get('name') + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + cloud_queue(module, state, name) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_group.py b/ansible_collections/community/general/plugins/modules/rax_scaling_group.py new file mode 100644 index 000000000..677a75b33 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rax_scaling_group.py @@ -0,0 +1,441 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rax_scaling_group +short_description: Manipulate Rackspace Cloud 
Autoscale Groups +description: + - Manipulate Rackspace Cloud Autoscale Groups +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + config_drive: + description: + - Attach read-only configuration drive to server as label config-2 + type: bool + default: false + cooldown: + type: int + description: + - The period of time, in seconds, that must pass before any scaling can + occur after the previous scaling. Must be an integer between 0 and + 86400 (24 hrs). + default: 300 + disk_config: + type: str + description: + - Disk partitioning strategy + - If not specified, it will fallback to C(auto). + choices: + - auto + - manual + files: + type: dict + default: {} + description: + - 'Files to insert into the instance. Hash of C(remotepath: localpath)' + flavor: + type: str + description: + - flavor to use for the instance + required: true + image: + type: str + description: + - image to use for the instance. Can be an C(id), C(human_id) or C(name) + required: true + key_name: + type: str + description: + - key pair to use on the instance + loadbalancers: + type: list + elements: dict + description: + - List of load balancer C(id) and C(port) hashes + max_entities: + type: int + description: + - The maximum number of entities that are allowed in the scaling group. + Must be an integer between 0 and 1000. + required: true + meta: + type: dict + default: {} + description: + - A hash of metadata to associate with the instance + min_entities: + type: int + description: + - The minimum number of entities that are allowed in the scaling group. + Must be an integer between 0 and 1000. + required: true + name: + type: str + description: + - Name to give the scaling group + required: true + networks: + type: list + elements: str + description: + - The network to attach to the instances. If specified, you must include + ALL networks including the public and private interfaces. Can be C(id) + or C(label). + default: + - public + - private + server_name: + type: str + description: + - The base name for servers created by Autoscale + required: true + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present + user_data: + type: str + description: + - Data to be uploaded to the servers config drive. This option implies + I(config_drive). 
Can be a file path or a string + wait: + description: + - wait for the scaling group to finish provisioning the minimum amount of + servers + type: bool + default: false + wait_timeout: + type: int + description: + - how long before wait gives up, in seconds + default: 300 +author: "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +--- +- hosts: localhost + gather_facts: false + connection: local + tasks: + - community.general.rax_scaling_group: + credentials: ~/.raxpub + region: ORD + cooldown: 300 + flavor: performance1-1 + image: bb02b1a3-bc77-4d17-ab5b-421d89850fca + min_entities: 5 + max_entities: 10 + name: ASG Test + server_name: asgtest + loadbalancers: + - id: 228385 + port: 80 + register: asg +''' + +import base64 +import json +import os +import time + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import ( + rax_argument_spec, rax_find_image, rax_find_network, + rax_required_together, rax_to_dict, setup_rax_module, + rax_scaling_group_personality_file, +) +from ansible.module_utils.six import string_types + + +def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None, + image=None, key_name=None, loadbalancers=None, meta=None, + min_entities=0, max_entities=0, name=None, networks=None, + server_name=None, state='present', user_data=None, + config_drive=False, wait=True, wait_timeout=300): + files = {} if files is None else files + loadbalancers = [] if loadbalancers is None else loadbalancers + meta = {} if meta is None else meta + networks = [] if networks is None else networks + + changed = False + + au = pyrax.autoscale + if not au: + module.fail_json(msg='Failed to instantiate clients. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if user_data: + config_drive = True + + if user_data and os.path.isfile(user_data): + try: + f = open(user_data) + user_data = f.read() + f.close() + except Exception as e: + module.fail_json(msg='Failed to load %s' % user_data) + + if state == 'present': + # Normalize and ensure all metadata values are strings + if meta: + for k, v in meta.items(): + if isinstance(v, list): + meta[k] = ','.join(['%s' % i for i in v]) + elif isinstance(v, dict): + meta[k] = json.dumps(v) + elif not isinstance(v, string_types): + meta[k] = '%s' % v + + if image: + image = rax_find_image(module, pyrax, image) + + nics = [] + if networks: + for network in networks: + nics.extend(rax_find_network(module, pyrax, network)) + + for nic in nics: + # pyrax is currently returning net-id, but we need uuid + # this check makes this forward compatible for a time when + # pyrax uses uuid instead + if nic.get('net-id'): + nic.update(uuid=nic['net-id']) + del nic['net-id'] + + # Handle the file contents + personality = rax_scaling_group_personality_file(module, files) + + lbs = [] + if loadbalancers: + for lb in loadbalancers: + try: + lb_id = int(lb.get('id')) + except (ValueError, TypeError): + module.fail_json(msg='Load balancer ID is not an integer: ' + '%s' % lb.get('id')) + try: + port = int(lb.get('port')) + except (ValueError, TypeError): + module.fail_json(msg='Load balancer port is not an ' + 'integer: %s' % lb.get('port')) + if not lb_id or not port: + continue + lbs.append((lb_id, port)) + + try: + sg = au.find(name=name) + except pyrax.exceptions.NoUniqueMatch as e: + module.fail_json(msg='%s' % e.message) + except pyrax.exceptions.NotFound: + try: + sg = au.create(name, cooldown=cooldown, + min_entities=min_entities, + max_entities=max_entities, + launch_config_type='launch_server', + server_name=server_name, image=image, + flavor=flavor, disk_config=disk_config, + metadata=meta, personality=personality, + networks=nics, load_balancers=lbs, + key_name=key_name, config_drive=config_drive, + user_data=user_data) + changed = True + except Exception as e: + module.fail_json(msg='%s' % e.message) + + if not changed: + # Scaling Group Updates + group_args = {} + if cooldown != sg.cooldown: + group_args['cooldown'] = cooldown + + if min_entities != sg.min_entities: + group_args['min_entities'] = min_entities + + if max_entities != sg.max_entities: + group_args['max_entities'] = max_entities + + if group_args: + changed = True + sg.update(**group_args) + + # Launch Configuration Updates + lc = sg.get_launch_config() + lc_args = {} + if server_name != lc.get('name'): + lc_args['server_name'] = server_name + + if image != lc.get('image'): + lc_args['image'] = image + + if flavor != lc.get('flavor'): + lc_args['flavor'] = flavor + + disk_config = disk_config or 'AUTO' + if ((disk_config or lc.get('disk_config')) and + disk_config != lc.get('disk_config', 'AUTO')): + lc_args['disk_config'] = disk_config + + if (meta or lc.get('meta')) and meta != lc.get('metadata'): + lc_args['metadata'] = meta + + test_personality = [] + for p in personality: + test_personality.append({ + 'path': p['path'], + 'contents': base64.b64encode(p['contents']) + }) + if ((test_personality or lc.get('personality')) and + test_personality != lc.get('personality')): + lc_args['personality'] = personality + + if nics != lc.get('networks'): + lc_args['networks'] = nics + + if lbs != lc.get('load_balancers'): + # Work around for 
https://github.com/rackspace/pyrax/pull/393 + lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs) + + if key_name != lc.get('key_name'): + lc_args['key_name'] = key_name + + if config_drive != lc.get('config_drive', False): + lc_args['config_drive'] = config_drive + + if (user_data and + base64.b64encode(user_data) != lc.get('user_data')): + lc_args['user_data'] = user_data + + if lc_args: + # Work around for https://github.com/rackspace/pyrax/pull/389 + if 'flavor' not in lc_args: + lc_args['flavor'] = lc.get('flavor') + changed = True + sg.update_launch_config(**lc_args) + + sg.get() + + if wait: + end_time = time.time() + wait_timeout + infinite = wait_timeout == 0 + while infinite or time.time() < end_time: + state = sg.get_state() + if state["pending_capacity"] == 0: + break + + time.sleep(5) + + module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) + + else: + try: + sg = au.find(name=name) + sg.delete() + changed = True + except pyrax.exceptions.NotFound as e: + sg = {} + except Exception as e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + config_drive=dict(default=False, type='bool'), + cooldown=dict(type='int', default=300), + disk_config=dict(choices=['auto', 'manual']), + files=dict(type='dict', default={}), + flavor=dict(required=True), + image=dict(required=True), + key_name=dict(), + loadbalancers=dict(type='list', elements='dict'), + meta=dict(type='dict', default={}), + min_entities=dict(type='int', required=True), + max_entities=dict(type='int', required=True), + name=dict(required=True), + networks=dict(type='list', elements='str', default=['public', 'private']), + server_name=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + user_data=dict(no_log=True), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=300, type='int'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together(), + ) + + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + config_drive = module.params.get('config_drive') + cooldown = module.params.get('cooldown') + disk_config = module.params.get('disk_config') + if disk_config: + disk_config = disk_config.upper() + files = module.params.get('files') + flavor = module.params.get('flavor') + image = module.params.get('image') + key_name = module.params.get('key_name') + loadbalancers = module.params.get('loadbalancers') + meta = module.params.get('meta') + min_entities = module.params.get('min_entities') + max_entities = module.params.get('max_entities') + name = module.params.get('name') + networks = module.params.get('networks') + server_name = module.params.get('server_name') + state = module.params.get('state') + user_data = module.params.get('user_data') + + if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: + module.fail_json(msg='min_entities and max_entities must be an ' + 'integer between 0 and 1000') + + if not 0 <= cooldown <= 86400: + module.fail_json(msg='cooldown must be an integer between 0 and 86400') + + setup_rax_module(module, pyrax) + + rax_asg(module, cooldown=cooldown, disk_config=disk_config, + files=files, flavor=flavor, image=image, meta=meta, + key_name=key_name, loadbalancers=loadbalancers, + min_entities=min_entities, max_entities=max_entities, + name=name, networks=networks, server_name=server_name, + 
state=state, config_drive=config_drive, user_data=user_data)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py b/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
new file mode 100644
index 000000000..60b48bb2a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_policy
+short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
+description:
+  - Manipulate Rackspace Cloud Autoscale Scaling Policy
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  at:
+    type: str
+    description:
+      - The UTC time when this policy will be executed. The time must be
+        formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
+        C(2013-05-19T08:07:08Z).
+  change:
+    type: int
+    description:
+      - The change, either as a number of servers or as a percentage, to make
+        in the scaling group. If this is a percentage, you must set
+        I(is_percent) to C(true) also.
+  cron:
+    type: str
+    description:
+      - The time when the policy will be executed, as a cron entry. For
+        example, C(1 0 * * *) executes the policy at 00:01 every day.
+  cooldown:
+    type: int
+    description:
+      - The period of time, in seconds, that must pass before any scaling can
+        occur after the previous scaling. Must be an integer between 0 and
+        86400 (24 hrs).
+    default: 300
+  desired_capacity:
+    type: int
+    description:
+      - The desired server capacity of the scaling group; that is, how
+        many servers should be in the scaling group.
+  is_percent:
+    description:
+      - Whether the value in I(change) is a percent value.
+    default: false
+    type: bool
+  name:
+    type: str
+    description:
+      - Name to give the policy.
+    required: true
+  policy_type:
+    type: str
+    description:
+      - The type of policy that will be executed.
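For context, the schedule-type options above (at/cron) end up as a single args payload on the Autoscale policy. A minimal pyrax sketch of that mapping, mirroring the add_policy() call the module itself makes further down; the ScalingGroup handle sg is an assumption here and must already have been looked up:

# Sketch only, not part of the module. 'sg' is an assumed pyrax ScalingGroup.
def build_schedule_args(at=None, cron=None):
    # 'at' and 'cron' are mutually exclusive; webhook policies pass args=None
    if at:
        return {'at': at}       # e.g. '2013-05-19T08:07:08Z'
    if cron:
        return {'cron': cron}   # e.g. '1 0 * * *' runs at 00:01 every day
    return None

policy = sg.add_policy('ASG Test Policy - at', policy_type='schedule',
                       cooldown=300, change=25, is_percent=True,
                       args=build_schedule_args(at='2013-05-19T08:07:08Z'))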
+ choices: + - webhook + - schedule + required: true + scaling_group: + type: str + description: + - Name of the scaling group that this policy will be added to + required: true + state: + type: str + description: + - Indicate desired state of the resource + choices: + - present + - absent + default: present +author: "Matt Martz (@sivel)" +extends_documentation_fragment: + - community.general.rackspace + - community.general.rackspace.openstack + - community.general.attributes + +''' + +EXAMPLES = ''' +--- +- hosts: localhost + gather_facts: false + connection: local + tasks: + - community.general.rax_scaling_policy: + credentials: ~/.raxpub + region: ORD + at: '2013-05-19T08:07:08Z' + change: 25 + cooldown: 300 + is_percent: true + name: ASG Test Policy - at + policy_type: schedule + scaling_group: ASG Test + register: asps_at + + - community.general.rax_scaling_policy: + credentials: ~/.raxpub + region: ORD + cron: '1 0 * * *' + change: 25 + cooldown: 300 + is_percent: true + name: ASG Test Policy - cron + policy_type: schedule + scaling_group: ASG Test + register: asp_cron + + - community.general.rax_scaling_policy: + credentials: ~/.raxpub + region: ORD + cooldown: 300 + desired_capacity: 5 + name: ASG Test Policy - webhook + policy_type: webhook + scaling_group: ASG Test + register: asp_webhook +''' + +try: + import pyrax + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict, + setup_rax_module) + + +def rax_asp(module, at=None, change=0, cron=None, cooldown=300, + desired_capacity=0, is_percent=False, name=None, + policy_type=None, scaling_group=None, state='present'): + changed = False + + au = pyrax.autoscale + if not au: + module.fail_json(msg='Failed to instantiate client. 
This '
+                             'typically indicates an invalid region or an '
+                             'incorrectly capitalized region name.')
+
+    try:
+        UUID(scaling_group)
+    except ValueError:
+        try:
+            sg = au.find(name=scaling_group)
+        except Exception as e:
+            module.fail_json(msg='%s' % e.message)
+    else:
+        try:
+            sg = au.get(scaling_group)
+        except Exception as e:
+            module.fail_json(msg='%s' % e.message)
+
+    if state == 'present':
+        # list() so the result can be counted and indexed on Python 3,
+        # where filter() returns a lazy iterator
+        policies = list(filter(lambda p: name == p.name, sg.list_policies()))
+        if len(policies) > 1:
+            module.fail_json(msg='No unique policy match found by name')
+        if at:
+            args = dict(at=at)
+        elif cron:
+            args = dict(cron=cron)
+        else:
+            args = None
+
+        if not policies:
+            try:
+                policy = sg.add_policy(name, policy_type=policy_type,
+                                       cooldown=cooldown, change=change,
+                                       is_percent=is_percent,
+                                       desired_capacity=desired_capacity,
+                                       args=args)
+                changed = True
+            except Exception as e:
+                module.fail_json(msg='%s' % e.message)
+
+        else:
+            policy = policies[0]
+            kwargs = {}
+            if policy_type != policy.type:
+                kwargs['policy_type'] = policy_type
+
+            if cooldown != policy.cooldown:
+                kwargs['cooldown'] = cooldown
+
+            if hasattr(policy, 'change') and change != policy.change:
+                kwargs['change'] = change
+
+            if hasattr(policy, 'changePercent') and is_percent is False:
+                kwargs['change'] = change
+                kwargs['is_percent'] = False
+            elif hasattr(policy, 'change') and is_percent is True:
+                kwargs['change'] = change
+                kwargs['is_percent'] = True
+
+            if hasattr(policy, 'desiredCapacity') and change:
+                kwargs['change'] = change
+            elif ((hasattr(policy, 'change') or
+                   hasattr(policy, 'changePercent')) and desired_capacity):
+                kwargs['desired_capacity'] = desired_capacity
+
+            if hasattr(policy, 'args') and args != policy.args:
+                kwargs['args'] = args
+
+            if kwargs:
+                policy.update(**kwargs)
+                changed = True
+
+        policy.get()
+
+        module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+    else:
+        try:
+            policies = list(filter(lambda p: name == p.name, sg.list_policies()))
+            if len(policies) > 1:
+                module.fail_json(msg='No unique policy match found by name')
+            elif not policies:
+                policy = {}
+            else:
+                # fetch the matching policy before deleting it
+                policy = policies[0]
+                policy.delete()
+                changed = True
+        except Exception as e:
+            module.fail_json(msg='%s' % e.message)
+
+        module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+
+def main():
+    argument_spec = rax_argument_spec()
+    argument_spec.update(
+        dict(
+            at=dict(),
+            change=dict(type='int'),
+            cron=dict(),
+            cooldown=dict(type='int', default=300),
+            desired_capacity=dict(type='int'),
+            is_percent=dict(type='bool', default=False),
+            name=dict(required=True),
+            policy_type=dict(required=True, choices=['webhook', 'schedule']),
+            scaling_group=dict(required=True),
+            state=dict(default='present', choices=['present', 'absent']),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=rax_required_together(),
+        mutually_exclusive=[
+            ['cron', 'at'],
+            ['change', 'desired_capacity'],
+        ]
+    )
+
+    if not HAS_PYRAX:
+        module.fail_json(msg='pyrax is required for this module')
+
+    at = module.params.get('at')
+    change = module.params.get('change')
+    cron = module.params.get('cron')
+    cooldown = module.params.get('cooldown')
+    desired_capacity = module.params.get('desired_capacity')
+    is_percent = module.params.get('is_percent')
+    name = module.params.get('name')
+    policy_type = module.params.get('policy_type')
+    scaling_group = module.params.get('scaling_group')
+    state = module.params.get('state')
+
+    if (at or cron) and policy_type == 'webhook':
+        module.fail_json(msg='policy_type=schedule is
required for a time ' + 'based policy') + + setup_rax_module(module, pyrax) + + rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, + desired_capacity=desired_capacity, is_percent=is_percent, + name=name, policy_type=policy_type, scaling_group=scaling_group, + state=state) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/read_csv.py b/ansible_collections/community/general/plugins/modules/read_csv.py new file mode 100644 index 000000000..f2a359fa7 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/read_csv.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: read_csv +short_description: Read a CSV file +description: +- Read a CSV file and return a list or a dictionary, containing one dictionary per row. +author: +- Dag Wieers (@dagwieers) +extends_documentation_fragment: +- community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - The CSV filename to read data from. + type: path + required: true + aliases: [ filename ] + key: + description: + - The column name used as a key for the resulting dictionary. + - If C(key) is unset, the module returns a list of dictionaries, + where each dictionary is a row in the CSV file. + type: str + dialect: + description: + - The CSV dialect to use when parsing the CSV file. + - Possible values include C(excel), C(excel-tab) or C(unix). + type: str + default: excel + fieldnames: + description: + - A list of field names for every column. + - This is needed if the CSV does not have a header. + type: list + elements: str + unique: + description: + - Whether the C(key) used is expected to be unique. + type: bool + default: true + delimiter: + description: + - A one-character string used to separate fields. + - When using this parameter, you change the default value used by I(dialect). + - The default value depends on the dialect used. + type: str + skipinitialspace: + description: + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by I(dialect). + - The default value depends on the dialect used. + type: bool + strict: + description: + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by I(dialect). + - The default value depends on the dialect used. + type: bool +notes: +- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja. 
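For orientation, the list/dict behaviour described above can be reproduced with nothing but the stdlib csv module; the module itself goes through the module_utils.csv helpers imported below. A minimal standalone sketch:

# Sketch of the module's two return shapes using only the stdlib.
import csv

def read_users(path, key=None, dialect='excel', fieldnames=None):
    with open(path, newline='') as f:
        rows = list(csv.DictReader(f, fieldnames=fieldnames, dialect=dialect))
    if key is None:
        return rows                          # mirrors the 'list' return value
    return {row[key]: row for row in rows}   # mirrors the 'dict' return value

# e.g. read_users('users.csv', key='name')['dag']['uid'] == '500'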
+'''
+
+EXAMPLES = r'''
+# Example CSV file with header
+#
+# name,uid,gid
+# dag,500,500
+# jeroen,501,500
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+  community.general.read_csv:
+    path: users.csv
+    key: name
+  register: users
+  delegate_to: localhost
+
+- ansible.builtin.debug:
+    msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}'
+
+# Read a CSV file and access the first item
+- name: Read users from CSV file and return a list
+  community.general.read_csv:
+    path: users.csv
+  register: users
+  delegate_to: localhost
+
+- ansible.builtin.debug:
+    msg: 'User {{ users.list.0.name }} has UID {{ users.list.0.uid }} and GID {{ users.list.0.gid }}'
+
+# Example CSV file without header and semi-colon delimiter
+#
+# dag;500;500
+# jeroen;501;500
+
+# Read a CSV file without headers
+- name: Read users from CSV file and return a list
+  community.general.read_csv:
+    path: users.csv
+    fieldnames: name,uid,gid
+    delimiter: ';'
+  register: users
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+dict:
+  description: The CSV content as a dictionary.
+  returned: success
+  type: dict
+  sample:
+    dag:
+      name: dag
+      uid: 500
+      gid: 500
+    jeroen:
+      name: jeroen
+      uid: 501
+      gid: 500
+list:
+  description: The CSV content as a list.
+  returned: success
+  type: list
+  sample:
+    - name: dag
+      uid: 500
+      gid: 500
+    - name: jeroen
+      uid: 501
+      gid: 500
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
+                                                                            DialectNotAvailableError,
+                                                                            CustomDialectFailureError)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(type='path', required=True, aliases=['filename']),
+            dialect=dict(type='str', default='excel'),
+            key=dict(type='str', no_log=False),
+            fieldnames=dict(type='list', elements='str'),
+            unique=dict(type='bool', default=True),
+            delimiter=dict(type='str'),
+            skipinitialspace=dict(type='bool'),
+            strict=dict(type='bool'),
+        ),
+        supports_check_mode=True,
+    )
+
+    path = module.params['path']
+    dialect = module.params['dialect']
+    key = module.params['key']
+    fieldnames = module.params['fieldnames']
+    unique = module.params['unique']
+
+    dialect_params = {
+        "delimiter": module.params['delimiter'],
+        "skipinitialspace": module.params['skipinitialspace'],
+        "strict": module.params['strict'],
+    }
+
+    try:
+        dialect = initialize_dialect(dialect, **dialect_params)
+    except (CustomDialectFailureError, DialectNotAvailableError) as e:
+        module.fail_json(msg=to_native(e))
+
+    try:
+        with open(path, 'rb') as f:
+            data = f.read()
+    except (IOError, OSError) as e:
+        module.fail_json(msg="Unable to open file: %s" % to_native(e))
+
+    reader = read_csv(data, dialect, fieldnames)
+
+    if key and key not in reader.fieldnames:
+        module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames)))
+
+    data_dict = dict()
+    data_list = list()
+
+    if key is None:
+        try:
+            for row in reader:
+                data_list.append(row)
+        except CSVError as e:
+            module.fail_json(msg="Unable to process file: %s" % to_native(e))
+    else:
+        try:
+            for row in reader:
+                if unique and row[key] in data_dict:
+                    module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key]))
+                data_dict[row[key]] = row
+        except CSVError as e:
+            module.fail_json(msg="Unable to process file: %s" %
to_native(e)) + + module.exit_json(dict=data_dict, list=data_list) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/redfish_command.py b/ansible_collections/community/general/plugins/modules/redfish_command.py new file mode 100644 index 000000000..400677eab --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/redfish_command.py @@ -0,0 +1,959 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: redfish_command +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + perform an action. + - Manages OOB controller ex. reboot, log management. + - Manages OOB controller users ex. add, remove, update. + - Manages system power ex. on, off, graceful and forced reboot. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + session_uri: + description: + - URI of the session resource. + type: str + version_added: 2.3.0 + id: + required: false + aliases: [ account_id ] + description: + - ID of account to delete/modify. + - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request. + type: str + new_username: + required: false + aliases: [ account_username ] + description: + - Username of account to add/delete/modify. + type: str + new_password: + required: false + aliases: [ account_password ] + description: + - New password of account to add/modify. + type: str + roleid: + required: false + aliases: [ account_roleid ] + description: + - Role of account to add/modify. + type: str + bootdevice: + required: false + description: + - Boot device when setting boot configuration. + type: str + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + default: 10 + type: int + boot_override_mode: + description: + - Boot mode when using an override. + type: str + choices: [ Legacy, UEFI ] + version_added: 3.5.0 + uefi_target: + required: false + description: + - UEFI boot target when bootdevice is "UefiTarget". + type: str + boot_next: + required: false + description: + - BootNext target when bootdevice is "UefiBootNext". + type: str + update_username: + required: false + aliases: [ account_updatename ] + description: + - New user name for updating account_username. + type: str + version_added: '0.2.0' + account_properties: + required: false + description: + - Properties of account service to update. 
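For orientation, UpdateAccountServiceProperties boils down to a PATCH of the AccountService resource with this dict. A rough requests-based sketch; the hardcoded /redfish/v1/AccountService path and the placeholder credentials are assumptions, and the module itself discovers the real URI through RedfishUtils:

import requests

baseuri, username, password = 'bmc.example.com', 'admin', 'secret'  # placeholders
props = {'AccountLockoutThreshold': 5, 'AccountLockoutDuration': 600}
resp = requests.patch('https://%s/redfish/v1/AccountService' % baseuri,
                      json=props, auth=(username, password), verify=False)
resp.raise_for_status()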
+    type: dict
+    default: {}
+    version_added: '0.2.0'
+  resource_id:
+    required: false
+    description:
+      - ID of the System, Manager or Chassis to modify.
+    type: str
+    version_added: '0.2.0'
+  update_image_uri:
+    required: false
+    description:
+      - URI of the image for the update.
+    type: str
+    version_added: '0.2.0'
+  update_protocol:
+    required: false
+    description:
+      - Protocol for the update.
+    type: str
+    version_added: '0.2.0'
+  update_targets:
+    required: false
+    description:
+      - List of target resource URIs to apply the update to.
+    type: list
+    elements: str
+    default: []
+    version_added: '0.2.0'
+  update_creds:
+    required: false
+    description:
+      - Credentials for retrieving the update image.
+    type: dict
+    version_added: '0.2.0'
+    suboptions:
+      username:
+        required: false
+        description:
+          - Username for retrieving the update image.
+        type: str
+      password:
+        required: false
+        description:
+          - Password for retrieving the update image.
+        type: str
+  update_apply_time:
+    required: false
+    description:
+      - When to apply the update.
+    type: str
+    choices:
+      - Immediate
+      - OnReset
+      - AtMaintenanceWindowStart
+      - InMaintenanceWindowOnReset
+      - OnStartUpdateRequest
+    version_added: '6.1.0'
+  update_handle:
+    required: false
+    description:
+      - Handle to check the status of an update in progress.
+    type: str
+    version_added: '6.1.0'
+  virtual_media:
+    required: false
+    description:
+      - Options for VirtualMedia commands.
+    type: dict
+    version_added: '0.2.0'
+    suboptions:
+      media_types:
+        required: false
+        description:
+          - List of media types appropriate for the image.
+        type: list
+        elements: str
+        default: []
+      image_url:
+        required: false
+        description:
+          - URL of the image to insert or eject.
+        type: str
+      inserted:
+        required: false
+        description:
+          - Indicates that the image is treated as inserted on command completion.
+        type: bool
+        default: true
+      write_protected:
+        required: false
+        description:
+          - Indicates that the media is treated as write-protected.
+        type: bool
+        default: true
+      username:
+        required: false
+        description:
+          - Username for accessing the image URL.
+        type: str
+      password:
+        required: false
+        description:
+          - Password for accessing the image URL.
+        type: str
+      transfer_protocol_type:
+        required: false
+        description:
+          - Network protocol to use with the image.
+        type: str
+      transfer_method:
+        required: false
+        description:
+          - Transfer method to use with the image.
+        type: str
+  strip_etag_quotes:
+    description:
+      - Removes surrounding quotes of etag used in C(If-Match) header
+        of C(PATCH) requests.
+      - Only use this option to resolve bad vendor implementation where
+        C(If-Match) only matches the unquoted etag string.
+    type: bool
+    default: false
+    version_added: 3.7.0
+  bios_attributes:
+    required: false
+    description:
+      - BIOS attributes that need to be verified in the given server.
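Conceptually, VerifyBiosAttributes reads the system's Bios resource and compares every requested attribute against it. A rough sketch; the literal Bios URI (built from the 437XR1138R2 resource_id used in the examples) and the credentials are assumptions, since the module resolves the resource through RedfishUtils:

import requests

baseuri, username, password = 'bmc.example.com', 'admin', 'secret'  # placeholders
wanted = {'SubNumaClustering': 'Disabled'}

bios = requests.get('https://%s/redfish/v1/Systems/437XR1138R2/Bios' % baseuri,
                    auth=(username, password), verify=False).json()
actual = bios.get('Attributes', {})
# an empty dict means every requested attribute matched
mismatched = {k: (v, actual.get(k)) for k, v in wanted.items()
              if actual.get(k) != v}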
+ type: dict + version_added: 6.4.0 + +author: + - "Jose Delarosa (@jose-delarosa)" + - "T S Kushal (@TSKushal)" +''' + +EXAMPLES = ''' + - name: Restart system power gracefully + community.general.redfish_command: + category: Systems + command: PowerGracefulRestart + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Turn system power off + community.general.redfish_command: + category: Systems + command: PowerForceOff + resource_id: 437XR1138R2 + + - name: Restart system power forcefully + community.general.redfish_command: + category: Systems + command: PowerForceRestart + resource_id: 437XR1138R2 + + - name: Shutdown system power gracefully + community.general.redfish_command: + category: Systems + command: PowerGracefulShutdown + resource_id: 437XR1138R2 + + - name: Turn system power on + community.general.redfish_command: + category: Systems + command: PowerOn + resource_id: 437XR1138R2 + + - name: Reboot system power + community.general.redfish_command: + category: Systems + command: PowerReboot + resource_id: 437XR1138R2 + + - name: Set one-time boot device to {{ bootdevice }} + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "{{ bootdevice }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01" + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "UefiTarget" + uefi_target: "/0x31/0x33/0x01/0x01" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set one-time boot device to BootNext target of "Boot0001" + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "UefiBootNext" + boot_next: "Boot0001" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set persistent boot device override + community.general.redfish_command: + category: Systems + command: EnableContinuousBootOverride + resource_id: 437XR1138R2 + bootdevice: "{{ bootdevice }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set one-time boot to BiosSetup + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + boot_next: BiosSetup + boot_override_mode: Legacy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Disable persistent boot device override + community.general.redfish_command: + category: Systems + command: DisableBootOverride + + - name: Set system indicator LED to blink using security token for auth + community.general.redfish_command: + category: Systems + command: IndicatorLedBlink + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + + - name: Add user + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + + - name: Add user using new option aliases + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + 
account_password: "{{ account_password }}"
+      account_roleid: "{{ account_roleid }}"
+
+  - name: Delete user
+    community.general.redfish_command:
+      category: Accounts
+      command: DeleteUser
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      account_username: "{{ account_username }}"
+
+  - name: Disable user
+    community.general.redfish_command:
+      category: Accounts
+      command: DisableUser
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      account_username: "{{ account_username }}"
+
+  - name: Enable user
+    community.general.redfish_command:
+      category: Accounts
+      command: EnableUser
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      account_username: "{{ account_username }}"
+
+  - name: Add and enable user
+    community.general.redfish_command:
+      category: Accounts
+      command: AddUser,EnableUser
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      new_username: "{{ new_username }}"
+      new_password: "{{ new_password }}"
+      roleid: "{{ roleid }}"
+
+  - name: Update user password
+    community.general.redfish_command:
+      category: Accounts
+      command: UpdateUserPassword
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      account_username: "{{ account_username }}"
+      account_password: "{{ account_password }}"
+
+  - name: Update user role
+    community.general.redfish_command:
+      category: Accounts
+      command: UpdateUserRole
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      account_username: "{{ account_username }}"
+      roleid: "{{ roleid }}"
+
+  - name: Update user name
+    community.general.redfish_command:
+      category: Accounts
+      command: UpdateUserName
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      account_username: "{{ account_username }}"
+      account_updatename: "{{ account_updatename }}"
+
+  - name: Update user name
+    community.general.redfish_command:
+      category: Accounts
+      command: UpdateUserName
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      account_username: "{{ account_username }}"
+      update_username: "{{ update_username }}"
+
+  - name: Update AccountService properties
+    community.general.redfish_command:
+      category: Accounts
+      command: UpdateAccountServiceProperties
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      account_properties:
+        AccountLockoutThreshold: 5
+        AccountLockoutDuration: 600
+
+  - name: Clear Manager Logs with a timeout of 20 seconds
+    community.general.redfish_command:
+      category: Manager
+      command: ClearLogs
+      resource_id: BMC
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      timeout: 20
+
+  - name: Create session
+    community.general.redfish_command:
+      category: Sessions
+      command: CreateSession
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+    register: result
+
+  - name: Set chassis indicator LED to blink using security token for auth
+    community.general.redfish_command:
+      category: Chassis
+      command: IndicatorLedBlink
+      resource_id: 1U
+      baseuri: "{{ baseuri }}"
+      auth_token: "{{ result.session.token }}"
+
+  - name: Delete session using security token created by CreateSession above
+    community.general.redfish_command:
+      category: Sessions
+      command: DeleteSession
+      baseuri: "{{ baseuri }}"
+      auth_token: "{{ result.session.token }}"
+      session_uri: "{{ result.session.uri }}"
+
+  - name:
Clear Sessions + community.general.redfish_command: + category: Sessions + command: ClearSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Simple update + community.general.redfish_command: + category: Update + command: SimpleUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: https://example.com/myupdate.img + + - name: Simple update with additional options + community.general.redfish_command: + category: Update + command: SimpleUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: //example.com/myupdate.img + update_protocol: FTP + update_targets: + - /redfish/v1/UpdateService/FirmwareInventory/BMC + update_creds: + username: operator + password: supersecretpwd + + - name: Perform requested operations to continue the update + community.general.redfish_command: + category: Update + command: PerformRequestedOperations + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_handle: /redfish/v1/TaskService/TaskMonitors/735 + + - name: Insert Virtual Media + community.general.redfish_command: + category: Systems + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + media_types: + - CD + - DVD + resource_id: 1 + + - name: Insert Virtual Media + community.general.redfish_command: + category: Manager + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + media_types: + - CD + - DVD + resource_id: BMC + + - name: Eject Virtual Media + community.general.redfish_command: + category: Systems + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + resource_id: 1 + + - name: Eject Virtual Media + community.general.redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + resource_id: BMC + + - name: Restart manager power gracefully + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Restart manager power gracefully + community.general.redfish_command: + category: Manager + command: PowerGracefulRestart + resource_id: BMC + + - name: Turn manager power off + community.general.redfish_command: + category: Manager + command: PowerForceOff + resource_id: BMC + + - name: Restart manager power forcefully + community.general.redfish_command: + category: Manager + command: PowerForceRestart + resource_id: BMC + + - name: Shutdown manager power gracefully + community.general.redfish_command: + category: Manager + command: PowerGracefulShutdown + resource_id: BMC + + - name: Turn manager power on + community.general.redfish_command: + category: Manager + command: PowerOn + resource_id: BMC + + - name: Reboot manager power + community.general.redfish_command: + category: Manager + command: PowerReboot + resource_id: BMC + + - name: Verify BIOS attributes + 
community.general.redfish_command: + category: Systems + command: VerifyBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + bios_attributes: + SubNumaClustering: "Disabled" + WorkloadProfile: "Virtualization-MaxPerformance" +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +return_values: + description: Dictionary containing command-specific response data from the action. + returned: on success + type: dict + version_added: 6.1.0 + sample: { + "update_status": { + "handle": "/redfish/v1/TaskService/TaskMonitors/735", + "messages": [], + "resets_requested": [], + "ret": true, + "status": "New" + } + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils.common.text.converters import to_native + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", + "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride", + "IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", "VirtualMediaEject", "VerifyBiosAttributes"], + "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], + "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", + "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", + "UpdateAccountServiceProperties"], + "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], + "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", + "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", + "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], + "Update": ["SimpleUpdate", "PerformRequestedOperations"], +} + + +def main(): + result = {} + return_values = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + session_uri=dict(), + id=dict(aliases=["account_id"]), + new_username=dict(aliases=["account_username"]), + new_password=dict(aliases=["account_password"], no_log=True), + roleid=dict(aliases=["account_roleid"]), + update_username=dict(type='str', aliases=["account_updatename"]), + account_properties=dict(type='dict', default={}), + bootdevice=dict(), + timeout=dict(type='int', default=10), + uefi_target=dict(), + boot_next=dict(), + boot_override_mode=dict(choices=['Legacy', 'UEFI']), + resource_id=dict(), + update_image_uri=dict(), + update_protocol=dict(), + update_targets=dict(type='list', elements='str', default=[]), + update_creds=dict( + type='dict', + options=dict( + username=dict(), + password=dict(no_log=True) + ) + ), + update_apply_time=dict(choices=['Immediate', 'OnReset', 'AtMaintenanceWindowStart', + 'InMaintenanceWindowOnReset', 'OnStartUpdateRequest']), + update_handle=dict(), + virtual_media=dict( + type='dict', + options=dict( + media_types=dict(type='list', elements='str', default=[]), + image_url=dict(), + inserted=dict(type='bool', default=True), + write_protected=dict(type='bool', default=True), + username=dict(), + password=dict(no_log=True), + transfer_protocol_type=dict(), + transfer_method=dict(), + ) + ), + 
strip_etag_quotes=dict(type='bool', default=False), + bios_attributes=dict(type="dict") + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # user to add/modify/delete + user = {'account_id': module.params['id'], + 'account_username': module.params['new_username'], + 'account_password': module.params['new_password'], + 'account_roleid': module.params['roleid'], + 'account_updatename': module.params['update_username'], + 'account_properties': module.params['account_properties']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # update options + update_opts = { + 'update_image_uri': module.params['update_image_uri'], + 'update_protocol': module.params['update_protocol'], + 'update_targets': module.params['update_targets'], + 'update_creds': module.params['update_creds'], + 'update_apply_time': module.params['update_apply_time'], + 'update_handle': module.params['update_handle'], + } + + # Boot override options + boot_opts = { + 'bootdevice': module.params['bootdevice'], + 'uefi_target': module.params['uefi_target'], + 'boot_next': module.params['boot_next'], + 'boot_override_mode': module.params['boot_override_mode'], + } + + # VirtualMedia options + virtual_media = module.params['virtual_media'] + + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + + # BIOS Attributes options + bios_attributes = module.params['bios_attributes'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+    # Organize by Categories / Commands
+    if category == "Accounts":
+        ACCOUNTS_COMMANDS = {
+            "AddUser": rf_utils.add_user,
+            "EnableUser": rf_utils.enable_user,
+            "DeleteUser": rf_utils.delete_user,
+            "DisableUser": rf_utils.disable_user,
+            "UpdateUserRole": rf_utils.update_user_role,
+            "UpdateUserPassword": rf_utils.update_user_password,
+            "UpdateUserName": rf_utils.update_user_name,
+            "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties
+        }
+
+        # execute only if we find an Account service resource
+        result = rf_utils._find_accountservice_resource()
+        if result['ret'] is False:
+            module.fail_json(msg=to_native(result['msg']))
+
+        for command in command_list:
+            result = ACCOUNTS_COMMANDS[command](user)
+
+    elif category == "Systems":
+        # execute only if we find a System resource
+        result = rf_utils._find_systems_resource()
+        if result['ret'] is False:
+            module.fail_json(msg=to_native(result['msg']))
+
+        for command in command_list:
+            if command.startswith('Power'):
+                result = rf_utils.manage_system_power(command)
+            elif command == "SetOneTimeBoot":
+                boot_opts['override_enabled'] = 'Once'
+                result = rf_utils.set_boot_override(boot_opts)
+            elif command == "EnableContinuousBootOverride":
+                boot_opts['override_enabled'] = 'Continuous'
+                result = rf_utils.set_boot_override(boot_opts)
+            elif command == "DisableBootOverride":
+                boot_opts['override_enabled'] = 'Disabled'
+                result = rf_utils.set_boot_override(boot_opts)
+            elif command.startswith('IndicatorLed'):
+                result = rf_utils.manage_system_indicator_led(command)
+            elif command == 'VirtualMediaInsert':
+                result = rf_utils.virtual_media_insert(virtual_media, category)
+            elif command == 'VirtualMediaEject':
+                result = rf_utils.virtual_media_eject(virtual_media, category)
+            elif command == 'VerifyBiosAttributes':
+                result = rf_utils.verify_bios_attributes(bios_attributes)
+
+    elif category == "Chassis":
+        result = rf_utils._find_chassis_resource()
+        if result['ret'] is False:
+            module.fail_json(msg=to_native(result['msg']))
+
+        led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"]
+
+        # Check if more than one led_command is present
+        num_led_commands = sum([command in led_commands for command in command_list])
+        if num_led_commands > 1:
+            result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."}
+        else:
+            for command in command_list:
+                if command in led_commands:
+                    result = rf_utils.manage_chassis_indicator_led(command)
+
+    elif category == "Sessions":
+        # execute only if we find SessionService resources
+        resource = rf_utils._find_sessionservice_resource()
+        if resource['ret'] is False:
+            module.fail_json(msg=resource['msg'])
+
+        for command in command_list:
+            if command == "ClearSessions":
+                result = rf_utils.clear_sessions()
+            elif command == "CreateSession":
+                result = rf_utils.create_session()
+            elif command == "DeleteSession":
+                result = rf_utils.delete_session(module.params['session_uri'])
+
+    elif category == "Manager":
+        # execute only if we find a Manager service resource
+        result = rf_utils._find_managers_resource()
+        if result['ret'] is False:
+            module.fail_json(msg=to_native(result['msg']))
+
+        for command in command_list:
+            # standardize on the Power* commands, but allow the legacy
+            # GracefulRestart command
+            if command == 'GracefulRestart':
+                command = 'PowerGracefulRestart'
+
+            if command.startswith('Power'):
+                result = rf_utils.manage_manager_power(command)
+            elif command ==
'ClearLogs': + result = rf_utils.clear_logs() + elif command == 'VirtualMediaInsert': + result = rf_utils.virtual_media_insert(virtual_media, category) + elif command == 'VirtualMediaEject': + result = rf_utils.virtual_media_eject(virtual_media, category) + + elif category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "SimpleUpdate": + result = rf_utils.simple_update(update_opts) + if 'update_status' in result: + return_values['update_status'] = result['update_status'] + elif command == "PerformRequestedOperations": + result = rf_utils.perform_requested_update_operations(update_opts['update_handle']) + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + changed = result.get('changed', True) + session = result.get('session', dict()) + module.exit_json(changed=changed, session=session, + msg='Action was successful', + return_values=return_values) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/redfish_config.py b/ansible_collections/community/general/plugins/modules/redfish_config.py new file mode 100644 index 000000000..9f31870e3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/redfish_config.py @@ -0,0 +1,444 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: redfish_config +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + set or update a configuration attribute. + - Manages BIOS configuration settings. + - Manages OOB controller configuration settings. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + bios_attributes: + required: false + description: + - Dictionary of BIOS attributes to update. + default: {} + type: dict + version_added: '0.2.0' + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + default: 10 + type: int + boot_order: + required: false + description: + - List of BootOptionReference strings specifying the BootOrder. + default: [] + type: list + elements: str + version_added: '0.2.0' + network_protocols: + required: false + description: + - Setting dict of manager services to update. 
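For orientation, SetNetworkProtocols effectively PATCHes the manager's NetworkProtocol resource with a dict shaped like the one in the SetNetworkProtocols example further down. A rough requests-based sketch; the literal /redfish/v1/Managers/BMC/NetworkProtocol path and the credentials are assumptions, as the module discovers the real URI through RedfishUtils:

import requests

baseuri, username, password = 'bmc.example.com', 'admin', 'secret'  # placeholders
protocols = {'SNMP': {'ProtocolEnabled': True, 'Port': 161},
             'HTTP': {'ProtocolEnabled': False, 'Port': 8080}}
requests.patch('https://%s/redfish/v1/Managers/BMC/NetworkProtocol' % baseuri,
               json=protocols, auth=(username, password), verify=False)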
+ type: dict + default: {} + version_added: '0.2.0' + resource_id: + required: false + description: + - ID of the System, Manager or Chassis to modify. + type: str + version_added: '0.2.0' + nic_addr: + required: false + description: + - EthernetInterface Address string on OOB controller. + default: 'null' + type: str + version_added: '0.2.0' + nic_config: + required: false + description: + - Setting dict of EthernetInterface on OOB controller. + type: dict + default: {} + version_added: '0.2.0' + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header + of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where + C(If-Match) only matches the unquoted etag string. + type: bool + default: false + version_added: 3.7.0 + hostinterface_config: + required: false + description: + - Setting dict of HostInterface on OOB controller. + type: dict + default: {} + version_added: '4.1.0' + hostinterface_id: + required: false + description: + - Redfish HostInterface instance ID if multiple HostInterfaces are present. + type: str + version_added: '4.1.0' + sessions_config: + required: false + description: + - Setting dict of Sessions. + type: dict + default: {} + version_added: '5.7.0' + +author: + - "Jose Delarosa (@jose-delarosa)" + - "T S Kushal (@TSKushal)" +''' + +EXAMPLES = ''' + - name: Set BootMode to UEFI + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Uefi" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set multiple BootMode attributes + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Enable PXE Boot for NIC1 + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + PxeDev1EnDis: Enabled + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set BIOS default settings with a timeout of 20 seconds + community.general.redfish_config: + category: Systems + command: SetBiosDefaultSettings + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + + - name: Set boot order + community.general.redfish_config: + category: Systems + command: SetBootOrder + boot_order: + - Boot0002 + - Boot0001 + - Boot0000 + - Boot0003 + - Boot0004 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set boot order to the default + community.general.redfish_config: + category: Systems + command: SetDefaultBootOrder + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set Manager Network Protocols + community.general.redfish_config: + category: Manager + command: SetNetworkProtocols + network_protocols: + SNMP: + ProtocolEnabled: true + Port: 161 + HTTP: + ProtocolEnabled: false + Port: 8080 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set Manager NIC + community.general.redfish_config: + category: Manager + command: SetManagerNic + nic_config: + DHCPv4: + DHCPEnabled: false + IPv4StaticAddresses: + Address: 192.168.1.3 + Gateway: 
192.168.1.1 + SubnetMask: 255.255.255.0 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Disable Host Interface + community.general.redfish_config: + category: Manager + command: SetHostInterface + hostinterface_config: + InterfaceEnabled: false + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Enable Host Interface for HostInterface resource ID '2' + community.general.redfish_config: + category: Manager + command: SetHostInterface + hostinterface_config: + InterfaceEnabled: true + hostinterface_id: "2" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Set SessionService Session Timeout to 30 minutes + community.general.redfish_config: + category: Sessions + command: SetSessionService + sessions_config: + SessionTimeout: 1800 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Enable SecureBoot + community.general.redfish_config: + category: Systems + command: EnableSecureBoot + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible.module_utils.common.text.converters import to_native + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", + "SetDefaultBootOrder", "EnableSecureBoot"], + "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface"], + "Sessions": ["SetSessionService"], +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + bios_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=10), + boot_order=dict(type='list', elements='str', default=[]), + network_protocols=dict( + type='dict', + default={} + ), + resource_id=dict(), + nic_addr=dict(default='null'), + nic_config=dict( + type='dict', + default={} + ), + strip_etag_quotes=dict(type='bool', default=False), + hostinterface_config=dict(type='dict', default={}), + hostinterface_id=dict(), + sessions_config=dict(type='dict', default={}), + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # BIOS attributes to update + bios_attributes = module.params['bios_attributes'] + + # boot order + boot_order = module.params['boot_order'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # manager nic + nic_addr = module.params['nic_addr'] + nic_config = module.params['nic_config'] + + # Etag options + strip_etag_quotes = 
module.params['strip_etag_quotes'] + + # HostInterface config options + hostinterface_config = module.params['hostinterface_config'] + + # HostInterface instance ID + hostinterface_id = module.params['hostinterface_id'] + + # Sessions config options + sessions_config = module.params['sessions_config'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Systems": + # execute only if we find a System resource + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetBiosDefaultSettings": + result = rf_utils.set_bios_default_settings() + elif command == "SetBiosAttributes": + result = rf_utils.set_bios_attributes(bios_attributes) + elif command == "SetBootOrder": + result = rf_utils.set_boot_order(boot_order) + elif command == "SetDefaultBootOrder": + result = rf_utils.set_default_boot_order() + elif command == "EnableSecureBoot": + result = rf_utils.enable_secure_boot() + + elif category == "Manager": + # execute only if we find a Manager service resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetNetworkProtocols": + result = rf_utils.set_network_protocols(module.params['network_protocols']) + elif command == "SetManagerNic": + result = rf_utils.set_manager_nic(nic_addr, nic_config) + elif command == "SetHostInterface": + result = rf_utils.set_hostinterface_attributes(hostinterface_config, hostinterface_id) + + elif category == "Sessions": + # execute only if we find a Sessions resource + result = rf_utils._find_sessionservice_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetSessionService": + result = rf_utils.set_session_service(sessions_config) + + # Return data back or fail with proper message + if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/redfish_info.py b/ansible_collections/community/general/plugins/modules/redfish_info.py new file mode 100644 index 000000000..364df40b5 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/redfish_info.py @@ -0,0 +1,569 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017-2018 Dell EMC Inc. 
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: redfish_info +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + get information back. + - Information retrieved is placed in a location specified by the user. + - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)! +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + category: + required: false + description: + - List of categories to execute on OOB controller. + default: ['Systems'] + type: list + elements: str + command: + required: false + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + default: 10 + type: int + update_handle: + required: false + description: + - Handle to check the status of an update in progress. 
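For orientation, checking an update_handle amounts to polling the task monitor URI until it stops answering 202, the Redfish convention for a task still in progress; exact status fields and handling vary by vendor. A rough sketch with placeholder credentials and the handle used in the examples below:

import time
import requests

baseuri, username, password = 'bmc.example.com', 'admin', 'secret'  # placeholders
handle = '/redfish/v1/TaskService/TaskMonitors/735'

while True:
    resp = requests.get('https://' + baseuri + handle,
                        auth=(username, password), verify=False)
    if resp.status_code != 202:   # anything but 202: the task has finished
        break
    time.sleep(10)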
+ type: str + version_added: '6.1.0' + +author: "Jose Delarosa (@jose-delarosa)" +''' + +EXAMPLES = ''' + - name: Get CPU inventory + community.general.redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" + + - name: Get CPU model + community.general.redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" + + - name: Get memory inventory + community.general.redfish_info: + category: Systems + command: GetMemoryInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Get fan inventory with a timeout of 20 seconds + community.general.redfish_info: + category: Chassis + command: GetFanInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result + + - name: Get Virtual Media information + community.general.redfish_info: + category: Manager + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" + + - name: Get Virtual Media information from Systems + community.general.redfish_info: + category: Systems + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" + + - name: Get Volume Inventory + community.general.redfish_info: + category: Systems + command: GetVolumeInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + - name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" + + - name: Get Session information + community.general.redfish_info: + category: Sessions + command: GetSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" + + - name: Get default inventory information + community.general.redfish_info: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + - name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts | to_nice_json }}" + + - name: Get several inventories + community.general.redfish_info: + category: Systems + command: GetNicInventory,GetBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get default system inventory and user information + community.general.redfish_info: + category: Systems,Accounts + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get default system, user and firmware information + community.general.redfish_info: + category: ["Systems", "Accounts", "Update"] 
+ baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get Manager NIC inventory information + community.general.redfish_info: + category: Manager + command: GetManagerNicInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get boot override information + community.general.redfish_info: + category: Systems + command: GetBootOverride + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get chassis inventory + community.general.redfish_info: + category: Chassis + command: GetChassisInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get all information available in the Manager category + community.general.redfish_info: + category: Manager + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get firmware update capability information + community.general.redfish_info: + category: Update + command: GetFirmwareUpdateCapabilities + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get firmware inventory + community.general.redfish_info: + category: Update + command: GetFirmwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get software inventory + community.general.redfish_info: + category: Update + command: GetSoftwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get the status of an update operation + community.general.redfish_info: + category: Update + command: GetUpdateStatus + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_handle: /redfish/v1/TaskService/TaskMonitors/735 + + - name: Get Manager Services + community.general.redfish_info: + category: Manager + command: GetNetworkProtocols + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get all information available in all categories + community.general.redfish_info: + category: all + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get system health report + community.general.redfish_info: + category: Systems + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get chassis health report + community.general.redfish_info: + category: Chassis + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get manager health report + community.general.redfish_info: + category: Manager + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get manager Redfish Host Interface inventory + community.general.redfish_info: + category: Manager + command: GetHostInterfaces + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get Manager Inventory + community.general.redfish_info: + category: Manager + command: GetManagerInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get HPE Thermal Config + community.general.redfish_info: + category: Chassis + command: GetHPEThermalConfig + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + + - name: Get HPE Fan Percent Minimum 
+ community.general.redfish_info: + category: Chassis + command: GetHPEFanPercentMin + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +result: + description: Different results depending on task. + returned: always + type: dict + sample: List of CPUs on system +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils + +CATEGORY_COMMANDS_ALL = { + "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", + "GetMemoryInventory", "GetNicInventory", "GetHealthReport", + "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", + "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia"], + "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", + "GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"], + "Accounts": ["ListUsers"], + "Sessions": ["GetSessions"], + "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory", + "GetUpdateStatus"], + "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", + "GetHealthReport", "GetHostInterfaces", "GetManagerInventory"], +} + +CATEGORY_COMMANDS_DEFAULT = { + "Systems": "GetSystemInventory", + "Chassis": "GetFanInventory", + "Accounts": "ListUsers", + "Update": "GetFirmwareInventory", + "Sessions": "GetSessions", + "Manager": "GetManagerNicInventory" +} + + +def main(): + result = {} + category_list = [] + module = AnsibleModule( + argument_spec=dict( + category=dict(type='list', elements='str', default=['Systems']), + command=dict(type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + update_handle=dict(), + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True, + ) + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # update handle + update_handle = module.params['update_handle'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module) + + # Build Category list + if "all" in module.params['category']: + category_list = list(CATEGORY_COMMANDS_ALL) + else: + # one or more categories specified + category_list = module.params['category'] + + for category in category_list: + command_list = [] + # Build Command list for each Category + if category in CATEGORY_COMMANDS_ALL: + if not module.params['command']: + # no command specified --> use the default for this category + command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) + elif "all" in module.params['command']: + command_list = list(CATEGORY_COMMANDS_ALL[category]) + # one or more commands + else: + command_list = module.params['command'] + # Verify that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg="Invalid Command: %s" % cmd) + else: + # Fail if even
one category given is invalid + module.fail_json(msg="Invalid Category: %s" % category) + + # Organize by Categories / Commands + if category == "Systems": + # execute only if we find a Systems resource + resource = rf_utils._find_systems_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetSystemInventory": + result["system"] = rf_utils.get_multi_system_inventory() + elif command == "GetCpuInventory": + result["cpu"] = rf_utils.get_multi_cpu_inventory() + elif command == "GetMemoryInventory": + result["memory"] = rf_utils.get_multi_memory_inventory() + elif command == "GetNicInventory": + result["nic"] = rf_utils.get_multi_nic_inventory(category) + elif command == "GetStorageControllerInventory": + result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory() + elif command == "GetDiskInventory": + result["disk"] = rf_utils.get_multi_disk_inventory() + elif command == "GetVolumeInventory": + result["volume"] = rf_utils.get_multi_volume_inventory() + elif command == "GetBiosAttributes": + result["bios_attribute"] = rf_utils.get_multi_bios_attributes() + elif command == "GetBootOrder": + result["boot_order"] = rf_utils.get_multi_boot_order() + elif command == "GetBootOverride": + result["boot_override"] = rf_utils.get_multi_boot_override() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_system_health_report() + elif command == "GetVirtualMedia": + result["virtual_media"] = rf_utils.get_multi_virtualmedia(category) + + elif category == "Chassis": + # execute only if we find Chassis resource + resource = rf_utils._find_chassis_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetFanInventory": + result["fan"] = rf_utils.get_fan_inventory() + elif command == "GetPsuInventory": + result["psu"] = rf_utils.get_psu_inventory() + elif command == "GetChassisThermals": + result["thermals"] = rf_utils.get_chassis_thermals() + elif command == "GetChassisPower": + result["chassis_power"] = rf_utils.get_chassis_power() + elif command == "GetChassisInventory": + result["chassis"] = rf_utils.get_chassis_inventory() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_chassis_health_report() + elif command == "GetHPEThermalConfig": + result["hpe_thermal_config"] = rf_utils.get_hpe_thermal_config() + elif command == "GetHPEFanPercentMin": + result["hpe_fan_percent_min"] = rf_utils.get_hpe_fan_percent_min() + + elif category == "Accounts": + # execute only if we find an Account service resource + resource = rf_utils._find_accountservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "ListUsers": + result["user"] = rf_utils.list_users() + + elif category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetFirmwareInventory": + result["firmware"] = rf_utils.get_firmware_inventory() + elif command == "GetSoftwareInventory": + result["software"] = rf_utils.get_software_inventory() + elif command == "GetFirmwareUpdateCapabilities": + result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities() + elif command == "GetUpdateStatus": + result["update_status"] = 
rf_utils.get_update_status(update_handle) + + elif category == "Sessions": + # execute only if we find SessionService resources + resource = rf_utils._find_sessionservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetSessions": + result["session"] = rf_utils.get_sessions() + + elif category == "Manager": + # execute only if we find a Manager service resource + resource = rf_utils._find_managers_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetManagerNicInventory": + result["manager_nics"] = rf_utils.get_multi_nic_inventory(category) + elif command == "GetVirtualMedia": + result["virtual_media"] = rf_utils.get_multi_virtualmedia(category) + elif command == "GetLogs": + result["log"] = rf_utils.get_logs() + elif command == "GetNetworkProtocols": + result["network_protocols"] = rf_utils.get_network_protocols() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_manager_health_report() + elif command == "GetHostInterfaces": + result["host_interfaces"] = rf_utils.get_hostinterfaces() + elif command == "GetManagerInventory": + result["manager"] = rf_utils.get_multi_manager_inventory() + + # Return data back + module.exit_json(redfish_facts=result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/redhat_subscription.py b/ansible_collections/community/general/plugins/modules/redhat_subscription.py new file mode 100644 index 000000000..79b0d4b4c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/redhat_subscription.py @@ -0,0 +1,1237 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) James Laska (jlaska@redhat.com) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: redhat_subscription +short_description: Manage registration and subscriptions to RHSM using C(subscription-manager) +description: + - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command, + registering using D-Bus if possible. +author: "Barnaby Court (@barnabycourt)" +notes: + - | + The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager)) + to register, starting from community.general 6.5.0: this is done so credentials + (username, password, activation keys) can be passed to C(rhsm) in a secure way. + C(subscription-manager) itself gets credentials only as command line + arguments, which is I(not) secure, as they can be easily stolen by checking the + process listing on the system. Due to limitations of the D-Bus interface of C(rhsm), + the module will I(not) use D-Bus for registration when trying either to register + using I(token), or when specifying I(environment), or when the system is old + (typically RHEL 6 and older). + - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
+ - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl), + I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and + I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf) + config file and default to None. + - It is possible to interact with C(subscription-manager) only as root, + so root permissions are required to successfully run this module. + - Since community.general 6.5.0, credentials (that is, I(username) and I(password), + I(activationkey), or I(token)) are needed only in case the system is not registered, + or I(force_register) is specified; this makes it possible to use the module to tweak an + already registered system, for example attaching pools to it (using I(pool), or I(pool_ids)), + and modifying the C(syspurpose) attributes (using I(syspurpose)). +requirements: + - subscription-manager + - Optionally the C(dbus) Python library; this is usually included in the OS + as it is used by C(subscription-manager). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - whether to register and subscribe (C(present)), or unregister (C(absent)) a system + choices: [ "present", "absent" ] + default: "present" + type: str + username: + description: + - access.redhat.com or Red Hat Satellite or Katello username + type: str + password: + description: + - access.redhat.com or Red Hat Satellite or Katello password + type: str + token: + description: + - sso.redhat.com API access token. + type: str + version_added: 6.3.0 + server_hostname: + description: + - Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server + type: str + server_insecure: + description: + - Enable or disable https server certificate verification when connecting to C(server_hostname) + type: str + server_prefix: + description: + - Specify the prefix when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + version_added: 3.3.0 + server_port: + description: + - Specify the port when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + version_added: 3.3.0 + rhsm_baseurl: + description: + - Specify CDN baseurl + type: str + rhsm_repo_ca_cert: + description: + - Specify an alternative location for a CA certificate for CDN + type: str + server_proxy_hostname: + description: + - Specify an HTTP proxy hostname. + type: str + server_proxy_scheme: + description: + - Specify an HTTP proxy scheme, for example C(http) or C(https). + type: str + version_added: 6.2.0 + server_proxy_port: + description: + - Specify an HTTP proxy port. + type: str + server_proxy_user: + description: + - Specify a user for HTTP proxy with basic authentication + type: str + server_proxy_password: + description: + - Specify a password for HTTP proxy with basic authentication + type: str + auto_attach: + description: + - Upon successful registration, auto-consume available subscriptions + - Added in favor of deprecated autosubscribe in 2.5. + type: bool + aliases: [autosubscribe] + activationkey: + description: + - supply an activation key for use with registration + type: str + org_id: + description: + - Organization ID to use in conjunction with activationkey + type: str + environment: + description: + - Register with a specific environment in the destination org.
Used with Red Hat Satellite or Katello + type: str + pool: + description: + - | + Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if + possible, as it is much faster. Mutually exclusive with I(pool_ids). + default: '^$' + type: str + pool_ids: + description: + - | + Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster. + A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)), + or as a C(dict) with the pool ID as the key, and a quantity as the value (ex. + C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple + entitlements from a pool (the pool must support this). Mutually exclusive with I(pool). + default: [] + type: list + elements: raw + consumer_type: + description: + - The type of unit to register, defaults to system + type: str + consumer_name: + description: + - Name of the system to register, defaults to the hostname + type: str + consumer_id: + description: + - | + References an existing consumer ID to resume using a previous registration + for this system. If the system's identity certificate is lost or corrupted, + this option allows it to resume using its previous identity and subscriptions. + The default is to not specify a consumer ID so a new ID is created. + type: str + force_register: + description: + - Register the system even if it is already registered + type: bool + default: false + release: + description: + - Set a release version + type: str + syspurpose: + description: + - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) + and synchronize these attributes with RHSM server. Syspurpose attributes help attach + the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file + already contains some attributes, then new attributes overwrite existing attributes. + When some attribute is not listed in the new list of attributes, the existing + attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored. + type: dict + suboptions: + usage: + description: Syspurpose attribute usage + type: str + role: + description: Syspurpose attribute role + type: str + service_level_agreement: + description: Syspurpose attribute service_level_agreement + type: str + addons: + description: Syspurpose attribute addons + type: list + elements: str + sync: + description: + - When this option is true, then syspurpose attributes are synchronized with + RHSM server immediately. When this option is false, then syspurpose attributes + will be synchronized with RHSM server by rhsmcertd daemon. + type: bool + default: false +''' + +EXAMPLES = ''' +- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + auto_attach: true + +- name: Same as above but subscribe to a specific pool by ID. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: 0123456789abcdef0123456789abcdef + +- name: Register and subscribe to multiple pools. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: + - 0123456789abcdef0123456789abcdef + - 1123456789abcdef0123456789abcdef + +- name: Same as above but consume multiple entitlements.
+ community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: + - 0123456789abcdef0123456789abcdef: 2 + - 1123456789abcdef0123456789abcdef: 4 + +- name: Register and pull existing system data. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + +- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization + community.general.redhat_subscription: + state: present + activationkey: 1-222333444 + org_id: 222333444 + pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$' + +- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription) + community.general.redhat_subscription: + state: present + activationkey: 1-222333444 + org_id: 222333444 + pool: '^Red Hat Enterprise Server$' + +- name: Register as user credentials into given environment (against Red Hat Satellite or Katello), and auto-subscribe. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + environment: Library + auto_attach: true + +- name: Register as user (joe_user) with password (somepass) and a specific release + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + release: 7.4 + +- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + auto_attach: true + syspurpose: + usage: "Production" + role: "Red Hat Enterprise Server" + service_level_agreement: "Premium" + addons: + - addon1 + - addon2 + sync: true +''' + +RETURN = ''' +subscribed_pool_ids: + description: List of pool IDs to which system is now subscribed + returned: success + type: dict + sample: { + "8a85f9815ab905d3015ab928c7005de4": "1" + } +''' + +from os.path import isfile +from os import getuid, unlink +import re +import shutil +import tempfile +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.six.moves import configparser +from ansible.module_utils import distro + + +SUBMAN_CMD = None + + +class RegistrationBase(object): + + REDHAT_REPO = "/etc/yum.repos.d/redhat.repo" + + def __init__(self, module, username=None, password=None, token=None): + self.module = module + self.username = username + self.password = password + self.token = token + + def configure(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def enable(self): + # Remove any existing redhat.repo + if isfile(self.REDHAT_REPO): + unlink(self.REDHAT_REPO) + + def register(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def unregister(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def unsubscribe(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def update_plugin_conf(self, plugin, enabled=True): + plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin + + if isfile(plugin_conf): + tmpfd, tmpfile = tempfile.mkstemp() + shutil.copy2(plugin_conf, tmpfile) + cfg = configparser.ConfigParser() + cfg.read([tmpfile]) + + if enabled: + cfg.set('main', 'enabled', '1') + else: + cfg.set('main', 'enabled', '0') + + fd = open(tmpfile, 'w+') + cfg.write(fd) + fd.close() + 
self.module.atomic_move(tmpfile, plugin_conf) + + def subscribe(self, **kwargs): + raise NotImplementedError("Must be implemented by a sub-class") + + +class Rhsm(RegistrationBase): + def __init__(self, module, username=None, password=None, token=None): + RegistrationBase.__init__(self, module, username, password, token) + self.module = module + + def enable(self): + ''' + Enable the system to receive updates from subscription-manager. + This involves updating affected yum plugins and removing any + conflicting yum repositories. + ''' + RegistrationBase.enable(self) + self.update_plugin_conf('rhnplugin', False) + self.update_plugin_conf('subscription-manager', True) + + def configure(self, **kwargs): + ''' + Configure the system as directed for registration with RHSM + Raises: + * Exception - if error occurs while running command + ''' + + args = [SUBMAN_CMD, 'config'] + + # Pass supplied **kwargs as parameters to subscription-manager. Ignore + # non-configuration parameters and replace '_' with '.'. For example, + # 'server_hostname' becomes '--server.hostname'. + options = [] + for k, v in sorted(kwargs.items()): + if re.search(r'^(server|rhsm)_', k) and v is not None: + options.append('--%s=%s' % (k.replace('_', '.', 1), v)) + + # When there is nothing to configure, then it is not necessary + # to run the config command, because it only prints the current + # content of the configuration file + if len(options) == 0: + return + + args.extend(options) + + self.module.run_command(args, check_rc=True) + + @property + def is_registered(self): + ''' + Determine whether the current system is registered. + Returns: + * Boolean - whether the current system is currently registered to + RHSM. + ''' + + args = [SUBMAN_CMD, 'identity'] + rc, stdout, stderr = self.module.run_command(args, check_rc=False) + return rc == 0 + + def _can_connect_to_dbus(self): + """ + Checks whether it is possible to connect to the system D-Bus bus. + + :returns: bool -- whether it is possible to connect to the system D-Bus bus. + """ + + try: + # Technically speaking, subscription-manager uses dbus-python + # as D-Bus library, so this ought to work; better be safe than + # sorry, I guess...
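+ # (The probe below sends a throwaway signal message on the system bus + # and flushes it; if the bus is unreachable, dbus raises a DBusException + # and we fall back to the CLI.)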
+ import dbus + except ImportError: + self.module.debug('dbus Python module not available, will use CLI') + return False + + try: + bus = dbus.SystemBus() + msg = dbus.lowlevel.SignalMessage('/', 'com.example', 'test') + bus.send_message(msg) + bus.flush() + + except dbus.exceptions.DBusException as e: + self.module.debug('Failed to connect to system D-Bus bus, will use CLI: %s' % e) + return False + + self.module.debug('Verified system D-Bus bus as usable') + return True + + def register(self, was_registered, username, password, token, auto_attach, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, environment, + release): + ''' + Register the current system to the provided RHSM or Red Hat Satellite + or Katello server + + Raises: + * Exception - if any error occurs during the registration + ''' + # There is no support for token-based registration in the D-Bus API + # of rhsm, so always use the CLI in that case; + # also, since the specified environments are names, and the D-Bus APIs + # require IDs for the environments, use the CLI also in that case + if not token and not environment and self._can_connect_to_dbus(): + self._register_using_dbus(was_registered, username, password, auto_attach, + activationkey, org_id, consumer_type, + consumer_name, consumer_id, + force_register, environment, release) + return + self._register_using_cli(username, password, token, auto_attach, + activationkey, org_id, consumer_type, + consumer_name, consumer_id, + force_register, environment, release) + + def _register_using_cli(self, username, password, token, auto_attach, + activationkey, org_id, consumer_type, consumer_name, + consumer_id, force_register, environment, release): + ''' + Register using the 'subscription-manager' command + + Raises: + * Exception - if error occurs while running command + ''' + args = [SUBMAN_CMD, 'register'] + + # Generate command arguments + if force_register: + args.extend(['--force']) + + if org_id: + args.extend(['--org', org_id]) + + if auto_attach: + args.append('--auto-attach') + + if consumer_type: + args.extend(['--type', consumer_type]) + + if consumer_name: + args.extend(['--name', consumer_name]) + + if consumer_id: + args.extend(['--consumerid', consumer_id]) + + if environment: + args.extend(['--environment', environment]) + + if activationkey: + args.extend(['--activationkey', activationkey]) + elif token: + args.extend(['--token', token]) + else: + if username: + args.extend(['--username', username]) + if password: + args.extend(['--password', password]) + + if release: + args.extend(['--release', release]) + + rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False) + + def _register_using_dbus(self, was_registered, username, password, auto_attach, + activationkey, org_id, consumer_type, consumer_name, + consumer_id, force_register, environment, release): + ''' + Register using D-Bus (connecting to the rhsm service) + + Raises: + * Exception - if error occurs during the D-Bus communication + ''' + import dbus + + SUBSCRIPTION_MANAGER_LOCALE = 'C' + # Seconds to wait for Registration to complete over DBus; + # 10 minutes should be a pretty generous timeout. 
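+ # (Without an explicit timeout, dbus-python would use the libdbus + # default reply timeout, typically around 25 seconds, which a slow + # registration can easily exceed.)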
+ REGISTRATION_TIMEOUT = 600 + + def str2int(s, default=0): + try: + return int(s) + except ValueError: + return default + + distro_id = distro.id() + distro_version_parts = distro.version_parts() + distro_version = tuple(str2int(p) for p in distro_version_parts) + + # Stop the rhsm service when using systemd (which means Fedora or + # RHEL 7+): this is because the service may not use new configuration bits + # - with subscription-manager < 1.26.5-1 (in RHEL < 8.2); + # fixed later by https://github.com/candlepin/subscription-manager/pull/2175 + # - sporadically: https://bugzilla.redhat.com/show_bug.cgi?id=2049296 + if distro_id == 'fedora' or distro_version[0] >= 7: + cmd = ['systemctl', 'stop', 'rhsm'] + self.module.run_command(cmd, check_rc=True, expand_user_and_vars=False) + + # While there is a 'force' option for the registration, it is actually + # not implemented (and thus it does not work) + # - in RHEL 7 and earlier + # - in RHEL 8 before 8.8: https://bugzilla.redhat.com/show_bug.cgi?id=2118486 + # - in RHEL 9 before 9.2: https://bugzilla.redhat.com/show_bug.cgi?id=2121350 + # Hence, use it only when implemented, manually unregistering otherwise. + # Match it on RHEL, since we know about it; other distributions + # will need their own logic. + dbus_force_option_works = False + if (distro_id == 'rhel' and + ((distro_version[0] == 8 and distro_version[1] >= 8) or + (distro_version[0] == 9 and distro_version[1] >= 2) or + distro_version[0] > 9)): + dbus_force_option_works = True + + if force_register and not dbus_force_option_works and was_registered: + self.unregister() + + register_opts = {} + if consumer_type: + register_opts['consumer_type'] = consumer_type + if consumer_name: + register_opts['name'] = consumer_name + if consumer_id: + register_opts['consumerid'] = consumer_id + if environment: + # The option for environments used to be 'environment' in versions + # of RHEL before 8.6, and then it changed to 'environments'; since + # the Register*() D-Bus functions reject unknown options, we have + # to pass the right option depending on the version -- funky. + def supports_option_environments(): + # subscription-manager in any supported Fedora version + # has the new option. + if distro_id == 'fedora': + return True + # Check for RHEL 8 >= 8.6, or RHEL >= 9. + if distro_id == 'rhel' and \ + ((distro_version[0] == 8 and distro_version[1] >= 6) or + distro_version[0] >= 9): + return True + # CentOS: similar checks as for RHEL, with one extra bit: + # if the 2nd part of the version is empty, it means it is + # CentOS Stream, and thus we can assume it has the latest + # version of subscription-manager. + if distro_id == 'centos' and \ + ((distro_version[0] == 8 and + (distro_version[1] >= 6 or distro_version_parts[1] == '')) or + distro_version[0] >= 9): + return True + # Unknown or old distro: assume it does not support + # the new option.
+ return False + + environment_key = 'environment' + if supports_option_environments(): + environment_key = 'environments' + register_opts[environment_key] = environment + if force_register and dbus_force_option_works and was_registered: + register_opts['force'] = True + # Wrap it as proper D-Bus dict + register_opts = dbus.Dictionary(register_opts, signature='sv', variant_level=1) + + connection_opts = {} + # Wrap it as proper D-Bus dict + connection_opts = dbus.Dictionary(connection_opts, signature='sv', variant_level=1) + + bus = dbus.SystemBus() + register_server = bus.get_object('com.redhat.RHSM1', + '/com/redhat/RHSM1/RegisterServer') + address = register_server.Start( + SUBSCRIPTION_MANAGER_LOCALE, + dbus_interface='com.redhat.RHSM1.RegisterServer', + ) + + try: + # Use the private bus to register the system + self.module.debug('Connecting to the private DBus') + private_bus = dbus.connection.Connection(address) + + try: + if activationkey: + args = ( + org_id, + [activationkey], + register_opts, + connection_opts, + SUBSCRIPTION_MANAGER_LOCALE, + ) + private_bus.call_blocking( + 'com.redhat.RHSM1', + '/com/redhat/RHSM1/Register', + 'com.redhat.RHSM1.Register', + 'RegisterWithActivationKeys', + 'sasa{sv}a{sv}s', + args, + timeout=REGISTRATION_TIMEOUT, + ) + else: + args = ( + org_id or '', + username, + password, + register_opts, + connection_opts, + SUBSCRIPTION_MANAGER_LOCALE, + ) + private_bus.call_blocking( + 'com.redhat.RHSM1', + '/com/redhat/RHSM1/Register', + 'com.redhat.RHSM1.Register', + 'Register', + 'sssa{sv}a{sv}s', + args, + timeout=REGISTRATION_TIMEOUT, + ) + + except dbus.exceptions.DBusException as e: + # Sometimes we get NoReply but the registration has succeeded. + # Check the registration status before deciding if this is an error. + if e.get_dbus_name() == 'org.freedesktop.DBus.Error.NoReply': + # is_registered is a property, not a method + if not self.is_registered: + # Host is not registered so re-raise the error + raise + else: + raise + # Host was registered so continue + finally: + # Always shut down the private bus + self.module.debug('Shutting down private DBus instance') + register_server.Stop( + SUBSCRIPTION_MANAGER_LOCALE, + dbus_interface='com.redhat.RHSM1.RegisterServer', + ) + + # Make sure to refresh all the local data: this will fetch all the + # certificates, update redhat.repo, etc. + self.module.run_command([SUBMAN_CMD, 'refresh'], + check_rc=True, expand_user_and_vars=False) + + if auto_attach: + args = [SUBMAN_CMD, 'attach', '--auto'] + self.module.run_command(args, check_rc=True, expand_user_and_vars=False) + + # There is no support for setting the release via D-Bus, so invoke + # the CLI for this. + if release: + args = [SUBMAN_CMD, 'release', '--set', release] + self.module.run_command(args, check_rc=True, expand_user_and_vars=False) + + def unsubscribe(self, serials=None): + ''' + Unsubscribe a system from subscribed channels + Args: + serials(list or None): list of serials to unsubscribe. If + serials is None or an empty list, then + all subscribed channels will be removed.
+ Raises: + * Exception - if error occurs while running command + ''' + items = [] + if serials is not None and serials: + items = ["--serial=%s" % s for s in serials] + if serials is None: + items = ["--all"] + + if items: + args = [SUBMAN_CMD, 'remove'] + items + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + return serials + + def unregister(self): + ''' + Unregister a currently registered system + Raises: + * Exception - if error occurs while running command + ''' + args = [SUBMAN_CMD, 'unregister'] + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + self.update_plugin_conf('rhnplugin', False) + self.update_plugin_conf('subscription-manager', False) + + def subscribe(self, regexp): + ''' + Subscribe current system to available pools matching the specified + regular expression. It matches regexp against available pool ids first. + If any pool ids match, subscribe to those pools and return. + + If no pool ids match, then match regexp against available pool product + names. Note this can still easily match many pools. Then subscribe + to those pools. + + Since a pool id is a more specific match, we only fall back to matching + against names if we didn't match pool ids. + + Raises: + * Exception - if error occurs while running command + ''' + # See https://github.com/ansible/ansible/issues/19466 + + # subscribe to pools whose pool id matches regexp (and only the pool id) + subscribed_pool_ids = self.subscribe_pool(regexp) + + # If we found any matches, we are done + # Don't attempt to match pools by product name + if subscribed_pool_ids: + return subscribed_pool_ids + + # We didn't match any pool ids. + # Now try subscribing to pools based on product name match + # Note: This can match lots of product names.
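+ # For example, with hypothetical pool data, the regexp '^Red Hat' would + # match every pool whose product name starts with "Red Hat", and each + # matching pool is attached individually below.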
+ subscribed_by_product_pool_ids = self.subscribe_product(regexp) + if subscribed_by_product_pool_ids: + return subscribed_by_product_pool_ids + + # no matches + return [] + + def subscribe_by_pool_ids(self, pool_ids): + """ + Try to subscribe to the list of pool IDs + """ + available_pools = RhsmPools(self.module) + + available_pool_ids = [p.get_pool_id() for p in available_pools] + + for pool_id, quantity in sorted(pool_ids.items()): + if pool_id in available_pool_ids: + args = [SUBMAN_CMD, 'attach', '--pool', pool_id] + if quantity is not None: + args.extend(['--quantity', to_native(quantity)]) + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + else: + self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id) + return pool_ids + + def subscribe_pool(self, regexp): + ''' + Subscribe current system to available pools matching the specified + regular expression + Raises: + * Exception - if error occurs while running command + ''' + + # Available pools ready for subscription + available_pools = RhsmPools(self.module) + + subscribed_pool_ids = [] + for pool in available_pools.filter_pools(regexp): + pool.subscribe() + subscribed_pool_ids.append(pool.get_pool_id()) + return subscribed_pool_ids + + def subscribe_product(self, regexp): + ''' + Subscribe current system to available pools matching the specified + regular expression + Raises: + * Exception - if error occurs while running command + ''' + + # Available pools ready for subscription + available_pools = RhsmPools(self.module) + + subscribed_pool_ids = [] + for pool in available_pools.filter_products(regexp): + pool.subscribe() + subscribed_pool_ids.append(pool.get_pool_id()) + return subscribed_pool_ids + + def update_subscriptions(self, regexp): + changed = False + consumed_pools = RhsmPools(self.module, consumed=True) + pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)] + pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)]) + + serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep] + serials = self.unsubscribe(serials=serials_to_remove) + + subscribed_pool_ids = self.subscribe(regexp) + + if subscribed_pool_ids or serials: + changed = True + return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids, + 'unsubscribed_serials': serials} + + def update_subscriptions_by_pool_ids(self, pool_ids): + changed = False + consumed_pools = RhsmPools(self.module, consumed=True) + + existing_pools = {} + serials_to_remove = [] + for p in consumed_pools: + pool_id = p.get_pool_id() + quantity_used = p.get_quantity_used() + existing_pools[pool_id] = quantity_used + + quantity = pool_ids.get(pool_id, 0) + if quantity is not None and quantity != quantity_used: + serials_to_remove.append(p.Serial) + + serials = self.unsubscribe(serials=serials_to_remove) + + missing_pools = {} + for pool_id, quantity in sorted(pool_ids.items()): + quantity_used = existing_pools.get(pool_id, 0) + if quantity is None and quantity_used == 0 or quantity not in (None, 0, quantity_used): + missing_pools[pool_id] = quantity + + self.subscribe_by_pool_ids(missing_pools) + + if missing_pools or serials: + changed = True + return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()), + 'unsubscribed_serials': serials} + + def sync_syspurpose(self): + """ + Try to synchronize syspurpose attributes with server + """ + args = [SUBMAN_CMD, 'status'] + rc, stdout, stderr = self.module.run_command(args, 
check_rc=False) + + +class RhsmPool(object): + ''' + Convenience class for housing subscription information + ''' + + def __init__(self, module, **kwargs): + self.module = module + for k, v in kwargs.items(): + setattr(self, k, v) + + def __str__(self): + return str(self.__getattribute__('_name')) + + def get_pool_id(self): + return getattr(self, 'PoolId', getattr(self, 'PoolID')) + + def get_quantity_used(self): + return int(getattr(self, 'QuantityUsed')) + + def subscribe(self): + args = "subscription-manager attach --pool %s" % self.get_pool_id() + rc, stdout, stderr = self.module.run_command(args, check_rc=True) + return rc == 0 + + +class RhsmPools(object): + """ + This class is used for manipulating pool subscriptions with RHSM + """ + + def __init__(self, module, consumed=False): + self.module = module + self.products = self._load_product_list(consumed) + + def __iter__(self): + return self.products.__iter__() + + def _load_product_list(self, consumed=False): + """ + Loads the list of all available or consumed pools for the system into a data structure + + Args: + consumed(bool): if True list consumed pools, else list available pools (default False) + """ + args = "subscription-manager list" + if consumed: + args += " --consumed" + else: + args += " --available" + lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env) + + products = [] + for line in stdout.split('\n'): + # Remove leading+trailing whitespace + line = line.strip() + # An empty line implies the end of an output group + if len(line) == 0: + continue + # If a colon ':' is found, parse + elif ':' in line: + (key, value) = line.split(':', 1) + key = key.strip().replace(" ", "") # To unify + value = value.strip() + if key in ['ProductName', 'SubscriptionName']: + # Remember the name for later processing + products.append(RhsmPool(self.module, _name=value, key=value)) + elif products: + # Associate value with most recently recorded product + setattr(products[-1], key, value) + # FIXME - log some warning?
+ # else: + # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) + return products + + def filter_pools(self, regexp='^$'): + ''' + Yield the RhsmPools whose pool id matches the provided regular expression + ''' + r = re.compile(regexp) + for product in self.products: + if r.search(product.get_pool_id()): + yield product + + def filter_products(self, regexp='^$'): + ''' + Yield the RhsmPools whose product name matches the provided regular expression + ''' + r = re.compile(regexp) + for product in self.products: + if r.search(product._name): + yield product + + +class SysPurpose(object): + """ + This class is used for reading and writing to syspurpose.json file + """ + + SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json" + + ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons'] + + def __init__(self, path=None): + """ + Initialize class used for reading syspurpose json file + """ + self.path = path or self.SYSPURPOSE_FILE_PATH + + def update_syspurpose(self, new_syspurpose): + """ + Try to update current syspurpose with new attributes from new_syspurpose + """ + syspurpose = {} + syspurpose_changed = False + for key, value in new_syspurpose.items(): + if key in self.ALLOWED_ATTRIBUTES: + if value is not None: + syspurpose[key] = value + elif key == 'sync': + pass + else: + raise KeyError("Attribute: %s not in list of allowed attributes: %s" % + (key, self.ALLOWED_ATTRIBUTES)) + current_syspurpose = self._read_syspurpose() + if current_syspurpose != syspurpose: + syspurpose_changed = True + # Update current syspurpose with new values + current_syspurpose.update(syspurpose) + # When some key is not listed in new syspurpose, then delete it from current syspurpose + # and ignore custom attributes created by user (e.g. "foo": "bar") + for key in list(current_syspurpose): + if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose: + del current_syspurpose[key] + self._write_syspurpose(current_syspurpose) + return syspurpose_changed + + def _write_syspurpose(self, new_syspurpose): + """ + Write the new syspurpose attributes to the json file. + """ + with open(self.path, "w") as fp: + fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True)) + + def _read_syspurpose(self): + """ + Read the current syspurpose from the json file. + """ + current_syspurpose = {} + try: + with open(self.path, "r") as fp: + content = fp.read() + except IOError: + pass + else: + current_syspurpose = json.loads(content) + return current_syspurpose + + +def main(): + + # Load RHSM configuration from file + rhsm = Rhsm(None) + + # Note: the default values for parameters are: + # 'type': 'str', 'default': None, 'required': False + # So there is no need to repeat these values for each parameter.
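+ # For example, 'username': {} below is shorthand for + # 'username': {'type': 'str', 'default': None, 'required': False}.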
+ module = AnsibleModule( + argument_spec={ + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + 'username': {}, + 'password': {'no_log': True}, + 'token': {'no_log': True}, + 'server_hostname': {}, + 'server_insecure': {}, + 'server_prefix': {}, + 'server_port': {}, + 'rhsm_baseurl': {}, + 'rhsm_repo_ca_cert': {}, + 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'}, + 'activationkey': {'no_log': True}, + 'org_id': {}, + 'environment': {}, + 'pool': {'default': '^$'}, + 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'}, + 'consumer_type': {}, + 'consumer_name': {}, + 'consumer_id': {}, + 'force_register': {'default': False, 'type': 'bool'}, + 'server_proxy_hostname': {}, + 'server_proxy_scheme': {}, + 'server_proxy_port': {}, + 'server_proxy_user': {}, + 'server_proxy_password': {'no_log': True}, + 'release': {}, + 'syspurpose': { + 'type': 'dict', + 'options': { + 'role': {}, + 'usage': {}, + 'service_level_agreement': {}, + 'addons': {'type': 'list', 'elements': 'str'}, + 'sync': {'type': 'bool', 'default': False} + } + } + }, + required_together=[['username', 'password'], + ['server_proxy_hostname', 'server_proxy_port'], + ['server_proxy_user', 'server_proxy_password']], + mutually_exclusive=[['activationkey', 'username'], + ['activationkey', 'token'], + ['token', 'username'], + ['activationkey', 'consumer_id'], + ['activationkey', 'environment'], + ['activationkey', 'auto_attach'], + ['pool', 'pool_ids']], + required_if=[['force_register', True, ['username', 'activationkey', 'token'], True]], + ) + + if getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + + rhsm.module = module + state = module.params['state'] + username = module.params['username'] + password = module.params['password'] + token = module.params['token'] + server_hostname = module.params['server_hostname'] + server_insecure = module.params['server_insecure'] + server_prefix = module.params['server_prefix'] + server_port = module.params['server_port'] + rhsm_baseurl = module.params['rhsm_baseurl'] + rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] + auto_attach = module.params['auto_attach'] + activationkey = module.params['activationkey'] + org_id = module.params['org_id'] + if activationkey and not org_id: + module.fail_json(msg='org_id is required when using activationkey') + environment = module.params['environment'] + pool = module.params['pool'] + pool_ids = {} + for value in module.params['pool_ids']: + if isinstance(value, dict): + if len(value) != 1: + module.fail_json(msg='Unable to parse pool_ids option.') + pool_id, quantity = list(value.items())[0] + else: + pool_id, quantity = value, None + pool_ids[pool_id] = quantity + consumer_type = module.params["consumer_type"] + consumer_name = module.params["consumer_name"] + consumer_id = module.params["consumer_id"] + force_register = module.params["force_register"] + server_proxy_hostname = module.params['server_proxy_hostname'] + server_proxy_port = module.params['server_proxy_port'] + server_proxy_user = module.params['server_proxy_user'] + server_proxy_password = module.params['server_proxy_password'] + release = module.params['release'] + syspurpose = module.params['syspurpose'] + + global SUBMAN_CMD + SUBMAN_CMD = module.get_bin_path('subscription-manager', True) + + syspurpose_changed = False + if syspurpose is not None: + try: + syspurpose_changed = SysPurpose().update_syspurpose(syspurpose) + except Exception as err: + 
module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err)) + + # Ensure system is registered + if state == 'present': + + # Cache the status of the system before the changes + was_registered = rhsm.is_registered + + # Register system + if was_registered and not force_register: + if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + try: + rhsm.sync_syspurpose() + except Exception as e: + module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e)) + if pool != '^$' or pool_ids: + try: + if pool_ids: + result = rhsm.update_subscriptions_by_pool_ids(pool_ids) + else: + result = rhsm.update_subscriptions(pool) + except Exception as e: + module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e))) + else: + module.exit_json(**result) + else: + if syspurpose_changed is True: + module.exit_json(changed=True, msg="Syspurpose attributes changed.") + else: + module.exit_json(changed=False, msg="System already registered.") + else: + if not username and not activationkey and not token: + module.fail_json(msg="state is present but any of the following are missing: username, activationkey, token") + try: + rhsm.enable() + rhsm.configure(**module.params) + rhsm.register(was_registered, username, password, token, auto_attach, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, + environment, release) + if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + rhsm.sync_syspurpose() + if pool_ids: + subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) + elif pool != '^$': + subscribed_pool_ids = rhsm.subscribe(pool) + else: + subscribed_pool_ids = [] + except Exception as e: + module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e))) + else: + module.exit_json(changed=True, + msg="System successfully registered to '%s'." % server_hostname, + subscribed_pool_ids=subscribed_pool_ids) + + # Ensure system is *not* registered + if state == 'absent': + if not rhsm.is_registered: + module.exit_json(changed=False, msg="System already unregistered.") + else: + try: + rhsm.unsubscribe() + rhsm.unregister() + except Exception as e: + module.fail_json(msg="Failed to unregister: %s" % to_native(e)) + else: + module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/redis.py b/ansible_collections/community/general/plugins/modules/redis.py new file mode 100644 index 000000000..1778a067e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/redis.py @@ -0,0 +1,335 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: redis +short_description: Various redis commands, replica and flush +description: + - Unified utility to interact with redis instances. +extends_documentation_fragment: + - community.general.redis + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + command: + description: + - The selected redis command + - C(config) ensures a configuration setting on an instance. 
+ - C(flush) flushes the whole instance or a specified db. + - C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).) + choices: [ config, flush, replica, slave ] + type: str + tls: + default: false + version_added: 4.6.0 + login_user: + version_added: 4.6.0 + validate_certs: + version_added: 4.6.0 + ca_certs: + version_added: 4.6.0 + master_host: + description: + - The host of the master instance [replica command] + type: str + master_port: + description: + - The port of the master instance [replica command] + type: int + replica_mode: + description: + - The mode of the redis instance [replica command] + - C(slave) is an alias for C(replica). + default: replica + choices: [ master, replica, slave ] + type: str + aliases: + - slave_mode + db: + description: + - The database to flush (used in db mode) [flush command] + type: int + flush_mode: + description: + - Type of flush (all the dbs in a redis instance or a specific one) + [flush command] + default: all + choices: [ all, db ] + type: str + name: + description: + - A redis config key. + type: str + value: + description: + - A redis config value. When memory size is needed, it is possible + to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024. + Units are case insensitive, i.e. 1m = 1mb = 1M = 1MB. + type: str + +notes: + - Requires the redis-py Python package on the remote host. You can + install it with pip (pip install redis) or with a package manager. + https://github.com/andymccurdy/redis-py + - If the redis master instance that we are making a replica of is password protected, + this needs to be set in the redis.conf file in the masterauth variable + +seealso: + - module: community.general.redis_info +requirements: [ redis ] +author: "Xabier Larrakoetxea (@slok)" +''' + +EXAMPLES = ''' +- name: Set local redis instance to be a replica of melee.island on port 6377 + community.general.redis: + command: replica + master_host: melee.island + master_port: 6377 + +- name: Deactivate replica mode + community.general.redis: + command: replica + replica_mode: master + +- name: Flush all the redis db + community.general.redis: + command: flush + flush_mode: all + +- name: Flush only one db in a redis instance + community.general.redis: + command: flush + db: 1 + flush_mode: db + +- name: Configure local redis to have 10000 max clients + community.general.redis: + command: config + name: maxclients + value: 10000 + +- name: Configure local redis maxmemory to 4GB + community.general.redis: + command: config + name: maxmemory + value: 4GB + +- name: Configure local redis to have lua time limit of 100 ms + community.general.redis: + command: config + name: lua-time-limit + value: 100 +''' + +import traceback + +REDIS_IMP_ERR = None +try: + import redis +except ImportError: + REDIS_IMP_ERR = traceback.format_exc() + redis_found = False +else: + redis_found = True + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.formatters import human_to_bytes +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, redis_auth_params) +import re + + +# Redis module specific support methods.
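+# Note that the helpers below deliberately swallow exceptions and return +# False, so that main() can report a uniform failure message; for example, +# set_replica_mode(client, 'melee.island', 6377) (hypothetical arguments) +# returns the client reply on success and False on any connection or +# command error.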
+def set_replica_mode(client, master_host, master_port): + try: + return client.slaveof(master_host, master_port) + except Exception: + return False + + +def set_master_mode(client): + try: + return client.slaveof() + except Exception: + return False + + +def flush(client, db=None): + try: + if not isinstance(db, int): + return client.flushall() + else: + # The passed client has been connected to the database already + return client.flushdb() + except Exception: + return False + + +# Module execution. +def main(): + redis_auth_args = redis_auth_argument_spec(tls_default=False) + module_args = dict( + command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']), + master_host=dict(type='str'), + master_port=dict(type='int'), + replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], + aliases=["slave_mode"]), + db=dict(type='int'), + flush_mode=dict(type='str', default='all', choices=['all', 'db']), + name=dict(type='str'), + value=dict(type='str'), + ) + module_args.update(redis_auth_args) + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + fail_imports(module, module.params['tls']) + + redis_params = redis_auth_params(module) + + command = module.params['command'] + if command == "slave": + command = "replica" + + # Replica Command section ----------- + if command == "replica": + master_host = module.params['master_host'] + master_port = module.params['master_port'] + mode = module.params['replica_mode'] + if mode == "slave": + mode = "replica" + + # Check if we have all the data + if mode == "replica": # Only need data if we want to be replica + if not master_host: + module.fail_json(msg='In replica mode master host must be provided') + + if not master_port: + module.fail_json(msg='In replica mode master port must be provided') + + # Connect and check + r = redis.StrictRedis(**redis_params) + try: + r.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + # Check if we are already in the mode that we want + info = r.info() + if mode == "master" and info["role"] == "master": + module.exit_json(changed=False, mode=mode) + + elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: + status = dict( + status=mode, + master_host=master_host, + master_port=master_port, + ) + module.exit_json(changed=False, mode=status) + else: + # Do the stuff + # (Check Check_mode before commands so the commands aren't evaluated + # if not necessary) + if mode == "replica": + if module.check_mode or set_replica_mode(r, master_host, master_port): + info = r.info() + status = { + 'status': mode, + 'master_host': master_host, + 'master_port': master_port, + } + module.exit_json(changed=True, mode=status) + else: + module.fail_json(msg='Unable to set replica mode') + + else: + if module.check_mode or set_master_mode(r): + module.exit_json(changed=True, mode=mode) + else: + module.fail_json(msg='Unable to set master mode') + + # flush Command section ----------- + elif command == "flush": + db = module.params['db'] + mode = module.params['flush_mode'] + + # Check if we have all the data + if mode == "db": + if db is None: + module.fail_json(msg="In db mode the db number must be provided") + + # Connect and check + r = redis.StrictRedis(db=db, **redis_params) + try: + r.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % 
to_native(e), exception=traceback.format_exc()) + + # Do the stuff + # (Check Check_mode before commands so the commands aren't evaluated + # if not necessary) + if mode == "all": + if module.check_mode or flush(r): + module.exit_json(changed=True, flushed=True) + else: # Flush never fails :) + module.fail_json(msg="Unable to flush all databases") + + else: + if module.check_mode or flush(r, db): + module.exit_json(changed=True, flushed=True, db=db) + else: # Flush never fails :) + module.fail_json(msg="Unable to flush '%d' database" % db) + elif command == 'config': + name = module.params['name'] + + try: # try to parse the value as if it were the memory size + if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()): + value = str(human_to_bytes(module.params['value'].upper())) + else: + value = module.params['value'] + except ValueError: + value = module.params['value'] + + r = redis.StrictRedis(**redis_params) + + try: + r.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + try: + old_value = r.config_get(name)[name] + except Exception as e: + module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc()) + changed = old_value != value + + if module.check_mode or not changed: + module.exit_json(changed=changed, name=name, value=value) + else: + try: + r.config_set(name, value) + except Exception as e: + module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc()) + module.exit_json(changed=changed, name=name, value=value) + else: + module.fail_json(msg='A valid command must be provided') + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/redis_data.py b/ansible_collections/community/general/plugins/modules/redis_data.py new file mode 100644 index 000000000..c0c8dcc9a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/redis_data.py @@ -0,0 +1,257 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: redis_data +short_description: Set key value pairs in Redis +version_added: 3.7.0 +description: + - Set key value pairs in Redis database. +author: "Andreas Botzner (@paginabianca)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + key: + description: + - Database key. + required: true + type: str + value: + description: + - Value that key should be set to. + required: false + type: str + expiration: + description: + - Expiration time in milliseconds. + Setting this flag will always result in a change in the database. + required: false + type: int + non_existing: + description: + - Only set key if it does not already exist. + required: false + type: bool + existing: + description: + - Only set key if it already exists. + required: false + type: bool + keep_ttl: + description: + - Retain the time to live associated with the key. + required: false + type: bool + state: + description: + - State of the key. 
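+      - C(present) sets or updates the key; C(absent) deletes it.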
+    default: present
+    type: str
+    choices:
+      - present
+      - absent
+
+extends_documentation_fragment:
+  - community.general.redis.documentation
+  - community.general.attributes
+
+seealso:
+  - module: community.general.redis_data_incr
+  - module: community.general.redis_data_info
+  - module: community.general.redis
+'''
+
+EXAMPLES = '''
+- name: Set key foo=bar on localhost with no username
+  community.general.redis_data:
+    login_host: localhost
+    login_password: supersecret
+    key: foo
+    value: bar
+    state: present
+
+- name: Set key foo=bar if it does not yet exist, with an expiration of 30s
+  community.general.redis_data:
+    login_host: localhost
+    login_password: supersecret
+    key: foo
+    value: bar
+    non_existing: true
+    expiration: 30000
+    state: present
+
+- name: Set key foo=bar if it already exists and keep the current TTL
+  community.general.redis_data:
+    login_host: localhost
+    login_password: supersecret
+    key: foo
+    value: bar
+    existing: true
+    keep_ttl: true
+
+- name: Set key foo=bar on redishost with custom ca-cert file
+  community.general.redis_data:
+    login_host: redishost
+    login_password: supersecret
+    login_user: someuser
+    validate_certs: true
+    ssl_ca_certs: /path/to/ca/certs
+    key: foo
+    value: bar
+
+- name: Delete key foo on localhost with no username
+  community.general.redis_data:
+    login_host: localhost
+    login_password: supersecret
+    key: foo
+    state: absent
+'''
+
+RETURN = '''
+old_value:
+  description: Value of the key before setting.
+  returned: on success if state is C(present) and the key exists in the database.
+  type: str
+  sample: 'old_value_of_key'
+value:
+  description: Value the key was set to.
+  returned: on success if state is C(present).
+  type: str
+  sample: 'new_value_of_key'
+msg:
+  description: A short message.
+  returned: always
+  type: str
+  sample: 'Set key: foo to bar'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redis import (
+    fail_imports, redis_auth_argument_spec, RedisAnsible)
+
+
+def main():
+    redis_auth_args = redis_auth_argument_spec()
+    module_args = dict(
+        key=dict(type='str', required=True, no_log=False),
+        value=dict(type='str', required=False),
+        expiration=dict(type='int', required=False),
+        non_existing=dict(type='bool', required=False),
+        existing=dict(type='bool', required=False),
+        keep_ttl=dict(type='bool', required=False),
+        state=dict(type='str', default='present',
+                   choices=['present', 'absent']),
+    )
+    module_args.update(redis_auth_args)
+
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=True,
+        required_if=[('state', 'present', ('value',))],
+        mutually_exclusive=[['non_existing', 'existing'],
+                            ['keep_ttl', 'expiration']],)
+    fail_imports(module)
+
+    redis = RedisAnsible(module)
+
+    key = module.params['key']
+    value = module.params['value']
+    px = module.params['expiration']
+    nx = module.params['non_existing']
+    xx = module.params['existing']
+    keepttl = module.params['keep_ttl']
+    state = module.params['state']
+    set_args = {'name': key, 'value': value, 'px': px,
+                'nx': nx, 'xx': xx, 'keepttl': keepttl}
+
+    result = {'changed': False}
+
+    old_value = None
+    try:
+        old_value = redis.connection.get(key)
+    except Exception as e:
+        msg = 'Failed to get value of key: {0} with exception: {1}'.format(
+            key, str(e))
+        result['msg'] = msg
+        module.fail_json(**result)
+
+    if state == 'absent':
+        if module.check_mode:
+            if old_value is None:
+                msg = 'Key: {0} not present'.format(key)
+                result['msg'] = msg
+                module.exit_json(**result)
+            else:
+                msg = 'Deleted key: {0}'.format(key)
+                result['msg'] = msg
+                module.exit_json(**result)
+        try:
+            ret = redis.connection.delete(key)
+            if ret == 0:
+                msg = 'Key: {0} not present'.format(key)
+                result['msg'] = msg
+                module.exit_json(**result)
+            else:
+                msg = 'Deleted key: {0}'.format(key)
+                result['msg'] = msg
+                result['changed'] = True
+                module.exit_json(**result)
+        except Exception as e:
+            msg = 'Failed to delete key: {0} with exception: {1}'.format(
+                key, str(e))
+            result['msg'] = msg
+            module.fail_json(**result)
+
+    result['old_value'] = old_value
+    if old_value == value and keepttl is not False and px is None:
+        msg = 'Key {0} already has desired value'.format(key)
+        result['msg'] = msg
+        result['value'] = value
+        module.exit_json(**result)
+    if module.check_mode:
+        result['msg'] = 'Set key: {0}'.format(key)
+        result['value'] = value
+        module.exit_json(**result)
+    try:
+        ret = redis.connection.set(**set_args)
+        if ret is None:
+            if nx:
+                msg = 'Could not set key: {0}. Key already present.'.format(
+                    key)
+            else:
+                msg = 'Could not set key: {0}. Key not present.'.format(key)
+            result['msg'] = msg
+            module.fail_json(**result)
+        msg = 'Set key: {0}'.format(key)
+        result['msg'] = msg
+        result['changed'] = True
+        result['value'] = value
+        module.exit_json(**result)
+    except Exception as e:
+        msg = 'Failed to set key: {0} with exception: {1}'.format(key, str(e))
+        result['msg'] = msg
+        module.fail_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/redis_data_incr.py b/ansible_collections/community/general/plugins/modules/redis_data_incr.py
new file mode 100644
index 000000000..f927fb11f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redis_data_incr.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andreas Botzner
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis_data_incr
+short_description: Increment keys in Redis
+version_added: 4.0.0
+description:
+  - Increment integer or float keys in a Redis database and get the new value.
+  - The default increment for all keys is 1. For specific increments use the
+    I(increment_int) and I(increment_float) options.
+author: "Andreas Botzner (@paginabianca)"
+attributes:
+  check_mode:
+    support: partial
+    details:
+      - For C(check_mode) to work, the specified I(redis_user) needs permission to
+        run the C(GET) command on the key, otherwise the module will fail.
+      - When using I(check_mode) the module will try to calculate the value that
+        Redis would return. If the key is not present, 0.0 is used as the value.
+  diff_mode:
+    support: none
+options:
+  key:
+    description:
+      - Database key.
+    type: str
+    required: true
+  increment_int:
+    description:
+      - Integer amount to increment the key by.
+    required: false
+    type: int
+  increment_float:
+    description:
+      - Float amount to increment the key by.
+      - This only works with keys that contain float values
+        in their string representation.
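+      - Float increments use the Redis C(INCRBYFLOAT) command, so the current
+        value of the key must be parseable as a float.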
+ type: float + required: false + + +extends_documentation_fragment: + - community.general.redis.documentation + - community.general.attributes + +seealso: + - module: community.general.redis_data + - module: community.general.redis_data_info + - module: community.general.redis +''' + +EXAMPLES = ''' +- name: Increment integer key foo on localhost with no username and print new value + community.general.redis_data_incr: + login_host: localhost + login_password: supersecret + key: foo + increment_int: 1 + register: result +- name: Print new value + debug: + var: result.value + +- name: Increment float key foo by 20.4 + community.general.redis_data_incr: + login_host: redishost + login_user: redisuser + login_password: somepass + key: foo + increment_float: '20.4' +''' + +RETURN = ''' +value: + description: Incremented value of key + returned: on success + type: float + sample: '4039.4' +msg: + description: A short message. + returned: always + type: str + sample: 'Incremented key: foo by 20.4 to 65.9' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, RedisAnsible) + + +def main(): + redis_auth_args = redis_auth_argument_spec() + module_args = dict( + key=dict(type='str', required=True, no_log=False), + increment_int=dict(type='int', required=False), + increment_float=dict(type='float', required=False), + ) + module_args.update(redis_auth_args) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + mutually_exclusive=[['increment_int', 'increment_float']], + ) + fail_imports(module) + + redis = RedisAnsible(module) + key = module.params['key'] + increment_float = module.params['increment_float'] + increment_int = module.params['increment_int'] + increment = 1 + if increment_float is not None: + increment = increment_float + elif increment_int is not None: + increment = increment_int + + result = {'changed': False} + if module.check_mode: + value = 0.0 + try: + res = redis.connection.get(key) + if res is not None: + value = float(res) + except ValueError as e: + msg = 'Value: {0} of key: {1} is not incrementable(int or float)'.format( + res, key) + result['msg'] = msg + module.fail_json(**result) + except Exception as e: + msg = 'Failed to get value of key: {0} with exception: {1}'.format( + key, str(e)) + result['msg'] = msg + module.fail_json(**result) + msg = 'Incremented key: {0} by {1} to {2}'.format( + key, increment, value + increment) + result['msg'] = msg + result['value'] = float(value + increment) + module.exit_json(**result) + + if increment_float is not None: + try: + value = redis.connection.incrbyfloat(key, increment) + msg = 'Incremented key: {0} by {1} to {2}'.format( + key, increment, value) + result['msg'] = msg + result['value'] = float(value) + result['changed'] = True + module.exit_json(**result) + except Exception as e: + msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format( + key, increment, str(e)) + result['msg'] = msg + module.fail_json(**result) + elif increment_int is not None: + try: + value = redis.connection.incrby(key, increment) + msg = 'Incremented key: {0} by {1} to {2}'.format( + key, increment, value) + result['msg'] = msg + result['value'] = float(value) + result['changed'] = True + module.exit_json(**result) + except Exception as e: + msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format( + key, increment, str(e)) + result['msg'] = msg + 
+            module.fail_json(**result)
+    else:
+        try:
+            value = redis.connection.incr(key)
+            msg = 'Incremented key: {0} to {1}'.format(key, value)
+            result['msg'] = msg
+            result['value'] = float(value)
+            result['changed'] = True
+            module.exit_json(**result)
+        except Exception as e:
+            msg = 'Failed to increment key: {0} with exception: {1}'.format(
+                key, str(e))
+            result['msg'] = msg
+            module.fail_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/redis_data_info.py b/ansible_collections/community/general/plugins/modules/redis_data_info.py
new file mode 100644
index 000000000..c0af61905
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redis_data_info.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andreas Botzner
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis_data_info
+short_description: Get value of key in Redis database
+version_added: 3.7.0
+description:
+  - Get the value of a key in a Redis database.
+author: "Andreas Botzner (@paginabianca)"
+options:
+  key:
+    description:
+      - Database key.
+    type: str
+    required: true
+
+extends_documentation_fragment:
+  - community.general.redis
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+seealso:
+  - module: community.general.redis_data
+  - module: community.general.redis_data_incr
+  - module: community.general.redis_info
+  - module: community.general.redis
+'''
+
+EXAMPLES = '''
+- name: Get key foo=bar from localhost with no username
+  community.general.redis_data_info:
+    login_host: localhost
+    login_password: supersecret
+    key: foo
+
+- name: Get key foo=bar on redishost with custom ca-cert file
+  community.general.redis_data_info:
+    login_host: redishost
+    login_password: supersecret
+    login_user: someuser
+    validate_certs: true
+    ssl_ca_certs: /path/to/ca/certs
+    key: foo
+'''
+
+RETURN = '''
+exists:
+  description: If the key exists in the database.
+  returned: on success
+  type: bool
+value:
+  description: Value the key was set to.
+  returned: if existing
+  type: str
+  sample: 'value_of_some_key'
+msg:
+  description: A short message.
+ returned: always + type: str + sample: 'Got key: foo with value: bar' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, RedisAnsible) + + +def main(): + redis_auth_args = redis_auth_argument_spec() + module_args = dict( + key=dict(type='str', required=True, no_log=False), + ) + module_args.update(redis_auth_args) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + fail_imports(module) + + redis = RedisAnsible(module) + + key = module.params['key'] + result = {'changed': False} + + value = None + try: + value = redis.connection.get(key) + except Exception as e: + msg = 'Failed to get value of key "{0}" with exception: {1}'.format( + key, str(e)) + result['msg'] = msg + module.fail_json(**result) + + if value is None: + msg = 'Key "{0}" does not exist in database'.format(key) + result['exists'] = False + else: + msg = 'Got key "{0}"'.format(key) + result['value'] = value + result['exists'] = True + result['msg'] = msg + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/redis_info.py b/ansible_collections/community/general/plugins/modules/redis_info.py new file mode 100644 index 000000000..b9900a7ca --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/redis_info.py @@ -0,0 +1,240 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Pavlo Bashynskyi (@levonet) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: redis_info +short_description: Gather information about Redis servers +version_added: '0.2.0' +description: +- Gathers information and statistics about Redis servers. +extends_documentation_fragment: +- community.general.attributes +- community.general.attributes.info_module +options: + login_host: + description: + - The host running the database. + type: str + default: localhost + login_port: + description: + - The port to connect to. + type: int + default: 6379 + login_password: + description: + - The password used to authenticate with, when authentication is enabled for the Redis server. + type: str +notes: +- Requires the redis-py Python package on the remote host. You can + install it with pip (C(pip install redis)) or with a package manager. + U(https://github.com/andymccurdy/redis-py) +seealso: +- module: community.general.redis +requirements: [ redis ] +author: "Pavlo Bashynskyi (@levonet)" +''' + +EXAMPLES = r''' +- name: Get server information + community.general.redis_info: + register: result + +- name: Print server information + ansible.builtin.debug: + var: result.info +''' + +RETURN = r''' +info: + description: The default set of server information sections U(https://redis.io/commands/info). 
+ returned: success + type: dict + sample: { + "active_defrag_hits": 0, + "active_defrag_key_hits": 0, + "active_defrag_key_misses": 0, + "active_defrag_misses": 0, + "active_defrag_running": 0, + "allocator_active": 932409344, + "allocator_allocated": 932062792, + "allocator_frag_bytes": 346552, + "allocator_frag_ratio": 1.0, + "allocator_resident": 947253248, + "allocator_rss_bytes": 14843904, + "allocator_rss_ratio": 1.02, + "aof_current_rewrite_time_sec": -1, + "aof_enabled": 0, + "aof_last_bgrewrite_status": "ok", + "aof_last_cow_size": 0, + "aof_last_rewrite_time_sec": -1, + "aof_last_write_status": "ok", + "aof_rewrite_in_progress": 0, + "aof_rewrite_scheduled": 0, + "arch_bits": 64, + "atomicvar_api": "atomic-builtin", + "blocked_clients": 0, + "client_recent_max_input_buffer": 4, + "client_recent_max_output_buffer": 0, + "cluster_enabled": 0, + "config_file": "", + "configured_hz": 10, + "connected_clients": 4, + "connected_slaves": 0, + "db0": { + "avg_ttl": 1945628530, + "expires": 16, + "keys": 3341411 + }, + "evicted_keys": 0, + "executable": "/data/redis-server", + "expired_keys": 9, + "expired_stale_perc": 1.72, + "expired_time_cap_reached_count": 0, + "gcc_version": "9.2.0", + "hz": 10, + "instantaneous_input_kbps": 0.0, + "instantaneous_ops_per_sec": 0, + "instantaneous_output_kbps": 0.0, + "keyspace_hits": 0, + "keyspace_misses": 0, + "latest_fork_usec": 0, + "lazyfree_pending_objects": 0, + "loading": 0, + "lru_clock": 11603632, + "master_repl_offset": 118831417, + "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e", + "master_replid2": "0000000000000000000000000000000000000000", + "maxmemory": 0, + "maxmemory_human": "0B", + "maxmemory_policy": "noeviction", + "mem_allocator": "jemalloc-5.1.0", + "mem_aof_buffer": 0, + "mem_clients_normal": 49694, + "mem_clients_slaves": 0, + "mem_fragmentation_bytes": 12355480, + "mem_fragmentation_ratio": 1.01, + "mem_not_counted_for_evict": 0, + "mem_replication_backlog": 1048576, + "migrate_cached_sockets": 0, + "multiplexing_api": "epoll", + "number_of_cached_scripts": 0, + "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64", + "process_id": 1, + "pubsub_channels": 0, + "pubsub_patterns": 0, + "rdb_bgsave_in_progress": 0, + "rdb_changes_since_last_save": 671, + "rdb_current_bgsave_time_sec": -1, + "rdb_last_bgsave_status": "ok", + "rdb_last_bgsave_time_sec": -1, + "rdb_last_cow_size": 0, + "rdb_last_save_time": 1588702236, + "redis_build_id": "a31260535f820267", + "redis_git_dirty": 0, + "redis_git_sha1": 0, + "redis_mode": "standalone", + "redis_version": "999.999.999", + "rejected_connections": 0, + "repl_backlog_active": 1, + "repl_backlog_first_byte_offset": 118707937, + "repl_backlog_histlen": 123481, + "repl_backlog_size": 1048576, + "role": "master", + "rss_overhead_bytes": -3051520, + "rss_overhead_ratio": 1.0, + "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4", + "second_repl_offset": 118830003, + "slave_expires_tracked_keys": 0, + "sync_full": 0, + "sync_partial_err": 0, + "sync_partial_ok": 0, + "tcp_port": 6379, + "total_commands_processed": 885, + "total_connections_received": 10, + "total_net_input_bytes": 802709255, + "total_net_output_bytes": 31754, + "total_system_memory": 135029538816, + "total_system_memory_human": "125.76G", + "uptime_in_days": 53, + "uptime_in_seconds": 4631778, + "used_cpu_sys": 4.668282, + "used_cpu_sys_children": 0.002191, + "used_cpu_user": 4.21088, + "used_cpu_user_children": 0.0, + "used_memory": 931908760, + "used_memory_dataset": 910774306, + "used_memory_dataset_perc": 
"97.82%", + "used_memory_human": "888.74M", + "used_memory_lua": 37888, + "used_memory_lua_human": "37.00K", + "used_memory_overhead": 21134454, + "used_memory_peak": 932015216, + "used_memory_peak_human": "888.84M", + "used_memory_peak_perc": "99.99%", + "used_memory_rss": 944201728, + "used_memory_rss_human": "900.46M", + "used_memory_scripts": 0, + "used_memory_scripts_human": "0B", + "used_memory_startup": 791264 + } +''' + +import traceback + +REDIS_IMP_ERR = None +try: + from redis import StrictRedis + HAS_REDIS_PACKAGE = True +except ImportError: + REDIS_IMP_ERR = traceback.format_exc() + HAS_REDIS_PACKAGE = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def redis_client(**client_params): + return StrictRedis(**client_params) + + +# Module execution. +def main(): + module = AnsibleModule( + argument_spec=dict( + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=6379), + login_password=dict(type='str', no_log=True), + ), + supports_check_mode=True, + ) + + if not HAS_REDIS_PACKAGE: + module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR) + + login_host = module.params['login_host'] + login_port = module.params['login_port'] + login_password = module.params['login_password'] + + # Connect and check + client = redis_client(host=login_host, port=login_port, password=login_password) + try: + client.ping() + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + info = client.info() + module.exit_json(changed=False, info=info) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rhevm.py b/ansible_collections/community/general/plugins/modules/rhevm.py new file mode 100644 index 000000000..c129a2df5 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rhevm.py @@ -0,0 +1,1506 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Timothy Vandenbrande +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: rhevm +short_description: RHEV/oVirt automation +description: + - This module only supports oVirt/RHEV version 3. + - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4. + - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. +requirements: + - ovirtsdk +author: + - Timothy Vandenbrande (@TimothyVandenbrande) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + user: + description: + - The user to authenticate with. + type: str + default: admin@internal + password: + description: + - The password for user authentication. + type: str + required: true + server: + description: + - The name/IP of your RHEV-m/oVirt instance. + type: str + default: 127.0.0.1 + port: + description: + - The port on which the API is reachable. + type: int + default: 443 + insecure_api: + description: + - A boolean switch to make a secure or insecure connection to the server. + type: bool + default: false + name: + description: + - The name of the VM. 
+    type: str
+  cluster:
+    description:
+      - The RHEV/oVirt cluster in which you want your VM to start.
+    type: str
+    default: ''
+  datacenter:
+    description:
+      - The RHEV/oVirt datacenter in which you want your VM to start.
+    type: str
+    default: Default
+  state:
+    description:
+      - This serves to create/remove/update or powermanage your VM.
+    type: str
+    choices: [ absent, cd, down, info, ping, present, restarted, up ]
+    default: present
+  image:
+    description:
+      - The template to use for the VM.
+    type: str
+  type:
+    description:
+      - To define if the VM is a server or desktop.
+    type: str
+    choices: [ desktop, host, server ]
+    default: server
+  vmhost:
+    description:
+      - The host you wish your VM to run on.
+    type: str
+  vmcpu:
+    description:
+      - The number of CPUs you want in your VM.
+    type: int
+    default: 2
+  cpu_share:
+    description:
+      - This parameter is used to configure the CPU share.
+    type: int
+    default: 0
+  vmmem:
+    description:
+      - The amount of memory you want your VM to use (in GB).
+    type: int
+    default: 1
+  osver:
+    description:
+      - The operating system option in RHEV/oVirt.
+    type: str
+    default: rhel_6x64
+  mempol:
+    description:
+      - The minimum amount of memory you wish to reserve for this system.
+    type: int
+    default: 1
+  vm_ha:
+    description:
+      - To make your VM highly available.
+    type: bool
+    default: true
+  disks:
+    description:
+      - This option uses complex arguments and is a list of disks with the options name, size and domain.
+    type: list
+    elements: str
+  ifaces:
+    description:
+      - This option uses complex arguments and is a list of interfaces with the options name and vlan.
+    type: list
+    elements: str
+    aliases: [ interfaces, nics ]
+  boot_order:
+    description:
+      - This option uses complex arguments and is a list of items that specify the boot order.
+    type: list
+    elements: str
+    default: [ hd, network ]
+  del_prot:
+    description:
+      - This option sets the delete protection checkbox.
+    type: bool
+    default: true
+  cd_drive:
+    description:
+      - The CD you wish to have mounted on the VM when I(state=cd).
+    type: str
+  timeout:
+    description:
+      - The timeout you wish to define for power actions.
+      - When I(state=up).
+      - When I(state=down).
+      - When I(state=restarted).
+    type: int
+'''
+
+RETURN = r'''
+vm:
+  description: Returns all of the VM's variables and execution.
+ returned: always + type: dict + sample: { + "boot_order": [ + "hd", + "network" + ], + "changed": true, + "changes": [ + "Delete Protection" + ], + "cluster": "C1", + "cpu_share": "0", + "created": false, + "datacenter": "Default", + "del_prot": true, + "disks": [ + { + "domain": "ssd-san", + "name": "OS", + "size": 40 + } + ], + "eth0": "00:00:5E:00:53:00", + "eth1": "00:00:5E:00:53:01", + "eth2": "00:00:5E:00:53:02", + "exists": true, + "failed": false, + "ifaces": [ + { + "name": "eth0", + "vlan": "Management" + }, + { + "name": "eth1", + "vlan": "Internal" + }, + { + "name": "eth2", + "vlan": "External" + } + ], + "image": false, + "mempol": "0", + "msg": [ + "VM exists", + "cpu_share was already set to 0", + "VM high availability was already set to True", + "The boot order has already been set", + "VM delete protection has been set to True", + "Disk web2_Disk0_OS already exists", + "The VM starting host was already set to host416" + ], + "name": "web2", + "type": "server", + "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", + "vm_ha": true, + "vmcpu": "4", + "vmhost": "host416", + "vmmem": "16" + } +''' + +EXAMPLES = r''' +- name: Basic get info from VM + community.general.rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: demo + state: info + +- name: Basic create example from image + community.general.rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: demo + cluster: centos + image: centos7_x64 + state: present + +- name: Power management + community.general.rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + cluster: RH + name: uptime_server + image: centos7_x64 + state: down + +- name: Multi disk, multi nic create example + community.general.rhevm: + server: rhevm01 + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + cluster: RH + name: server007 + type: server + vmcpu: 4 + vmmem: 2 + ifaces: + - name: eth0 + vlan: vlan2202 + - name: eth1 + vlan: vlan36 + - name: eth2 + vlan: vlan38 + - name: eth3 + vlan: vlan2202 + disks: + - name: root + size: 10 + domain: ssd-san + - name: swap + size: 10 + domain: 15kiscsi-san + - name: opt + size: 10 + domain: 15kiscsi-san + - name: var + size: 10 + domain: 10kiscsi-san + - name: home + size: 10 + domain: sata-san + boot_order: + - network + - hd + state: present + +- name: Add a CD to the disk cd_drive + community.general.rhevm: + user: '{{ rhev.admin.name }}' + password: '{{ rhev.admin.pass }}' + name: server007 + cd_drive: rhev-tools-setup.iso + state: cd + +- name: New host deployment + host network configuration + community.general.rhevm: + password: '{{ rhevm.admin.pass }}' + name: ovirt_node007 + type: host + cluster: rhevm01 + ifaces: + - name: em1 + - name: em2 + - name: p3p1 + ip: 172.31.224.200 + netmask: 255.255.254.0 + - name: p3p2 + ip: 172.31.225.200 + netmask: 255.255.254.0 + - name: bond0 + bond: + - em1 + - em2 + network: rhevm + ip: 172.31.222.200 + netmask: 255.255.255.0 + management: true + - name: bond0.36 + network: vlan36 + ip: 10.2.36.200 + netmask: 255.255.254.0 + gateway: 10.2.36.254 + - name: bond0.2202 + network: vlan2202 + - name: bond0.38 + network: vlan38 + state: present +''' + +import time + +try: + from ovirtsdk.api import API + from ovirtsdk.xml import params + HAS_SDK = True +except ImportError: + HAS_SDK = False + +from ansible.module_utils.basic import AnsibleModule + + +RHEV_FAILED = 1 +RHEV_SUCCESS = 0 +RHEV_UNAVAILABLE = 2 + +RHEV_TYPE_OPTS = 
['desktop', 'host', 'server']
+STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']
+
+msg = []
+changed = False
+failed = False
+
+
+class RHEVConn(object):
+    'Connection to RHEV-M'
+
+    def __init__(self, module):
+        self.module = module
+
+        user = module.params.get('user')
+        password = module.params.get('password')
+        server = module.params.get('server')
+        port = module.params.get('port')
+        insecure_api = module.params.get('insecure_api')
+
+        url = "https://%s:%s" % (server, port)
+
+        try:
+            api = API(url=url, username=user, password=password, insecure=str(insecure_api))
+            api.test()
+            self.conn = api
+        except Exception:
+            raise Exception("Failed to connect to RHEV-M.")
+
+    def __del__(self):
+        self.conn.disconnect()
+
+    def createVMimage(self, name, cluster, template, disks):
+        # disks is accepted to match the RHEV.createVMimage() call signature;
+        # the disks themselves are cloned from the template below.
+        try:
+            vmparams = params.VM(
+                name=name,
+                cluster=self.conn.clusters.get(name=cluster),
+                template=self.conn.templates.get(name=template),
+                disks=params.Disks(clone=True)
+            )
+            self.conn.vms.add(vmparams)
+            setMsg("VM is created")
+            setChanged()
+            return True
+        except Exception as e:
+            setMsg("Failed to create VM")
+            setMsg(str(e))
+            setFailed()
+            return False
+
+    def createVM(self, name, cluster, os, actiontype):
+        try:
+            vmparams = params.VM(
+                name=name,
+                cluster=self.conn.clusters.get(name=cluster),
+                os=params.OperatingSystem(type_=os),
+                template=self.conn.templates.get(name="Blank"),
+                type_=actiontype
+            )
+            self.conn.vms.add(vmparams)
+            setMsg("VM is created")
+            setChanged()
+            return True
+        except Exception as e:
+            setMsg("Failed to create VM")
+            setMsg(str(e))
+            setFailed()
+            return False
+
+    def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
+        VM = self.get_VM(vmname)
+
+        newdisk = params.Disk(
+            name=diskname,
+            size=1024 * 1024 * 1024 * int(disksize),
+            wipe_after_delete=True,
+            sparse=diskallocationtype,
+            interface=diskinterface,
+            format=diskformat,
+            bootable=diskboot,
+            storage_domains=params.StorageDomains(
+                storage_domain=[self.get_domain(diskdomain)]
+            )
+        )
+
+        try:
+            VM.disks.add(newdisk)
+            VM.update()
+            setMsg("Successfully added disk " + diskname)
+            setChanged()
+        except Exception as e:
+            setFailed()
+            setMsg("Error attaching " + diskname + " disk, please recheck and remove any leftover configuration.")
+            setMsg(str(e))
+            return False
+
+        try:
+            currentdisk = VM.disks.get(name=diskname)
+            attempt = 1
+            while currentdisk.status.state != 'ok':
+                currentdisk = VM.disks.get(name=diskname)
+                if attempt == 100:
+                    setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
+                    raise Exception()
+                else:
+                    attempt += 1
+                    time.sleep(2)
+            setMsg("The disk " + diskname + " is ready.")
+        except Exception as e:
+            setFailed()
+            setMsg("Error getting the state of " + diskname + ".")
+            setMsg(str(e))
+            return False
+        return True
+
+    def createNIC(self, vmname, nicname, vlan, interface):
+        VM = self.get_VM(vmname)
+        CLUSTER = self.get_cluster_byid(VM.cluster.id)
+        DC = self.get_DC_byid(CLUSTER.data_center.id)
+        newnic = params.NIC(
+            name=nicname,
+            network=DC.networks.get(name=vlan),
+            interface=interface
+        )
+
+        try:
+            VM.nics.add(newnic)
+            VM.update()
+            setMsg("Successfully added iface " + nicname)
+            setChanged()
+        except Exception as e:
+            setFailed()
+            setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
+            setMsg(str(e))
+            return False
+
+        try:
+            currentnic = VM.nics.get(name=nicname)
+            attempt = 1
+            while currentnic.active is not True:
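+                # Poll until the new NIC reports active; as with the disk loop
+                # above, give up after 100 attempts (about 200 seconds at the
+                # 2-second poll interval).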
currentnic = VM.nics.get(name=nicname) + if attempt == 100: + setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active))) + raise Exception() + else: + attempt += 1 + time.sleep(2) + setMsg("The iface " + nicname + " is ready.") + except Exception as e: + setFailed() + setMsg("Error getting the state of " + nicname + ".") + setMsg(str(e)) + return False + return True + + def get_DC(self, dc_name): + return self.conn.datacenters.get(name=dc_name) + + def get_DC_byid(self, dc_id): + return self.conn.datacenters.get(id=dc_id) + + def get_VM(self, vm_name): + return self.conn.vms.get(name=vm_name) + + def get_cluster_byid(self, cluster_id): + return self.conn.clusters.get(id=cluster_id) + + def get_cluster(self, cluster_name): + return self.conn.clusters.get(name=cluster_name) + + def get_domain_byid(self, dom_id): + return self.conn.storagedomains.get(id=dom_id) + + def get_domain(self, domain_name): + return self.conn.storagedomains.get(name=domain_name) + + def get_disk(self, disk): + return self.conn.disks.get(disk) + + def get_network(self, dc_name, network_name): + return self.get_DC(dc_name).networks.get(network_name) + + def get_network_byid(self, network_id): + return self.conn.networks.get(id=network_id) + + def get_NIC(self, vm_name, nic_name): + return self.get_VM(vm_name).nics.get(nic_name) + + def get_Host(self, host_name): + return self.conn.hosts.get(name=host_name) + + def get_Host_byid(self, host_id): + return self.conn.hosts.get(id=host_id) + + def set_Memory(self, name, memory): + VM = self.get_VM(name) + VM.memory = int(int(memory) * 1024 * 1024 * 1024) + try: + VM.update() + setMsg("The Memory has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update memory.") + setMsg(str(e)) + setFailed() + return False + + def set_Memory_Policy(self, name, memory_policy): + VM = self.get_VM(name) + VM.memory_policy.guaranteed = int(memory_policy) * 1024 * 1024 * 1024 + try: + VM.update() + setMsg("The memory policy has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update memory policy.") + setMsg(str(e)) + setFailed() + return False + + def set_CPU(self, name, cpu): + VM = self.get_VM(name) + VM.cpu.topology.cores = int(cpu) + try: + VM.update() + setMsg("The number of CPUs has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update the number of CPUs.") + setMsg(str(e)) + setFailed() + return False + + def set_CPU_share(self, name, cpu_share): + VM = self.get_VM(name) + VM.cpu_shares = int(cpu_share) + try: + VM.update() + setMsg("The CPU share has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update the CPU share.") + setMsg(str(e)) + setFailed() + return False + + def set_Disk(self, diskname, disksize, diskinterface, diskboot): + DISK = self.get_disk(diskname) + setMsg("Checking disk " + diskname) + if DISK.get_bootable() != diskboot: + try: + DISK.set_bootable(diskboot) + setMsg("Updated the boot option on the disk.") + setChanged() + except Exception as e: + setMsg("Failed to set the boot option on the disk.") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The boot option of the disk is correct") + if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)): + try: + DISK.size = (1024 * 1024 * 1024 * int(disksize)) + setMsg("Updated the size of the disk.") + setChanged() + except Exception as e: + setMsg("Failed to update the size of the disk.") + setMsg(str(e)) + setFailed() + return 
False + elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)): + setMsg("Shrinking disks is not supported") + setFailed() + return False + else: + setMsg("The size of the disk is correct") + if str(DISK.interface) != str(diskinterface): + try: + DISK.interface = diskinterface + setMsg("Updated the interface of the disk.") + setChanged() + except Exception as e: + setMsg("Failed to update the interface of the disk.") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The interface of the disk is correct") + return True + + def set_NIC(self, vmname, nicname, newname, vlan, interface): + NIC = self.get_NIC(vmname, nicname) + VM = self.get_VM(vmname) + CLUSTER = self.get_cluster_byid(VM.cluster.id) + DC = self.get_DC_byid(CLUSTER.data_center.id) + NETWORK = self.get_network(str(DC.name), vlan) + checkFail() + if NIC.name != newname: + NIC.name = newname + setMsg('Updating iface name to ' + newname) + setChanged() + if str(NIC.network.id) != str(NETWORK.id): + NIC.set_network(NETWORK) + setMsg('Updating iface network to ' + vlan) + setChanged() + if NIC.interface != interface: + NIC.interface = interface + setMsg('Updating iface interface to ' + interface) + setChanged() + try: + NIC.update() + setMsg('iface has successfully been updated.') + except Exception as e: + setMsg("Failed to update the iface.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_DeleteProtection(self, vmname, del_prot): + VM = self.get_VM(vmname) + VM.delete_protected = del_prot + try: + VM.update() + setChanged() + except Exception as e: + setMsg("Failed to update delete protection.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_BootOrder(self, vmname, boot_order): + VM = self.get_VM(vmname) + bootorder = [] + for device in boot_order: + bootorder.append(params.Boot(dev=device)) + VM.os.boot = bootorder + + try: + VM.update() + setChanged() + except Exception as e: + setMsg("Failed to update the boot order.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_Host(self, host_name, cluster, ifaces): + HOST = self.get_Host(host_name) + CLUSTER = self.get_cluster(cluster) + + if HOST is None: + setMsg("Host does not exist.") + ifacelist = dict() + networklist = [] + manageip = '' + + try: + for iface in ifaces: + try: + setMsg('creating host interface ' + iface['name']) + if 'management' in iface: + manageip = iface['ip'] + if 'boot_protocol' not in iface: + if 'ip' in iface: + iface['boot_protocol'] = 'static' + else: + iface['boot_protocol'] = 'none' + if 'ip' not in iface: + iface['ip'] = '' + if 'netmask' not in iface: + iface['netmask'] = '' + if 'gateway' not in iface: + iface['gateway'] = '' + + if 'network' in iface: + if 'bond' in iface: + bond = [] + for slave in iface['bond']: + bond.append(ifacelist[slave]) + try: + tmpiface = params.Bonding( + slaves=params.Slaves(host_nic=bond), + options=params.Options( + option=[ + params.Option(name='miimon', value='100'), + params.Option(name='mode', value='4') + ] + ) + ) + except Exception as e: + setMsg('Failed to create the bond for ' + iface['name']) + setFailed() + setMsg(str(e)) + return False + try: + tmpnetwork = params.HostNIC( + network=params.Network(name=iface['network']), + name=iface['name'], + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + ), + override_configuration=True, + bonding=tmpiface) + networklist.append(tmpnetwork) + setMsg('Applying network ' + iface['name']) + except 
Exception as e: + setMsg('Failed to set' + iface['name'] + ' as network interface') + setFailed() + setMsg(str(e)) + return False + else: + tmpnetwork = params.HostNIC( + network=params.Network(name=iface['network']), + name=iface['name'], + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + )) + networklist.append(tmpnetwork) + setMsg('Applying network ' + iface['name']) + else: + tmpiface = params.HostNIC( + name=iface['name'], + network=params.Network(), + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + )) + ifacelist[iface['name']] = tmpiface + except Exception as e: + setMsg('Failed to set ' + iface['name']) + setFailed() + setMsg(str(e)) + return False + except Exception as e: + setMsg('Failed to set networks') + setMsg(str(e)) + setFailed() + return False + + if manageip == '': + setMsg('No management network is defined') + setFailed() + return False + + try: + HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey')) + if self.conn.hosts.add(HOST): + setChanged() + HOST = self.get_Host(host_name) + state = HOST.status.state + while (state != 'non_operational' and state != 'up'): + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + if state == 'non_responsive': + setMsg('Failed to add host to RHEVM') + setFailed() + return False + + setMsg('status host: up') + time.sleep(5) + + HOST = self.get_Host(host_name) + state = HOST.status.state + setMsg('State before setting to maintenance: ' + str(state)) + HOST.deactivate() + while state != 'maintenance': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + setMsg('status host: maintenance') + + try: + HOST.nics.setupnetworks(params.Action( + force=True, + check_connectivity=False, + host_nics=params.HostNics(host_nic=networklist) + )) + setMsg('nics are set') + except Exception as e: + setMsg('Failed to apply networkconfig') + setFailed() + setMsg(str(e)) + return False + + try: + HOST.commitnetconfig() + setMsg('Network config is saved') + except Exception as e: + setMsg('Failed to save networkconfig') + setFailed() + setMsg(str(e)) + return False + except Exception as e: + if 'The Host name is already in use' in str(e): + setMsg("Host already exists") + else: + setMsg("Failed to add host") + setFailed() + setMsg(str(e)) + return False + + HOST.activate() + while state != 'up': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + if state == 'non_responsive': + setMsg('Failed to apply networkconfig.') + setFailed() + return False + setMsg('status host: up') + else: + setMsg("Host exists.") + + return True + + def del_NIC(self, vmname, nicname): + return self.get_NIC(vmname, nicname).delete() + + def remove_VM(self, vmname): + VM = self.get_VM(vmname) + try: + VM.delete() + except Exception as e: + setMsg("Failed to remove VM.") + setMsg(str(e)) + setFailed() + return False + return True + + def start_VM(self, vmname, timeout): + VM = self.get_VM(vmname) + try: + VM.start() + except Exception as e: + setMsg("Failed to start VM.") + setMsg(str(e)) + setFailed() + return False + return self.wait_VM(vmname, "up", timeout) + + def wait_VM(self, vmname, state, timeout): + VM = self.get_VM(vmname) + while VM.status.state != state: + VM = self.get_VM(vmname) + time.sleep(10) + if timeout is not False: + timeout -= 10 + if timeout <= 0: + 
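+                    # timeout (when given) counts down in steps of the 10-second
+                    # poll interval; reaching zero means the VM never reached the
+                    # desired state and the power action is reported as failed.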
setMsg("Timeout expired") + setFailed() + return False + return True + + def stop_VM(self, vmname, timeout): + VM = self.get_VM(vmname) + try: + VM.stop() + except Exception as e: + setMsg("Failed to stop VM.") + setMsg(str(e)) + setFailed() + return False + return self.wait_VM(vmname, "down", timeout) + + def set_CD(self, vmname, cd_drive): + VM = self.get_VM(vmname) + try: + if str(VM.status.state) == 'down': + cdrom = params.CdRom(file=cd_drive) + VM.cdroms.add(cdrom) + setMsg("Attached the image.") + setChanged() + else: + cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000") + cdrom.set_file(cd_drive) + cdrom.update(current=True) + setMsg("Attached the image.") + setChanged() + except Exception as e: + setMsg("Failed to attach image.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_VM_Host(self, vmname, vmhost): + VM = self.get_VM(vmname) + HOST = self.get_Host(vmhost) + try: + VM.placement_policy.host = HOST + VM.update() + setMsg("Set startup host to " + vmhost) + setChanged() + except Exception as e: + setMsg("Failed to set startup host.") + setMsg(str(e)) + setFailed() + return False + return True + + def migrate_VM(self, vmname, vmhost): + VM = self.get_VM(vmname) + + HOST = self.get_Host_byid(VM.host.id) + if str(HOST.name) != vmhost: + try: + VM.migrate( + action=params.Action( + host=params.Host( + name=vmhost, + ) + ), + ) + setChanged() + setMsg("VM migrated to " + vmhost) + except Exception as e: + setMsg("Failed to set startup host.") + setMsg(str(e)) + setFailed() + return False + return True + + def remove_CD(self, vmname): + VM = self.get_VM(vmname) + try: + VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete() + setMsg("Removed the image.") + setChanged() + except Exception as e: + setMsg("Failed to remove the image.") + setMsg(str(e)) + setFailed() + return False + return True + + +class RHEV(object): + def __init__(self, module): + self.module = module + + def __get_conn(self): + self.conn = RHEVConn(self.module) + return self.conn + + def test(self): + self.__get_conn() + return "OK" + + def getVM(self, name): + self.__get_conn() + VM = self.conn.get_VM(name) + if VM: + vminfo = dict() + vminfo['uuid'] = VM.id + vminfo['name'] = VM.name + vminfo['status'] = VM.status.state + vminfo['cpu_cores'] = VM.cpu.topology.cores + vminfo['cpu_sockets'] = VM.cpu.topology.sockets + vminfo['cpu_shares'] = VM.cpu_shares + vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024) + vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024) + vminfo['os'] = VM.get_os().type_ + vminfo['del_prot'] = VM.delete_protected + try: + vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name) + except Exception: + vminfo['host'] = None + vminfo['boot_order'] = [] + for boot_dev in VM.os.get_boot(): + vminfo['boot_order'].append(str(boot_dev.dev)) + vminfo['disks'] = [] + for DISK in VM.disks.list(): + disk = dict() + disk['name'] = DISK.name + disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024) + disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name) + disk['interface'] = DISK.interface + vminfo['disks'].append(disk) + vminfo['ifaces'] = [] + for NIC in VM.nics.list(): + iface = dict() + iface['name'] = str(NIC.name) + iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name) + iface['interface'] = NIC.interface + iface['mac'] = NIC.mac.address + vminfo['ifaces'].append(iface) + vminfo[str(NIC.name)] = NIC.mac.address + CLUSTER = 
self.conn.get_cluster_byid(VM.cluster.id) + if CLUSTER: + vminfo['cluster'] = CLUSTER.name + else: + vminfo = False + return vminfo + + def createVMimage(self, name, cluster, template, disks): + self.__get_conn() + return self.conn.createVMimage(name, cluster, template, disks) + + def createVM(self, name, cluster, os, actiontype): + self.__get_conn() + return self.conn.createVM(name, cluster, os, actiontype) + + def setMemory(self, name, memory): + self.__get_conn() + return self.conn.set_Memory(name, memory) + + def setMemoryPolicy(self, name, memory_policy): + self.__get_conn() + return self.conn.set_Memory_Policy(name, memory_policy) + + def setCPU(self, name, cpu): + self.__get_conn() + return self.conn.set_CPU(name, cpu) + + def setCPUShare(self, name, cpu_share): + self.__get_conn() + return self.conn.set_CPU_share(name, cpu_share) + + def setDisks(self, name, disks): + self.__get_conn() + counter = 0 + bootselect = False + for disk in disks: + if 'bootable' in disk: + if disk['bootable'] is True: + bootselect = True + + for disk in disks: + diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_') + disksize = disk.get('size', 1) + diskdomain = disk.get('domain', None) + if diskdomain is None: + setMsg("`domain` is a required disk key.") + setFailed() + return False + diskinterface = disk.get('interface', 'virtio') + diskformat = disk.get('format', 'raw') + diskallocationtype = disk.get('thin', False) + diskboot = disk.get('bootable', False) + + if bootselect is False and counter == 0: + diskboot = True + + DISK = self.conn.get_disk(diskname) + + if DISK is None: + self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot) + else: + self.conn.set_Disk(diskname, disksize, diskinterface, diskboot) + checkFail() + counter += 1 + + return True + + def setNetworks(self, vmname, ifaces): + self.__get_conn() + VM = self.conn.get_VM(vmname) + + counter = 0 + length = len(ifaces) + + for NIC in VM.nics.list(): + if counter < length: + iface = ifaces[counter] + name = iface.get('name', None) + if name is None: + setMsg("`name` is a required iface key.") + setFailed() + elif str(name) != str(NIC.name): + setMsg("ifaces are in the wrong order, rebuilding everything.") + for NIC in VM.nics.list(): + self.conn.del_NIC(vmname, NIC.name) + self.setNetworks(vmname, ifaces) + checkFail() + return True + vlan = iface.get('vlan', None) + if vlan is None: + setMsg("`vlan` is a required iface key.") + setFailed() + checkFail() + interface = iface.get('interface', 'virtio') + self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface) + else: + self.conn.del_NIC(vmname, NIC.name) + counter += 1 + checkFail() + + while counter < length: + iface = ifaces[counter] + name = iface.get('name', None) + if name is None: + setMsg("`name` is a required iface key.") + setFailed() + vlan = iface.get('vlan', None) + if vlan is None: + setMsg("`vlan` is a required iface key.") + setFailed() + if failed is True: + return False + interface = iface.get('interface', 'virtio') + self.conn.createNIC(vmname, name, vlan, interface) + + counter += 1 + checkFail() + return True + + def setDeleteProtection(self, vmname, del_prot): + self.__get_conn() + VM = self.conn.get_VM(vmname) + if bool(VM.delete_protected) != bool(del_prot): + self.conn.set_DeleteProtection(vmname, del_prot) + checkFail() + setMsg("`delete protection` has been updated.") + else: + setMsg("`delete protection` already has the right value.") + return True + + def 
setBootOrder(self, vmname, boot_order): + self.__get_conn() + VM = self.conn.get_VM(vmname) + bootorder = [] + for boot_dev in VM.os.get_boot(): + bootorder.append(str(boot_dev.dev)) + + if boot_order != bootorder: + self.conn.set_BootOrder(vmname, boot_order) + setMsg('The boot order has been set') + else: + setMsg('The boot order has already been set') + return True + + def removeVM(self, vmname): + self.__get_conn() + self.setPower(vmname, "down", 300) + return self.conn.remove_VM(vmname) + + def setPower(self, vmname, state, timeout): + self.__get_conn() + VM = self.conn.get_VM(vmname) + if VM is None: + setMsg("VM does not exist.") + setFailed() + return False + + if state == VM.status.state: + setMsg("VM state was already " + state) + else: + if state == "up": + setMsg("VM is going to start") + self.conn.start_VM(vmname, timeout) + setChanged() + elif state == "down": + setMsg("VM is going to stop") + self.conn.stop_VM(vmname, timeout) + setChanged() + elif state == "restarted": + self.setPower(vmname, "down", timeout) + checkFail() + self.setPower(vmname, "up", timeout) + checkFail() + setMsg("the vm state is set to " + state) + return True + + def setCD(self, vmname, cd_drive): + self.__get_conn() + if cd_drive: + return self.conn.set_CD(vmname, cd_drive) + else: + return self.conn.remove_CD(vmname) + + def setVMHost(self, vmname, vmhost): + self.__get_conn() + return self.conn.set_VM_Host(vmname, vmhost) + + def setHost(self, hostname, cluster, ifaces): + self.__get_conn() + return self.conn.set_Host(hostname, cluster, ifaces) + + +def checkFail(): + if failed: + module.fail_json(msg=msg) + else: + return True + + +def setFailed(): + global failed + failed = True + + +def setChanged(): + global changed + changed = True + + +def setMsg(message): + msg.append(message) + + +def core(module): + + r = RHEV(module) + + state = module.params.get('state') + + if state == 'ping': + r.test() + return RHEV_SUCCESS, {"ping": "pong"} + elif state == 'info': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + vminfo = r.getVM(name) + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + elif state == 'present': + created = False + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + actiontype = module.params.get('type') + if actiontype == 'server' or actiontype == 'desktop': + vminfo = r.getVM(name) + if vminfo: + setMsg('VM exists') + else: + # Create VM + cluster = module.params.get('cluster') + if cluster is None: + setMsg("cluster is a required argument.") + setFailed() + template = module.params.get('image') + if template: + disks = module.params.get('disks') + if disks is None: + setMsg("disks is a required argument.") + setFailed() + checkFail() + if r.createVMimage(name, cluster, template, disks) is False: + return RHEV_FAILED, vminfo + else: + os = module.params.get('osver') + if os is None: + setMsg("osver is a required argument.") + setFailed() + checkFail() + if r.createVM(name, cluster, os, actiontype) is False: + return RHEV_FAILED, vminfo + created = True + + # Set MEMORY and MEMORY POLICY + vminfo = r.getVM(name) + memory = module.params.get('vmmem') + if memory is not None: + memory_policy = module.params.get('mempol') + if memory_policy == 0: + memory_policy = memory + mem_pol_nok = True + if int(vminfo['mem_pol']) == memory_policy: + setMsg("Memory is correct") + mem_pol_nok = False + + mem_nok = True + if 
int(vminfo['memory']) == memory: + setMsg("Memory is correct") + mem_nok = False + + if memory_policy > memory: + setMsg('memory_policy cannot have a higher value than memory.') + return RHEV_FAILED, msg + + if mem_nok and mem_pol_nok: + if memory_policy > int(vminfo['memory']): + r.setMemory(vminfo['name'], memory) + r.setMemoryPolicy(vminfo['name'], memory_policy) + else: + r.setMemoryPolicy(vminfo['name'], memory_policy) + r.setMemory(vminfo['name'], memory) + elif mem_nok: + r.setMemory(vminfo['name'], memory) + elif mem_pol_nok: + r.setMemoryPolicy(vminfo['name'], memory_policy) + checkFail() + + # Set CPU + cpu = module.params.get('vmcpu') + if int(vminfo['cpu_cores']) == cpu: + setMsg("Number of CPUs is correct") + else: + if r.setCPU(vminfo['name'], cpu) is False: + return RHEV_FAILED, msg + + # Set CPU SHARE + cpu_share = module.params.get('cpu_share') + if cpu_share is not None: + if int(vminfo['cpu_shares']) == cpu_share: + setMsg("CPU share is correct.") + else: + if r.setCPUShare(vminfo['name'], cpu_share) is False: + return RHEV_FAILED, msg + + # Set DISKS + disks = module.params.get('disks') + if disks is not None: + if r.setDisks(vminfo['name'], disks) is False: + return RHEV_FAILED, msg + + # Set NETWORKS + ifaces = module.params.get('ifaces', None) + if ifaces is not None: + if r.setNetworks(vminfo['name'], ifaces) is False: + return RHEV_FAILED, msg + + # Set Delete Protection + del_prot = module.params.get('del_prot') + if r.setDeleteProtection(vminfo['name'], del_prot) is False: + return RHEV_FAILED, msg + + # Set Boot Order + boot_order = module.params.get('boot_order') + if r.setBootOrder(vminfo['name'], boot_order) is False: + return RHEV_FAILED, msg + + # Set VM Host + vmhost = module.params.get('vmhost') + if vmhost: + if r.setVMHost(vminfo['name'], vmhost) is False: + return RHEV_FAILED, msg + + vminfo = r.getVM(name) + vminfo['created'] = created + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + if actiontype == 'host': + cluster = module.params.get('cluster') + if cluster is None: + setMsg("cluster is a required argument.") + setFailed() + ifaces = module.params.get('ifaces') + if ifaces is None: + setMsg("ifaces is a required argument.") + setFailed() + if r.setHost(name, cluster, ifaces) is False: + return RHEV_FAILED, msg + return RHEV_SUCCESS, {'changed': changed, 'msg': msg} + + elif state == 'absent': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + actiontype = module.params.get('type') + if actiontype == 'server' or actiontype == 'desktop': + vminfo = r.getVM(name) + if vminfo: + setMsg('VM exists') + + # Set Delete Protection + del_prot = module.params.get('del_prot') + if r.setDeleteProtection(vminfo['name'], del_prot) is False: + return RHEV_FAILED, msg + + # Remove VM + if r.removeVM(vminfo['name']) is False: + return RHEV_FAILED, msg + setMsg('VM has been removed.') + vminfo['state'] = 'DELETED' + else: + setMsg('VM was already removed.') + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + elif state == 'up' or state == 'down' or state == 'restarted': + name = module.params.get('name') + if not name: + setMsg("`name` is a required argument.") + return RHEV_FAILED, msg + timeout = module.params.get('timeout') + if r.setPower(name, state, timeout) is False: + return RHEV_FAILED, msg + vminfo = r.getVM(name) + return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo} + + elif state == 'cd': + name = module.params.get('name') 
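+ # Each branch of core() returns a (rc, payload) tuple that main() feeds to
+ # module.exit_json() or module.fail_json(). As an illustration, a playbook
+ # task exercising this 'cd' branch might look like the sketch below (the VM
+ # and ISO names are assumptions for illustration, not values taken from
+ # this module's documentation):
+ #
+ #   - name: Attach an ISO to an existing VM
+ #     community.general.rhevm:
+ #       name: demo-vm
+ #       state: cd
+ #       cd_drive: rhel-guest.iso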
+ cd_drive = module.params.get('cd_drive') + if r.setCD(name, cd_drive) is False: + return RHEV_FAILED, msg + return RHEV_SUCCESS, {'changed': changed, 'msg': msg} + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']), + user=dict(type='str', default='admin@internal'), + password=dict(type='str', required=True, no_log=True), + server=dict(type='str', default='127.0.0.1'), + port=dict(type='int', default=443), + insecure_api=dict(type='bool', default=False), + name=dict(type='str'), + image=dict(type='str'), + datacenter=dict(type='str', default="Default"), + type=dict(type='str', default='server', choices=['desktop', 'host', 'server']), + cluster=dict(type='str', default=''), + vmhost=dict(type='str'), + vmcpu=dict(type='int', default=2), + vmmem=dict(type='int', default=1), + disks=dict(type='list', elements='str'), + osver=dict(type='str', default="rhel_6x64"), + ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']), + timeout=dict(type='int'), + mempol=dict(type='int', default=1), + vm_ha=dict(type='bool', default=True), + cpu_share=dict(type='int', default=0), + boot_order=dict(type='list', elements='str', default=['hd', 'network']), + del_prot=dict(type='bool', default=True), + cd_drive=dict(type='str'), + ), + ) + + if not HAS_SDK: + module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.") + + rc = RHEV_SUCCESS + try: + rc, result = core(module) + except Exception as e: + module.fail_json(msg=str(e)) + + if rc != 0: # something went wrong emit the msg + module.fail_json(rc=rc, msg=result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rhn_channel.py b/ansible_collections/community/general/plugins/modules/rhn_channel.py new file mode 100644 index 000000000..e544af51e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rhn_channel.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) Vincent Van de Kussen +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: rhn_channel +short_description: Adds or removes Red Hat software channels +description: + - Adds or removes Red Hat software channels. +author: + - Vincent Van der Kussen (@vincentvdk) +notes: + - This module fetches the system id from RHN. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of the software channel. + required: true + type: str + sysname: + description: + - Name of the system as it is known in RHN/Satellite. + required: true + type: str + state: + description: + - Whether the channel should be present or not, taking action if the state is different from what is stated. + default: present + choices: [ present, absent ] + type: str + url: + description: + - The full URL to the RHN/Satellite API. + required: true + type: str + user: + description: + - RHN/Satellite login. + required: true + type: str + password: + description: + - RHN/Satellite password. 
+ aliases: [pwd] + required: true + type: str + validate_certs: + description: + - If C(False), SSL certificates will not be validated. + - This should only be set to C(False) when used on self-controlled sites + using self-signed certificates, and you are absolutely sure that nobody + can modify traffic between the module and the site. + type: bool + default: true + version_added: '0.2.0' +''' + +EXAMPLES = ''' +- name: Add a Red Hat software channel + community.general.rhn_channel: + name: rhel-x86_64-server-v2vwin-6 + sysname: server01 + url: https://rhn.redhat.com/rpc/api + user: rhnuser + password: guessme + delegate_to: localhost +''' + +import ssl +from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client + + +def get_systemid(client, session, sysname): + systems = client.system.listUserSystems(session) + for system in systems: + if system.get('name') == sysname: + idres = system.get('id') + idd = int(idres) + return idd + + +def subscribe_channels(channelname, client, session, sysname, sys_id): + channels = base_channels(client, session, sys_id) + channels.append(channelname) + return client.system.setChildChannels(session, sys_id, channels) + + +def unsubscribe_channels(channelname, client, session, sysname, sys_id): + channels = base_channels(client, session, sys_id) + channels.remove(channelname) + return client.system.setChildChannels(session, sys_id, channels) + + +def base_channels(client, session, sys_id): + basechan = client.channel.software.listSystemChannels(session, sys_id) + try: + chans = [item['label'] for item in basechan] + except KeyError: + chans = [item['channel_label'] for item in basechan] + return chans + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + sysname=dict(type='str', required=True), + url=dict(type='str', required=True), + user=dict(type='str', required=True), + password=dict(type='str', required=True, aliases=['pwd'], no_log=True), + validate_certs=dict(type='bool', default=True), + ) + ) + + state = module.params['state'] + channelname = module.params['name'] + systname = module.params['sysname'] + saturl = module.params['url'] + user = module.params['user'] + password = module.params['password'] + validate_certs = module.params['validate_certs'] + + ssl_context = None + if not validate_certs: + try: # Python 2.7.9 and newer provide the private helper used here + ssl_context = ssl._create_unverified_context() + except AttributeError: # Legacy Python does not verify HTTPS certificates by default, so nothing to do + pass + + # initialize connection + if ssl_context: + client = xmlrpc_client.ServerProxy(saturl, context=ssl_context) + else: + client = xmlrpc_client.Server(saturl) + + try: + session = client.auth.login(user, password) + except Exception as e: + module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e)) + + if not session: + module.fail_json(msg="Failed to establish session with Satellite server.") + + # get systemid + try: + sys_id = get_systemid(client, session, systname) + except Exception as e: + module.fail_json(msg="Unable to get system id: %s " % to_text(e)) + + if not sys_id: + module.fail_json(msg="Failed to get system id.") + + # get channels for system
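+ # base_channels() returns a flat list of channel labels, for example
+ # ['rhel-x86_64-server-6', 'rhel-x86_64-server-6-optional'] (illustrative
+ # values, not real output). The membership tests below therefore compare
+ # plain strings, which is why subscribe_channels() and unsubscribe_channels()
+ # can simply append to or remove from that list before calling
+ # system.setChildChannels.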
+ try: + chans = base_channels(client, session, sys_id) + except Exception as e: + module.fail_json(msg="Unable to get channel information: %s " % to_text(e)) + + try: + if state == 'present': + if channelname in chans: + module.exit_json(changed=False, msg="Channel %s already exists" % channelname) + else: + subscribe_channels(channelname, client, session, systname, sys_id) + module.exit_json(changed=True, msg="Channel %s added" % channelname) + + if state == 'absent': + if channelname not in chans: + module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname) + else: + unsubscribe_channels(channelname, client, session, systname, sys_id) + module.exit_json(changed=True, msg="Channel %s removed" % channelname) + except Exception as e: + module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e))) + finally: + client.auth.logout(session) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rhn_register.py b/ansible_collections/community/general/plugins/modules/rhn_register.py new file mode 100644 index 000000000..1fe9297d2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rhn_register.py @@ -0,0 +1,455 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) James Laska + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: rhn_register +short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command +description: + - Manage registration to the Red Hat Network. +author: + - James Laska (@jlaska) +notes: + - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead. + - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey. +requirements: + - rhnreg_ks + - either libxml2 or lxml +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Whether to register (C(present)) or unregister (C(absent)) a system. + type: str + choices: [ absent, present ] + default: present + username: + description: + - Red Hat Network username. + type: str + password: + description: + - Red Hat Network password. + type: str + server_url: + description: + - Specify an alternative Red Hat Network server URL. + - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date). + type: str + activationkey: + description: + - Supply an activation key for use with registration. + type: str + profilename: + description: + - Supply a profile name for use with registration. + type: str + force: + description: + - Force registration, even if system is already registered. + type: bool + default: false + version_added: 2.0.0 + ca_cert: + description: + - Supply a custom SSL CA certificate file for use with registration. + type: path + aliases: [ sslcacert ] + systemorgid: + description: + - Supply an organizational id for use with registration. + type: str + channels: + description: + - Optionally specify a list of channels to subscribe to upon successful registration. + type: list + elements: str + default: [] + enable_eus: + description: + - If C(true), extended update support will be requested. + type: bool + default: false + nopackages: + description: + - If C(true), the registered node will not upload its installed packages information to Satellite server. + type: bool + default: false +''' + +EXAMPLES = r''' +- name: Unregister system from RHN + community.general.rhn_register: + state: absent + username: joe_user + password: somepass + +- name: Register as user with password and auto-subscribe to available content + community.general.rhn_register: + state: present + username: joe_user + password: somepass + +- name: Register with activationkey and enable extended update support + community.general.rhn_register: + state: present + activationkey: 1-222333444 + enable_eus: true + +- name: Register with activationkey and set a profile name which may differ from the hostname + community.general.rhn_register: + state: present + activationkey: 1-222333444 + profilename: host.example.com.custom + +- name: Register as user with password against a satellite server + community.general.rhn_register: + state: present + username: joe_user + password: somepass + server_url: https://xmlrpc.my.satellite/XMLRPC + +- name: Register as user with password and enable channels + community.general.rhn_register: + state: present + username: joe_user + password: somepass + channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 + +- name: Force-register as user with password to ensure registration is current on server + community.general.rhn_register: + state: present + username: joe_user + password: somepass + server_url: https://xmlrpc.my.satellite/XMLRPC + force: true +''' + +RETURN = r''' +# Default return values +''' + +import os +import sys + +# Attempt to import rhn client tools +sys.path.insert(0, '/usr/share/rhn') +try: + import up2date_client + import up2date_client.config + HAS_UP2DATE_CLIENT = True +except ImportError: + HAS_UP2DATE_CLIENT = False + +# INSERT REDHAT SNIPPETS +from ansible_collections.community.general.plugins.module_utils import redhat +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import urllib, xmlrpc_client + + +class Rhn(redhat.RegistrationBase): + + def __init__(self, module=None, username=None, password=None): + redhat.RegistrationBase.__init__(self, module, username, password) + self.config = self.load_config() + self.server = None + self.session = None + + def logout(self): + if self.session is not None: + self.server.auth.logout(self.session) + + def load_config(self): + ''' + Read configuration from /etc/sysconfig/rhn/up2date + ''' + if not HAS_UP2DATE_CLIENT: + return None + + config = up2date_client.config.initUp2dateConfig() + + return config + + @property + def server_url(self): + return self.config['serverURL'] + + @property + def hostname(self): + ''' + Return the non-xmlrpc RHN hostname. This is a convenience method + used for displaying a more readable RHN hostname. + + Returns: str + ''' + url = urllib.parse.urlparse(self.server_url) + return url[1].replace('xmlrpc.', '') + + @property + def systemid(self): + systemid = None + xpath_str = "//member[name='system_id']/value/string" + + if os.path.isfile(self.config['systemIdPath']): + fd = open(self.config['systemIdPath'], 'r') + xml_data = fd.read() + fd.close() + + # Ugh, xml parsing time ... + # First, try parsing with libxml2 ...
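+ # The systemid file is an XML-RPC struct; the XPath expression above pulls
+ # the string value of its 'system_id' member. A rough sketch of the layout
+ # this parser expects (illustrative, not a verbatim file):
+ #
+ #   <params><param><value><struct>
+ #     <member><name>system_id</name>
+ #       <value><string>ID-1000010000</string></value></member>
+ #   </struct></value></param></params>
+ #
+ # The 'ID-' prefix is stripped below before the int() conversion.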
+ if systemid is None: + try: + import libxml2 + doc = libxml2.parseDoc(xml_data) + ctxt = doc.xpathNewContext() + systemid = ctxt.xpathEval(xpath_str)[0].content + doc.freeDoc() + ctxt.xpathFreeContext() + except ImportError: + pass + + # m-kay, let's try with lxml now ... + if systemid is None: + try: + from lxml import etree + root = etree.fromstring(xml_data) + systemid = root.xpath(xpath_str)[0].text + except ImportError: + raise Exception('"libxml2" or "lxml" is required for this module.') + + # Strip the 'ID-' prefix + if systemid is not None and systemid.startswith('ID-'): + systemid = systemid[3:] + + return int(systemid) + + @property + def is_registered(self): + ''' + Determine whether the current system is registered. + + Returns: True|False + ''' + return os.path.isfile(self.config['systemIdPath']) + + def configure_server_url(self, server_url): + ''' + Configure server_url for registration + ''' + + self.config.set('serverURL', server_url) + self.config.save() + + def enable(self): + ''' + Prepare the system for RHN registration. This includes ... + * enabling the rhnplugin yum plugin + * disabling the subscription-manager yum plugin + ''' + redhat.RegistrationBase.enable(self) + self.update_plugin_conf('rhnplugin', True) + self.update_plugin_conf('subscription-manager', False) + + def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False): + ''' + Register system to RHN. If enable_eus=True, extended update + support will be requested. + ''' + register_cmd = ['/usr/sbin/rhnreg_ks', '--force'] + if self.username: + register_cmd.extend(['--username', self.username, '--password', self.password]) + if self.server_url: + register_cmd.extend(['--serverUrl', self.server_url]) + if enable_eus: + register_cmd.append('--use-eus-channel') + if nopackages: + register_cmd.append('--nopackages') + if activationkey is not None: + register_cmd.extend(['--activationkey', activationkey]) + if profilename is not None: + register_cmd.extend(['--profilename', profilename]) + if sslcacert is not None: + register_cmd.extend(['--sslCACert', sslcacert]) + if systemorgid is not None: + register_cmd.extend(['--systemorgid', systemorgid]) + rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True) + + def api(self, method, *args): + ''' + Convenience RPC wrapper + ''' + if self.server is None: + if self.hostname != 'rhn.redhat.com': + url = "https://%s/rpc/api" % self.hostname + else: + url = "https://xmlrpc.%s/rpc/api" % self.hostname + self.server = xmlrpc_client.ServerProxy(url) + self.session = self.server.auth.login(self.username, self.password) + + func = getattr(self.server, method) + return func(self.session, *args) + + def unregister(self): + ''' + Unregister a previously registered system + ''' + + # Initiate RPC connection + self.api('system.deleteSystems', [self.systemid]) + + # Remove systemid file + os.unlink(self.config['systemIdPath']) + + def subscribe(self, channels): + if not channels: + return + + if self._is_hosted(): + current_channels = self.api('channel.software.listSystemChannels', self.systemid) + new_channels = [item['channel_label'] for item in current_channels] + new_channels.extend(channels) + return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels)) + + else: + current_channels = self.api('channel.software.listSystemChannels', self.systemid) + current_channels = [item['label'] for item in current_channels] + new_base = None + new_childs = [] + for ch in 
channels: + if ch in current_channels: + continue + if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '': + new_base = ch + else: + if ch not in new_childs: + new_childs.append(ch) + out_base = 0 + out_childs = 0 + + if new_base: + out_base = self.api('system.setBaseChannel', self.systemid, new_base) + + if new_childs: + out_childs = self.api('system.setChildChannels', self.systemid, new_childs) + + return out_base and out_childs + + def _is_hosted(self): + ''' + Return True if we are running against Hosted (rhn.redhat.com) or + False otherwise (when running against Satellite or Spacewalk) + ''' + return 'rhn.redhat.com' in self.hostname + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + username=dict(type='str'), + password=dict(type='str', no_log=True), + server_url=dict(type='str'), + activationkey=dict(type='str', no_log=True), + profilename=dict(type='str'), + ca_cert=dict(type='path', aliases=['sslcacert']), + systemorgid=dict(type='str'), + enable_eus=dict(type='bool', default=False), + force=dict(type='bool', default=False), + nopackages=dict(type='bool', default=False), + channels=dict(type='list', elements='str', default=[]), + ), + # username/password is required for state=absent, or if channels is not empty + # (basically anything that uses self.api requires username/password) but it doesn't + # look like we can express that with required_if/required_together/mutually_exclusive + + # only username+password can be used for unregister + required_if=[['state', 'absent', ['username', 'password']]], + ) + + if not HAS_UP2DATE_CLIENT: + module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?") + + server_url = module.params['server_url'] + username = module.params['username'] + password = module.params['password'] + + state = module.params['state'] + force = module.params['force'] + activationkey = module.params['activationkey'] + profilename = module.params['profilename'] + sslcacert = module.params['ca_cert'] + systemorgid = module.params['systemorgid'] + channels = module.params['channels'] + enable_eus = module.params['enable_eus'] + nopackages = module.params['nopackages'] + + rhn = Rhn(module=module, username=username, password=password) + + # use the provided server url and persist it to the rhn config. + if server_url: + rhn.configure_server_url(server_url) + + if not rhn.server_url: + module.fail_json( + msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)" + ) + + # Ensure system is registered + if state == 'present': + + # Check for missing parameters ... 
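+ # Sketch of how the two credential checks below interact (k = activationkey
+ # supplied, u = username supplied, p = password supplied; derived from the
+ # conditions themselves, not from external documentation):
+ #
+ #   k=0 u=0 p=0 -> first check fails: nothing was supplied
+ #   k=0 u=1 p=0 -> second check fails: both username and password are needed
+ #   k=1 u=0 p=0 -> accepted: an activation key alone is sufficient
+ #   k=0 u=1 p=1 -> accepted: a full username/password pair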
+ if not (activationkey or rhn.username or rhn.password): + module.fail_json(msg="Missing arguments: must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, + rhn.password)) + if not activationkey and not (rhn.username and rhn.password): + module.fail_json(msg="Missing arguments: if registering without an activationkey, you must supply both a username and a password") + + # Register system + if rhn.is_registered and not force: + module.exit_json(changed=False, msg="System already registered.") + + try: + rhn.enable() + rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages) + rhn.subscribe(channels) + except Exception as exc: + module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc)) + finally: + rhn.logout() + + module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname) + + # Ensure system is *not* registered + if state == 'absent': + if not rhn.is_registered: + module.exit_json(changed=False, msg="System already unregistered.") + + if not (rhn.username and rhn.password): + module.fail_json(msg="Missing arguments: the system is currently registered and unregistration requires a username and password") + + try: + rhn.unregister() + except Exception as exc: + module.fail_json(msg="Failed to unregister: %s" % exc) + finally: + rhn.logout() + + module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rhsm_release.py b/ansible_collections/community/general/plugins/modules/rhsm_release.py new file mode 100644 index 000000000..6ac4da6e4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rhsm_release.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Sean Myers + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: rhsm_release +short_description: Set or unset RHSM release version +description: + - Sets or unsets the release version used by RHSM repositories. +notes: + - This module will fail on an unregistered system. + Use the C(redhat_subscription) module to register a system + prior to setting the RHSM release. + - It is possible to interact with C(subscription-manager) only as root, + so root permissions are required to successfully run this module. +requirements: + - Red Hat Enterprise Linux 6+ with subscription-manager installed +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + release: + description: + - RHSM release version to use. + - To unset, either pass C(null) for this option or omit the option.
+ type: str +author: + - Sean Myers (@seandst) +''' + +EXAMPLES = ''' +# Set release version to 7.1 +- name: Set RHSM release version + community.general.rhsm_release: + release: "7.1" + +# Set release version to 6Server +- name: Set RHSM release version + community.general.rhsm_release: + release: "6Server" + +# Unset release version +- name: Unset RHSM release version + community.general.rhsm_release: + release: null +''' + +RETURN = ''' +current_release: + description: The current RHSM release version value + returned: success + type: str +''' + +from ansible.module_utils.basic import AnsibleModule + +import os +import re + +# Matches release-like values such as 7.2, 5.10, 6Server, 8 +# but rejects unlikely values, like 100Server, 1.100, 7server etc. +release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b') + + +def _sm_release(module, *args): + # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes + # "subscription-manager release --set 0.1" + sm_bin = module.get_bin_path('subscription-manager', required=True) + cmd = '{0} release {1}'.format(sm_bin, " ".join(args)) + # delegate nonzero rc handling to run_command + return module.run_command(cmd, check_rc=True) + + +def get_release(module): + # Get the current release version, or None if release unset + rc, out, err = _sm_release(module, '--show') + try: + match = release_matcher.findall(out)[0] + except IndexError: + # 0'th index did not exist; no matches + match = None + + return match + + +def set_release(module, release): + # Set current release version, or unset if release is None + if release is None: + args = ('--unset',) + else: + args = ('--set', release) + + return _sm_release(module, *args) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + release=dict(type='str'), + ), + supports_check_mode=True + ) + + if os.getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + + target_release = module.params['release'] + + # sanity check: the target release at least looks like a valid release + if target_release and not release_matcher.findall(target_release): + module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release)) + + # Will fail with useful error from s-m if system not subscribed + current_release = get_release(module) + + changed = (target_release != current_release) + if not module.check_mode and changed: + set_release(module, target_release) + # If setting the release fails, then a fail_json would have exited with + # the s-m error, e.g. "No releases match '7.20'...". If not, then the + # current release is now set to the target release (job's done) + current_release = target_release + + module.exit_json(current_release=current_release, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rhsm_repository.py b/ansible_collections/community/general/plugins/modules/rhsm_repository.py new file mode 100644 index 000000000..eea6e3857 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rhsm_repository.py @@ -0,0 +1,260 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Giovanni Sciortino (@giovannisciortino) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: rhsm_repository +short_description: Manage RHSM repositories using the subscription-manager command +description: + - Manage (enable or disable) RHSM repositories in the Red Hat Subscription + Management entitlement platform using the C(subscription-manager) command. +author: Giovanni Sciortino (@giovannisciortino) +notes: + - In order to manage RHSM repositories the system must be already registered + to RHSM manually or using the Ansible C(redhat_subscription) module. + - It is possible to interact with C(subscription-manager) only as root, + so root permissions are required to successfully run this module. + +requirements: + - subscription-manager +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + state: + description: + - The desired repository state. C(present) and C(enabled) both enable + the repository; C(absent) and C(disabled) both disable it. + choices: [present, enabled, absent, disabled] + default: "enabled" + type: str + name: + description: + - The ID of repositories to enable or disable. + - To operate on several repositories this can accept a comma-separated + list or a YAML list. + required: true + type: list + elements: str + purge: + description: + - Disable all currently enabled repositories that are not specified in C(name). + Only set this to C(True) if passing in a list of repositories to the C(name) field. + Using this with C(loop) will most likely not have the desired result. + type: bool + default: false +''' + +EXAMPLES = ''' +- name: Enable a RHSM repository + community.general.rhsm_repository: + name: rhel-7-server-rpms + +- name: Disable all RHSM repositories + community.general.rhsm_repository: + name: '*' + state: disabled + +- name: Enable all repositories starting with rhel-6-server + community.general.rhsm_repository: + name: rhel-6-server* + state: enabled + +- name: Disable all repositories except rhel-7-server-rpms + community.general.rhsm_repository: + name: rhel-7-server-rpms + purge: true +''' + +RETURN = ''' +repositories: + description: + - The list of RHSM repositories with their states. + - When this module is used to change the repository states, this list contains the updated states after the changes.
+ returned: success + type: list +''' + +import re +import os +from fnmatch import fnmatch +from copy import deepcopy +from ansible.module_utils.basic import AnsibleModule + + +def run_subscription_manager(module, arguments): + # Execute subscription-manager with arguments and manage common errors + rhsm_bin = module.get_bin_path('subscription-manager') + if not rhsm_bin: + module.fail_json(msg='The executable file subscription-manager was not found in PATH') + + lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env) + + if rc == 0 and out == 'This system has no repositories available through subscriptions.\n': + module.fail_json(msg='This system has no repositories available through subscriptions') + elif rc == 1: + module.fail_json(msg='subscription-manager failed with the following error: %s' % err) + else: + return rc, out, err + + +def get_repository_list(module, list_parameter): + # Generate RHSM repository list and return a list of dict + if list_parameter == 'list_enabled': + rhsm_arguments = ['repos', '--list-enabled'] + elif list_parameter == 'list_disabled': + rhsm_arguments = ['repos', '--list-disabled'] + elif list_parameter == 'list': + rhsm_arguments = ['repos', '--list'] + rc, out, err = run_subscription_manager(module, rhsm_arguments) + + skip_lines = [ + '+----------------------------------------------------------+', + ' Available Repositories in /etc/yum.repos.d/redhat.repo' + ] + repo_id_re = re.compile(r'Repo ID:\s+(.*)') + repo_name_re = re.compile(r'Repo Name:\s+(.*)') + repo_url_re = re.compile(r'Repo URL:\s+(.*)') + repo_enabled_re = re.compile(r'Enabled:\s+(.*)') + + repo_id = '' + repo_name = '' + repo_url = '' + repo_enabled = '' + + repo_result = [] + for line in out.splitlines(): + if line == '' or line in skip_lines: + continue + + repo_id_match = repo_id_re.match(line) + if repo_id_match: + repo_id = repo_id_match.group(1) + continue + + repo_name_match = repo_name_re.match(line) + if repo_name_match: + repo_name = repo_name_match.group(1) + continue + + repo_url_match = repo_url_re.match(line) + if repo_url_match: + repo_url = repo_url_match.group(1) + continue + + repo_enabled_match = repo_enabled_re.match(line) + if repo_enabled_match: + repo_enabled = repo_enabled_match.group(1) + + repo = { + "id": repo_id, + "name": repo_name, + "url": repo_url, + "enabled": True if repo_enabled == '1' else False + } + + repo_result.append(repo) + + return repo_result + + +def repository_modify(module, state, name, purge=False): + name = set(name) + current_repo_list = get_repository_list(module, 'list') + updated_repo_list = deepcopy(current_repo_list) + matched_existing_repo = {} + for repoid in name: + matched_existing_repo[repoid] = [] + for idx, repo in enumerate(current_repo_list): + if fnmatch(repo['id'], repoid): + matched_existing_repo[repoid].append(repo) + # Update current_repo_list to return it as result variable + updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False + + changed = False + results = [] + diff_before = "" + diff_after = "" + rhsm_arguments = ['repos'] + + for repoid in matched_existing_repo: + if len(matched_existing_repo[repoid]) == 0: + results.append("%s is not a valid repository ID" % repoid) + module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid) + for repo in matched_existing_repo[repoid]: + if state in ['disabled', 'absent']: + if repo['enabled']: + changed = True + diff_before += 
"Repository '%s' is enabled for this system\n" % repo['id'] + diff_after += "Repository '%s' is disabled for this system\n" % repo['id'] + results.append("Repository '%s' is disabled for this system" % repo['id']) + rhsm_arguments += ['--disable', repo['id']] + elif state in ['enabled', 'present']: + if not repo['enabled']: + changed = True + diff_before += "Repository '%s' is disabled for this system\n" % repo['id'] + diff_after += "Repository '%s' is enabled for this system\n" % repo['id'] + results.append("Repository '%s' is enabled for this system" % repo['id']) + rhsm_arguments += ['--enable', repo['id']] + + # Disable all enabled repos on the system that are not in the task and not + # marked as disabled by the task + if purge: + enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled']) + matched_repoids_set = set(matched_existing_repo.keys()) + difference = enabled_repo_ids.difference(matched_repoids_set) + if len(difference) > 0: + for repoid in difference: + changed = True + diff_before.join("Repository '{repoid}'' is enabled for this system\n".format(repoid=repoid)) + diff_after.join("Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)) + results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid)) + rhsm_arguments.extend(['--disable', repoid]) + + diff = {'before': diff_before, + 'after': diff_after, + 'before_header': "RHSM repositories", + 'after_header': "RHSM repositories"} + + if not module.check_mode and changed: + rc, out, err = run_subscription_manager(module, rhsm_arguments) + results = out.splitlines() + module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='list', elements='str', required=True), + state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'), + purge=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + if os.getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + + name = module.params['name'] + state = module.params['state'] + purge = module.params['purge'] + + repository_modify(module, state, name, purge) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/riak.py b/ansible_collections/community/general/plugins/modules/riak.py new file mode 100644 index 000000000..024e5424d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/riak.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, James Martin , Drew Kerrigan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: riak +short_description: This module handles some common Riak operations +description: + - This module can be used to join nodes to a cluster, check + the status of the cluster. +author: + - "James Martin (@jsmartin)" + - "Drew Kerrigan (@drewkerrigan)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + command: + description: + - The command you would like to perform against the cluster. 
+ choices: ['ping', 'kv_test', 'join', 'plan', 'commit'] + type: str + config_dir: + description: + - The path to the riak configuration directory + default: /etc/riak + type: path + http_conn: + description: + - The IP address and port that is listening for Riak HTTP queries + default: 127.0.0.1:8098 + type: str + target_node: + description: + - The target node for certain operations (join, ping) + default: riak@127.0.0.1 + type: str + wait_for_handoffs: + description: + - Number of seconds to wait for handoffs to complete. + type: int + default: 0 + wait_for_ring: + description: + - Number of seconds to wait for all nodes to agree on the ring. + type: int + default: 0 + wait_for_service: + description: + - Waits for a riak service to come online before continuing. + choices: ['kv'] + type: str + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: true +''' + +EXAMPLES = ''' +- name: Join a Riak node to another node + community.general.riak: + command: join + target_node: riak@10.1.1.1 + +- name: Wait up to ten minutes for handoffs to finish. Use with async and poll. + community.general.riak: + wait_for_handoffs: 600 + +- name: Wait for riak_kv service to startup + community.general.riak: + wait_for_service: kv +''' + +import json +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def ring_check(module, riak_admin_bin): + cmd = '%s ringready' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if rc == 0 and 'TRUE All nodes agree on the ring' in out: + return True + else: + return False + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + command=dict(required=False, default=None, choices=[ + 'ping', 'kv_test', 'join', 'plan', 'commit']), + config_dir=dict(default='/etc/riak', type='path'), + http_conn=dict(required=False, default='127.0.0.1:8098'), + target_node=dict(default='riak@127.0.0.1', required=False), + wait_for_handoffs=dict(default=0, type='int'), + wait_for_ring=dict(default=0, type='int'), + wait_for_service=dict( + required=False, default=None, choices=['kv']), + validate_certs=dict(default=True, type='bool')) + ) + + command = module.params.get('command') + http_conn = module.params.get('http_conn') + target_node = module.params.get('target_node') + wait_for_handoffs = module.params.get('wait_for_handoffs') + wait_for_ring = module.params.get('wait_for_ring') + wait_for_service = module.params.get('wait_for_service') + + # make sure riak commands are on the path + riak_bin = module.get_bin_path('riak') + riak_admin_bin = module.get_bin_path('riak-admin') + + timeout = time.time() + 120 + while True: + if time.time() > timeout: + module.fail_json(msg='Timeout, could not fetch Riak stats.') + (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5) + if info['status'] == 200: + stats_raw = response.read() + break + time.sleep(5) + + # here we attempt to load those stats + try: + stats = json.loads(stats_raw) + except Exception: + module.fail_json(msg='Could not parse Riak stats.') + + node_name = stats['nodename'] + nodes = stats['ring_members'] + ring_size = stats['ring_creation_size'] + rc, out, err = module.run_command([riak_bin, 'version']) + version = out.strip() + + result = dict(node_name=node_name, + nodes=nodes, + ring_size=ring_size, + version=version) + + if command == 'ping': + cmd = '%s ping %s' % 
(riak_bin, target_node) + rc, out, err = module.run_command(cmd) + if rc == 0: + result['ping'] = out + else: + module.fail_json(msg=out) + + elif command == 'kv_test': + cmd = '%s test' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if rc == 0: + result['kv_test'] = out + else: + module.fail_json(msg=out) + + elif command == 'join': + if nodes.count(node_name) == 1 and len(nodes) > 1: + result['join'] = 'Node is already in cluster or staged to be in cluster.' + else: + cmd = '%s cluster join %s' % (riak_admin_bin, target_node) + rc, out, err = module.run_command(cmd) + if rc == 0: + result['join'] = out + result['changed'] = True + else: + module.fail_json(msg=out) + + elif command == 'plan': + cmd = '%s cluster plan' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if rc == 0: + result['plan'] = out + if 'Staged Changes' in out: + result['changed'] = True + else: + module.fail_json(msg=out) + + elif command == 'commit': + cmd = '%s cluster commit' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if rc == 0: + result['commit'] = out + result['changed'] = True + else: + module.fail_json(msg=out) + +# this could take a while, recommend to run in async mode + if wait_for_handoffs: + timeout = time.time() + wait_for_handoffs + while True: + cmd = '%s transfers' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if 'No transfers active' in out: + result['handoffs'] = 'No transfers active.' + break + time.sleep(10) + if time.time() > timeout: + module.fail_json(msg='Timeout waiting for handoffs.') + + if wait_for_service: + cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name] + rc, out, err = module.run_command(cmd) + result['service'] = out + + if wait_for_ring: + timeout = time.time() + wait_for_ring + while True: + if ring_check(module, riak_admin_bin): + break + time.sleep(10) + if time.time() > timeout: + module.fail_json(msg='Timeout waiting for nodes to agree on ring.') + + result['ring_ready'] = ring_check(module, riak_admin_bin) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rocketchat.py b/ansible_collections/community/general/plugins/modules/rocketchat.py new file mode 100644 index 000000000..23d6d529e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rocketchat.py @@ -0,0 +1,250 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Deepak Kothandan +# Copyright (c) 2015, Stefan Berggren +# Copyright (c) 2014, Ramon de la Fuente +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: rocketchat +short_description: Send notifications to Rocket Chat +description: + - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration +author: "Ramon de la Fuente (@ramondelafuente)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + domain: + type: str + description: + - The domain for your environment without protocol. (i.e. + C(example.com) or C(chat.example.com)) + required: true + token: + type: str + description: + - Rocket Chat Incoming Webhook integration token. 
This provides + authentication to Rocket Chat's Incoming webhook for posting + messages. + required: true + protocol: + type: str + description: + - Specify the protocol used to send notification messages before the webhook URL (that is, C(http) or C(https)). + default: https + choices: + - 'http' + - 'https' + msg: + type: str + description: + - Message to be sent. + channel: + type: str + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the I(token) + specified during the creation of the webhook. + username: + type: str + description: + - This is the sender of the message. + default: "Ansible" + icon_url: + type: str + description: + - URL for the message sender's icon. + default: "https://docs.ansible.com/favicon.ico" + icon_emoji: + type: str + description: + - Emoji for the message sender. The representation for the available emojis can be + obtained from Rocket Chat (for example :thumbsup:). (if I(icon_emoji) is set, I(icon_url) will not be used) + link_names: + type: int + description: + - Automatically create links for channels and usernames in I(msg). + default: 1 + choices: + - 1 + - 0 + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + type: bool + default: true + color: + type: str + description: + - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + default: 'normal' + choices: + - 'normal' + - 'good' + - 'warning' + - 'danger' + attachments: + type: list + elements: dict + description: + - Define a list of attachments. +''' + +EXAMPLES = """ +- name: Send notification message via Rocket Chat + community.general.rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + msg: '{{ inventory_hostname }} completed' + delegate_to: localhost + +- name: Send notification message via Rocket Chat with all options + community.general.rocketchat: + domain: chat.example.com + token: thetoken/generatedby/rocketchat + msg: '{{ inventory_hostname }} completed' + channel: '#ansible' + username: 'Ansible on {{ inventory_hostname }}' + icon_url: http://www.example.com/some-image-file.png + link_names: 0 + delegate_to: localhost + +- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat + community.general.rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + msg: '{{ inventory_hostname }} is alive!' + color: good + username: '' + icon_url: '' + delegate_to: localhost + +- name: Use the attachments API + community.general.rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + attachments: + - text: Display my system load on host A and B + color: '#ff00dd' + title: System load + fields: + - title: System A + value: 'load average: 0,74, 0,66, 0,63' + short: true + - title: System B + value: 'load average: 5,16, 4,64, 2,43' + short: true + delegate_to: localhost +""" + +RETURN = """ +changed: + description: A flag indicating if any change was made or not.
+ returned: success + type: bool + sample: false +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' + + +def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments): + payload = {} + if color == "normal" and text is not None: + payload = dict(text=text) + elif text is not None: + payload = dict(attachments=[dict(text=text, color=color)]) + if channel is not None: + if (channel[0] == '#') or (channel[0] == '@'): + payload['channel'] = channel + else: + payload['channel'] = '#' + channel + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + + if attachments is not None: + if 'attachments' not in payload: + payload['attachments'] = [] + + if attachments is not None: + for attachment in attachments: + if 'fallback' not in attachment: + attachment['fallback'] = attachment['text'] + payload['attachments'].append(attachment) + + payload = "payload=" + module.jsonify(payload) + return payload + + +def do_notify_rocketchat(module, domain, token, protocol, payload): + + if token.count('/') < 1: + module.fail_json(msg="Invalid Token specified, provide a valid token") + + rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token) + + response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload) + if info['status'] != 200: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', required=True), + token=dict(type='str', required=True, no_log=True), + protocol=dict(type='str', default='https', choices=['http', 'https']), + msg=dict(type='str', required=False), + channel=dict(type='str'), + username=dict(type='str', default='Ansible'), + icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), + icon_emoji=dict(type='str'), + link_names=dict(type='int', default=1, choices=[0, 1]), + validate_certs=dict(default=True, type='bool'), + color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), + attachments=dict(type='list', elements='dict', required=False) + ) + ) + + domain = module.params['domain'] + token = module.params['token'] + protocol = module.params['protocol'] + text = module.params['msg'] + channel = module.params['channel'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + color = module.params['color'] + attachments = module.params['attachments'] + + payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments) + do_notify_rocketchat(module, domain, token, protocol, payload) + + module.exit_json(msg="OK") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rollbar_deployment.py b/ansible_collections/community/general/plugins/modules/rollbar_deployment.py new file mode 100644 index 000000000..314e65bc6 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rollbar_deployment.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014, Max Riveiro, +# GNU General 
Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rollbar_deployment +author: "Max Riveiro (@kavu)" +short_description: Notify Rollbar about app deployments +description: + - Notify Rollbar about app deployments + (see https://rollbar.com/docs/deploys_other/) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - Your project access token. + required: true + environment: + type: str + description: + - Name of the environment being deployed, e.g. 'production'. + required: true + revision: + type: str + description: + - Revision number/sha being deployed. + required: true + user: + type: str + description: + - User who deployed. + required: false + rollbar_user: + type: str + description: + - Rollbar username of the user who deployed. + required: false + comment: + type: str + description: + - Deploy comment (e.g. what is being deployed). + required: false + url: + type: str + description: + - Optional URL to submit the notification to. + required: false + default: 'https://api.rollbar.com/api/1/deploy/' + validate_certs: + description: + - If C(false), SSL certificates for the target url will not be validated. + This should only be used on personally controlled sites using + self-signed certificates. + required: false + default: true + type: bool +''' + +EXAMPLES = ''' + - name: Rollbar deployment notification + community.general.rollbar_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: '4.2' + rollbar_user: admin + comment: Test Deploy + + - name: Notify rollbar about current git revision deployment by current user + community.general.rollbar_deployment: + token: "{{ rollbar_access_token }}" + environment: production + revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" + user: "{{ lookup('env', 'USER') }}" +''' +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import fetch_url + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + environment=dict(required=True), + revision=dict(required=True), + user=dict(required=False), + rollbar_user=dict(required=False), + comment=dict(required=False), + url=dict( + required=False, + default='https://api.rollbar.com/api/1/deploy/' + ), + validate_certs=dict(default=True, type='bool'), + ), + supports_check_mode=True + ) + + if module.check_mode: + module.exit_json(changed=True) + + params = dict( + access_token=module.params['token'], + environment=module.params['environment'], + revision=module.params['revision'] + ) + + if module.params['user']: + params['local_username'] = module.params['user'] + + if module.params['rollbar_user']: + params['rollbar_username'] = module.params['rollbar_user'] + + if module.params['comment']: + params['comment'] = module.params['comment'] + + url = module.params.get('url') + + try: + data = urlencode(params) + response, info = fetch_url(module, url, data=data, method='POST') + except Exception as e: + module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), 
exception=traceback.format_exc()) + else: + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py b/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py new file mode 100644 index 000000000..52219cd1b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Dusty Mabe +# Copyright (c) 2018, Ansible Project +# Copyright (c) 2021, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: rpm_ostree_pkg +short_description: Install or uninstall overlay additional packages +version_added: "2.0.0" +description: + - Install or uninstall overlay additional packages using C(rpm-ostree) command. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of overlay package to install or remove. + required: true + type: list + elements: str + aliases: [ pkg ] + state: + description: + - State of the overlay package. + - C(present) simply ensures that a desired package is installed. + - C(absent) removes the specified package. + choices: [ 'absent', 'present' ] + default: 'present' + type: str +author: + - Dusty Mabe (@dustymabe) + - Abhijeet Kasurde (@Akasurde) +''' + +EXAMPLES = r''' +- name: Install overlay package + community.general.rpm_ostree_pkg: + name: nfs-utils + state: present + +- name: Remove overlay package + community.general.rpm_ostree_pkg: + name: nfs-utils + state: absent +''' + +RETURN = r''' +rc: + description: Return code of rpm-ostree command. + returned: always + type: int + sample: 0 +changed: + description: State changes. + returned: always + type: bool + sample: true +action: + description: Action performed. + returned: always + type: str + sample: 'install' +packages: + description: A list of packages specified. + returned: always + type: list + sample: ['nfs-utils'] +stdout: + description: Stdout of rpm-ostree command. + returned: always + type: str + sample: 'Staging deployment...done\n...' +stderr: + description: Stderr of rpm-ostree command. + returned: always + type: str + sample: '' +cmd: + description: Full command used for performed action. 
+    returned: always
+    type: str
+    sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class RpmOstreePkg:
+    def __init__(self, module):
+        self.module = module
+        self.params = module.params
+        self.state = module.params['state']
+
+    def ensure(self):
+        results = dict(
+            rc=0,
+            changed=False,
+            action='',
+            packages=[],
+            stdout='',
+            stderr='',
+            cmd='',
+        )
+
+        # Ensure rpm-ostree command exists
+        cmd = [self.module.get_bin_path('rpm-ostree', required=True)]
+
+        # Decide action to perform
+        if self.state == 'present':
+            results['action'] = 'install'
+            cmd.append('install')
+        elif self.state == 'absent':
+            results['action'] = 'uninstall'
+            cmd.append('uninstall')
+
+        # Additional parameters
+        cmd.extend(['--allow-inactive', '--idempotent', '--unchanged-exit-77'])
+        for pkg in self.params['name']:
+            cmd.append(pkg)
+            results['packages'].append(pkg)
+
+        rc, out, err = self.module.run_command(cmd)
+
+        results.update(dict(
+            rc=rc,
+            cmd=' '.join(cmd),
+            stdout=out,
+            stderr=err,
+        ))
+
+        # A few possible options:
+        # - rc=0  - succeeded in making a change
+        # - rc=77 - no change was needed
+        # - rc=?  - error
+        if rc == 0:
+            results['changed'] = True
+        elif rc == 77:
+            results['changed'] = False
+            results['rc'] = 0
+        else:
+            self.module.fail_json(msg='non-zero return code', **results)
+
+        self.module.exit_json(**results)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(
+                default="present",
+                choices=['absent', 'present']
+            ),
+            name=dict(
+                aliases=["pkg"],
+                required=True,
+                type='list',
+                elements='str',
+            ),
+        ),
+    )
+
+    rpm_ostree_pkg = RpmOstreePkg(module)
+    rpm_ostree_pkg.ensure()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py b/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
new file mode 100644
index 000000000..77026e633
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
@@ -0,0 +1,239 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Loic Blot
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_acl_policy
+
+short_description: Manage Rundeck ACL policies
+description:
+    - Create, update and remove Rundeck ACL policies through HTTP API.
+author: "Loic Blot (@nerzhul)"
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+    state:
+        type: str
+        description:
+            - Create or remove the Rundeck ACL policy.
+        choices: ['present', 'absent']
+        default: 'present'
+    name:
+        type: str
+        description:
+            - Sets the ACL policy name.
+        required: true
+    api_token:
+        description:
+            - Sets the token to authenticate against Rundeck API.
+        aliases: ["token"]
+    project:
+        type: str
+        description:
+            - Sets the project which receives the ACL policy.
+            - If unset, it is a system ACL policy.
+    policy:
+        type: str
+        description:
+            - Sets the ACL policy content.
+            - ACL policy content is a YAML object as described in http://rundeck.org/docs/man5/aclpolicy.html.
+            - It can be a YAML string or a pure Ansible inventory YAML object.
+    client_cert:
+        version_added: '0.2.0'
+    client_key:
+        version_added: '0.2.0'
+    force:
+        version_added: '0.2.0'
+    force_basic_auth:
+        version_added: '0.2.0'
+    http_agent:
+        version_added: '0.2.0'
+    url_password:
+        version_added: '0.2.0'
+    url_username:
+        version_added: '0.2.0'
+    use_proxy:
+        version_added: '0.2.0'
+    validate_certs:
+        version_added: '0.2.0'
+extends_documentation_fragment:
+    - ansible.builtin.url
+    - community.general.attributes
+    - community.general.rundeck
+'''
+
+EXAMPLES = '''
+- name: Create or update a rundeck ACL policy in project Ansible
+  community.general.rundeck_acl_policy:
+    name: "Project_01"
+    api_version: 18
+    url: "https://rundeck.example.org"
+    token: "mytoken"
+    state: present
+    project: "Ansible"
+    policy:
+      description: "my policy"
+      context:
+        application: rundeck
+      for:
+        project:
+          - allow: read
+      by:
+        group: "build"
+
+- name: Remove a rundeck system policy
+  community.general.rundeck_acl_policy:
+    name: "Project_01"
+    url: "https://rundeck.example.org"
+    token: "mytoken"
+    state: absent
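+
+# Illustrative variant (not part of the original examples): since the
+# documentation above says the policy can be a plain YAML string and that
+# omitting I(project) creates a system ACL policy, the content can also be
+# loaded from a file kept next to the playbook.
+- name: Create a system ACL policy from a file
+  community.general.rundeck_acl_policy:
+    name: "System_Policy_01"
+    url: "https://rundeck.example.org"
+    token: "mytoken"
+    state: present
+    policy: "{{ lookup('file', 'system_policy.aclpolicy') }}"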
+'''
+
+RETURN = '''
+rundeck_response:
+    description: Rundeck response when a failure occurs.
+    returned: failed
+    type: str
+before:
+    description: Dictionary containing ACL policy information before modification.
+    returned: success
+    type: dict
+after:
+    description: Dictionary containing ACL policy information after modification.
+    returned: success
+    type: dict
+'''
+
+# import module snippets
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+    api_argument_spec,
+    api_request,
+)
+
+
+class RundeckACLManager:
+    def __init__(self, module):
+        self.module = module
+
+    def get_acl(self):
+        resp, info = api_request(
+            module=self.module,
+            endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+        )
+
+        return resp
+
+    def create_or_update_acl(self):
+        facts = self.get_acl()
+        if facts is None:
+            # If in check mode, don't create the ACL policy, simulate a fake creation
+            if self.module.check_mode:
+                self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])
+
+            resp, info = api_request(
+                module=self.module,
+                endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+                method="POST",
+                data={"contents": self.module.params["policy"]},
+            )
+
+            if info["status"] == 201:
+                self.module.exit_json(changed=True, before={}, after=self.get_acl())
+            elif info["status"] == 400:
+                self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+                                          self.module.params["name"])
+            elif info["status"] == 409:
+                self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
+            else:
+                self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+                                      before={}, after=self.get_acl())
+        else:
+            if facts["contents"] == self.module.params["policy"]:
+                self.module.exit_json(changed=False, before=facts, after=facts)
+
+            if self.module.check_mode:
+                self.module.exit_json(changed=True, before=facts, after=facts)
+
+            resp, info = api_request(
+                module=self.module,
+                endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+                method="PUT",
+                data={"contents": self.module.params["policy"]},
+            )
+
+            if info["status"] == 200:
+                self.module.exit_json(changed=True, before=facts, after=self.get_acl())
+            elif info["status"] == 400:
+                self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+                                          self.module.params["name"])
+            elif info["status"] == 404:
+                self.module.fail_json(msg="ACL %s does not exist. Cannot update." % self.module.params["name"])
+
+    def remove_acl(self):
+        facts = self.get_acl()
+
+        if facts is None:
+            self.module.exit_json(changed=False, before={}, after={})
+        else:
+            # If not in check mode, remove the ACL policy
+            if not self.module.check_mode:
+                api_request(
+                    module=self.module,
+                    endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+                    method="DELETE",
+                )
+
+            self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+    # Also allow the user to set values for fetch_url
+    argument_spec = api_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', choices=['present', 'absent'], default='present'),
+        name=dict(required=True, type='str'),
+        policy=dict(type='str'),
+        project=dict(type='str'),
+    ))
+
+    argument_spec['api_token']['aliases'] = ['token']
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_if=[
+            ['state', 'present', ['policy']],
+        ],
+        supports_check_mode=True,
+    )
+
+    # Anchor the pattern so the whole name must consist of allowed characters
+    if not re.match(r"^[a-zA-Z0-9,.+_-]+$", module.params["name"]):
+        module.fail_json(msg="Name contains forbidden characters. The policy name can contain the characters: a-zA-Z0-9,.+_-")
+
+    if module.params["api_version"] < 14:
+        module.fail_json(msg="API version should be at least 14")
+
+    rundeck = RundeckACLManager(module)
+    if module.params['state'] == 'present':
+        rundeck.create_or_update_acl()
+    elif module.params['state'] == 'absent':
+        rundeck.remove_acl()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py b/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py
new file mode 100644
index 000000000..818bde83c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Phillipe Smith
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_job_executions_info
+short_description: Query executions for a Rundeck job
+description:
+    - This module gets the list of executions for a specified Rundeck job.
+author: "Phillipe Smith (@phsmith)"
+version_added: 3.8.0
+options:
+    job_id:
+        type: str
+        description:
+            - The job unique ID.
+        required: true
+    status:
+        type: str
+        description:
+            - The job status to filter.
+        choices: [succeeded, failed, aborted, running]
+    max:
+        type: int
+        description:
+            - Max results to return.
+        default: 20
+    offset:
+        type: int
+        description:
+            - The start point to return the results.
+        default: 0
+extends_documentation_fragment:
+    - community.general.rundeck
+    - url
+    - community.general.attributes
+    - community.general.attributes.info_module
+'''
+
+EXAMPLES = '''
+- name: Get Rundeck job executions info
+  community.general.rundeck_job_executions_info:
+    url: "https://rundeck.example.org"
+    api_version: 39
+    api_token: "mytoken"
+    job_id: "xxxxxxxxxxxxxxxxx"
+  register: rundeck_job_executions_info

+- name: Show Rundeck job executions info
+  ansible.builtin.debug:
+    var: rundeck_job_executions_info.executions
+'''
+
+RETURN = '''
+paging:
+    description: Results pagination info.
+ returned: success + type: dict + contains: + count: + description: Number of results in the response. + type: int + returned: success + total: + description: Total number of results. + type: int + returned: success + offset: + description: Offset from first of all results. + type: int + returned: success + max: + description: Maximum number of results per page. + type: int + returned: success + sample: { + "count": 20, + "total": 100, + "offset": 0, + "max": 20 + } +executions: + description: Job executions list. + returned: always + type: list + elements: dict + sample: [ + { + "id": 1, + "href": "https://rundeck.example.org/api/39/execution/1", + "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", + "status": "succeeded", + "project": "myproject", + "executionType": "user", + "user": "admin", + "date-started": { + "unixtime": 1633525515026, + "date": "2021-10-06T13:05:15Z" + }, + "date-ended": { + "unixtime": 1633525518386, + "date": "2021-10-06T13:05:18Z" + }, + "job": { + "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "averageDuration": 6381, + "name": "Test", + "group": "", + "project": "myproject", + "description": "", + "options": { + "exit_code": "0" + }, + "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" + }, + "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]", + "argstring": "-exit_code 0", + "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import quote +from ansible_collections.community.general.plugins.module_utils.rundeck import ( + api_argument_spec, + api_request +) + + +class RundeckJobExecutionsInfo(object): + def __init__(self, module): + self.module = module + self.url = self.module.params["url"] + self.api_version = self.module.params["api_version"] + self.job_id = self.module.params["job_id"] + self.offset = self.module.params["offset"] + self.max = self.module.params["max"] + self.status = self.module.params["status"] or "" + + def job_executions(self): + response, info = api_request( + module=self.module, + endpoint="job/%s/executions?offset=%s&max=%s&status=%s" + % (quote(self.job_id), self.offset, self.max, self.status), + method="GET" + ) + + if info["status"] != 200: + self.module.fail_json( + msg=info["msg"], + executions=response + ) + + self.module.exit_json(msg="Executions info result", **response) + + +def main(): + argument_spec = api_argument_spec() + argument_spec.update(dict( + job_id=dict(required=True, type="str"), + offset=dict(type="int", default=0), + max=dict(type="int", default=20), + status=dict( + type="str", + choices=["succeeded", "failed", "aborted", "running"] + ) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckJobExecutionsInfo(module) + rundeck.job_executions() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/rundeck_job_run.py b/ansible_collections/community/general/plugins/modules/rundeck_job_run.py new file mode 100644 index 000000000..894f1bb6f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rundeck_job_run.py @@ -0,0 +1,322 @@ 
+#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021, Phillipe Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rundeck_job_run +short_description: Run a Rundeck job +description: + - This module runs a Rundeck job specified by ID. +author: "Phillipe Smith (@phsmith)" +version_added: 3.8.0 +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + job_id: + type: str + description: + - The job unique ID. + required: true + job_options: + type: dict + description: + - The job options for the steps. + - Numeric values must be quoted. + filter_nodes: + type: str + description: + - Filter the nodes where the jobs must run. + - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax). + run_at_time: + type: str + description: + - Schedule the job execution to run at specific date and time. + - ISO-8601 date and time format like C(2021-10-05T15:45:00-03:00). + loglevel: + type: str + description: + - Log level configuration. + choices: [debug, verbose, info, warn, error] + default: info + wait_execution: + type: bool + description: + - Wait until the job finished the execution. + default: true + wait_execution_delay: + type: int + description: + - Delay, in seconds, between job execution status check requests. + default: 5 + wait_execution_timeout: + type: int + description: + - Job execution wait timeout in seconds. + - If the timeout is reached, the job will be aborted. + - Keep in mind that there is a sleep based on I(wait_execution_delay) after each job status check. + default: 120 + abort_on_timeout: + type: bool + description: + - Send a job abort request if exceeded the I(wait_execution_timeout) specified. 
+ default: false +extends_documentation_fragment: + - community.general.rundeck + - ansible.builtin.url + - community.general.attributes +''' + +EXAMPLES = ''' +- name: Run a Rundeck job + community.general.rundeck_job_run: + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + job_id: "xxxxxxxxxxxxxxxxx" + register: rundeck_job_run + +- name: Show execution info + ansible.builtin.debug: + var: rundeck_job_run.execution_info + +- name: Run a Rundeck job with options + community.general.rundeck_job_run: + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + job_id: "xxxxxxxxxxxxxxxxx" + job_options: + option_1: "value_1" + option_2: "value_3" + option_3: "value_3" + register: rundeck_job_run + +- name: Run a Rundeck job with timeout, delay between status check and abort on timeout + community.general.rundeck_job_run: + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + job_id: "xxxxxxxxxxxxxxxxx" + wait_execution_timeout: 30 + wait_execution_delay: 10 + abort_on_timeout: true + register: rundeck_job_run + +- name: Schedule a Rundeck job + community.general.rundeck_job_run: + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + job_id: "xxxxxxxxxxxxxxxxx" + run_at_time: "2021-10-05T15:45:00-03:00" + register: rundeck_job_schedule + +- name: Fire-and-forget a Rundeck job + community.general.rundeck_job_run: + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + job_id: "xxxxxxxxxxxxxxxxx" + wait_execution: false + register: rundeck_job_run +''' + +RETURN = ''' +execution_info: + description: Rundeck job execution metadata. + returned: always + type: dict + sample: { + "msg": "Job execution succeeded!", + "execution_info": { + "id": 1, + "href": "https://rundeck.example.org/api/39/execution/1", + "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", + "status": "succeeded", + "project": "myproject", + "executionType": "user", + "user": "admin", + "date-started": { + "unixtime": 1633449020784, + "date": "2021-10-05T15:50:20Z" + }, + "date-ended": { + "unixtime": 1633449026358, + "date": "2021-10-05T15:50:26Z" + }, + "job": { + "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "averageDuration": 4917, + "name": "Test", + "group": "", + "project": "myproject", + "description": "", + "options": { + "exit_code": "0" + }, + "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" + }, + "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}", + "argstring": "-exit_code 0", + "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068", + "successfulNodes": [ + "localhost" + ], + "output": "Test!" 
+      }
+    }
+'''
+
+# Modules import
+from datetime import datetime, timedelta
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+    api_argument_spec,
+    api_request
+)
+
+
+class RundeckJobRun(object):
+    def __init__(self, module):
+        self.module = module
+        self.url = self.module.params["url"]
+        self.api_version = self.module.params["api_version"]
+        self.job_id = self.module.params["job_id"]
+        self.job_options = self.module.params["job_options"] or {}
+        self.filter_nodes = self.module.params["filter_nodes"] or ""
+        self.run_at_time = self.module.params["run_at_time"] or ""
+        self.loglevel = self.module.params["loglevel"].upper()
+        self.wait_execution = self.module.params['wait_execution']
+        self.wait_execution_delay = self.module.params['wait_execution_delay']
+        self.wait_execution_timeout = self.module.params['wait_execution_timeout']
+        self.abort_on_timeout = self.module.params['abort_on_timeout']
+
+        for k, v in self.job_options.items():
+            if not isinstance(v, str):
+                self.module.exit_json(
+                    msg="Job option '%s' value must be a string" % k,
+                    execution_info={}
+                )
+
+    def job_status_check(self, execution_id):
+        response = dict()
+        timeout = False
+        due = datetime.now() + timedelta(seconds=self.wait_execution_timeout)
+
+        while not timeout:
+            endpoint = "execution/%d" % execution_id
+            response = api_request(module=self.module, endpoint=endpoint)[0]
+            output = api_request(module=self.module,
+                                 endpoint="execution/%d/output" % execution_id)
+            log_output = "\n".join([x["log"] for x in output[0]["entries"]])
+            response.update({"output": log_output})
+
+            if response["status"] == "aborted":
+                break
+            elif response["status"] == "scheduled":
+                self.module.exit_json(msg="Job scheduled to run at %s" % self.run_at_time,
+                                      execution_info=response,
+                                      changed=True)
+            elif response["status"] == "failed":
+                self.module.fail_json(msg="Job execution failed",
+                                      execution_info=response)
+            elif response["status"] == "succeeded":
+                self.module.exit_json(msg="Job execution succeeded!",
+                                      execution_info=response)
+
+            if datetime.now() >= due:
+                timeout = True
+                break
+
+            # Wait the configured delay before the next status check
+            sleep(self.wait_execution_delay)
+
+        response.update({"timed_out": timeout})
+        return response
+
+    def job_run(self):
+        response, info = api_request(
+            module=self.module,
+            endpoint="job/%s/run" % quote(self.job_id),
+            method="POST",
+            data={
+                "loglevel": self.loglevel,
+                "options": self.job_options,
+                "runAtTime": self.run_at_time,
+                "filter": self.filter_nodes
+            }
+        )
+
+        if info["status"] != 200:
+            self.module.fail_json(msg=info["msg"])
+
+        if not self.wait_execution:
+            self.module.exit_json(msg="Job run sent successfully!",
+                                  execution_info=response)
+
+        job_status = self.job_status_check(response["id"])
+
+        if job_status["timed_out"]:
+            if self.abort_on_timeout:
+                api_request(
+                    module=self.module,
+                    endpoint="execution/%s/abort" % response['id'],
+                    method="GET"
+                )
+
+                abort_status = self.job_status_check(response["id"])
+
+                self.module.fail_json(msg="Job execution aborted due to the specified timeout",
+                                      execution_info=abort_status)
+
+            self.module.fail_json(msg="Job execution timed out",
+                                  execution_info=job_status)
+
+
+def main():
+    argument_spec = api_argument_spec()
+    argument_spec.update(dict(
+        job_id=dict(required=True, type="str"),
+        job_options=dict(type="dict"),
+        filter_nodes=dict(type="str"),
+        run_at_time=dict(type="str"),
+
wait_execution=dict(type="bool", default=True), + wait_execution_delay=dict(type="int", default=5), + wait_execution_timeout=dict(type="int", default=120), + abort_on_timeout=dict(type="bool", default=False), + loglevel=dict( + type="str", + choices=["debug", "verbose", "info", "warn", "error"], + default="info" + ) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckJobRun(module) + rundeck.job_run() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/rundeck_project.py b/ansible_collections/community/general/plugins/modules/rundeck_project.py new file mode 100644 index 000000000..79ca57568 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/rundeck_project.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Ansible module to manage rundeck projects +# Copyright (c) 2017, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: rundeck_project + +short_description: Manage Rundeck projects +description: + - Create and remove Rundeck projects through HTTP API. +author: "Loic Blot (@nerzhul)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + type: str + description: + - Create or remove Rundeck project. + choices: ['present', 'absent'] + default: 'present' + name: + type: str + description: + - Sets the project name. + required: true + api_token: + description: + - Sets the token to authenticate against Rundeck API. 
+ aliases: ["token"] + client_cert: + version_added: '0.2.0' + client_key: + version_added: '0.2.0' + force: + version_added: '0.2.0' + force_basic_auth: + version_added: '0.2.0' + http_agent: + version_added: '0.2.0' + url_password: + version_added: '0.2.0' + url_username: + version_added: '0.2.0' + use_proxy: + version_added: '0.2.0' + validate_certs: + version_added: '0.2.0' +extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes + - community.general.rundeck +''' + +EXAMPLES = ''' +- name: Create a rundeck project + community.general.rundeck_project: + name: "Project_01" + label: "Project 01" + description: "My Project 01" + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + state: present + +- name: Remove a rundeck project + community.general.rundeck_project: + name: "Project_01" + url: "https://rundeck.example.org" + api_token: "mytoken" + state: absent +''' + +RETURN = ''' +rundeck_response: + description: Rundeck response when a failure occurs + returned: failed + type: str +before: + description: dictionary containing project information before modification + returned: success + type: dict +after: + description: dictionary containing project information after modification + returned: success + type: dict +''' + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rundeck import ( + api_argument_spec, + api_request, +) + + +class RundeckProjectManager(object): + def __init__(self, module): + self.module = module + + def get_project_facts(self): + resp, info = api_request( + module=self.module, + endpoint="project/%s" % self.module.params["name"], + ) + + return resp + + def create_or_update_project(self): + facts = self.get_project_facts() + + if facts is None: + # If in check mode don't create project, simulate a fake project creation + if self.module.check_mode: + self.module.exit_json( + changed=True, + before={}, + after={ + "name": self.module.params["name"] + }, + ) + + resp, info = api_request( + module=self.module, + endpoint="projects", + method="POST", + data={ + "name": self.module.params["name"], + "config": {}, + } + ) + + if info["status"] == 201: + self.module.exit_json(changed=True, before={}, after=self.get_project_facts()) + else: + self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"], + before={}, after=self.get_project_facts()) + else: + self.module.exit_json(changed=False, before=facts, after=facts) + + def remove_project(self): + facts = self.get_project_facts() + if facts is None: + self.module.exit_json(changed=False, before={}, after={}) + else: + # If not in check mode, remove the project + if not self.module.check_mode: + api_request( + module=self.module, + endpoint="project/%s" % self.module.params["name"], + method="DELETE", + ) + + self.module.exit_json(changed=True, before=facts, after={}) + + +def main(): + # Also allow the user to set values for fetch_url + argument_spec = api_argument_spec() + argument_spec.update(dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + )) + + argument_spec['api_token']['aliases'] = ['token'] + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckProjectManager(module) + if module.params['state'] == 
'present': + rundeck.create_or_update_project() + elif module.params['state'] == 'absent': + rundeck.remove_project() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/runit.py b/ansible_collections/community/general/plugins/modules/runit.py new file mode 100644 index 000000000..7c5882af8 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/runit.py @@ -0,0 +1,262 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: runit +author: + - James Sumners (@jsumners) +short_description: Manage runit services +description: + - Controls runit services on remote hosts using the sv utility. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - C(started)/C(stopped) are idempotent actions that will not run + commands unless necessary. C(restarted) will always bounce the + service (sv restart) and C(killed) will always bounce the service (sv force-stop). + C(reloaded) will send a HUP (sv reload). + C(once) will run a normally downed sv once (sv once), not really + an idempotent operation. + type: str + choices: [ killed, once, reloaded, restarted, started, stopped ] + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies stopped. + type: bool + service_dir: + description: + - directory runsv watches for services + type: str + default: /var/service + service_src: + description: + - directory where services are defined, the source of symlinks to service_dir. 
+ type: str + default: /etc/sv +''' + +EXAMPLES = r''' +- name: Start sv dnscache, if not running + community.general.runit: + name: dnscache + state: started + +- name: Stop sv dnscache, if running + community.general.runit: + name: dnscache + state: stopped + +- name: Kill sv dnscache, in all cases + community.general.runit: + name: dnscache + state: killed + +- name: Restart sv dnscache, in all cases + community.general.runit: + name: dnscache + state: restarted + +- name: Reload sv dnscache, in all cases + community.general.runit: + name: dnscache + state: reloaded + +- name: Use alternative sv directory location + community.general.runit: + name: dnscache + state: reloaded + service_dir: /run/service +''' + +import os +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class Sv(object): + """ + Main class that handles daemontools, can be subclassed and overridden in case + we want to use a 'derivative' like encore, s6, etc + """ + + def __init__(self, module): + self.extra_paths = [] + self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + + self.module = module + + self.name = module.params['name'] + self.service_dir = module.params['service_dir'] + self.service_src = module.params['service_src'] + self.enabled = None + self.full_state = None + self.state = None + self.pid = None + self.duration = None + + self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True) + self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths) + self.svc_full = '/'.join([self.service_dir, self.name]) + self.src_full = '/'.join([self.service_src, self.name]) + + self.enabled = os.path.lexists(self.svc_full) + if self.enabled: + self.get_status() + else: + self.state = 'stopped' + + def enable(self): + if os.path.exists(self.src_full): + try: + os.symlink(self.src_full, self.svc_full) + except OSError as e: + self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e)) + else: + self.module.fail_json(msg="Could not find source for service to enable (%s)." 
% self.src_full) + + def disable(self): + self.execute_command([self.svc_cmd, 'force-stop', self.src_full]) + try: + os.unlink(self.svc_full) + except OSError as e: + self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e)) + + def get_status(self): + (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full]) + + if err is not None and err: + self.full_state = self.state = err + else: + self.full_state = out + # full_state *may* contain information about the logger: + # "down: /etc/service/service-without-logger: 1s, normally up\n" + # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n" + full_state_no_logger = self.full_state.split("; ")[0] + + m = re.search(r'\(pid (\d+)\)', full_state_no_logger) + if m: + self.pid = m.group(1) + + m = re.search(r' (\d+)s', full_state_no_logger) + if m: + self.duration = m.group(1) + + if re.search(r'^run:', full_state_no_logger): + self.state = 'started' + elif re.search(r'^down:', full_state_no_logger): + self.state = 'stopped' + else: + self.state = 'unknown' + return + + def started(self): + return self.start() + + def start(self): + return self.execute_command([self.svc_cmd, 'start', self.svc_full]) + + def stopped(self): + return self.stop() + + def stop(self): + return self.execute_command([self.svc_cmd, 'stop', self.svc_full]) + + def once(self): + return self.execute_command([self.svc_cmd, 'once', self.svc_full]) + + def reloaded(self): + return self.reload() + + def reload(self): + return self.execute_command([self.svc_cmd, 'reload', self.svc_full]) + + def restarted(self): + return self.restart() + + def restart(self): + return self.execute_command([self.svc_cmd, 'restart', self.svc_full]) + + def killed(self): + return self.kill() + + def kill(self): + return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full]) + + def execute_command(self, cmd): + try: + (rc, out, err) = self.module.run_command(cmd) + except Exception as e: + self.module.fail_json(msg="failed to execute: %s" % to_native(e)) + return rc, out, err + + def report(self): + self.get_status() + states = {} + for k in self.report_vars: + states[k] = self.__dict__[k] + return states + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), + enabled=dict(type='bool'), + service_dir=dict(type='str', default='/var/service'), + service_src=dict(type='str', default='/etc/sv'), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + state = module.params['state'] + enabled = module.params['enabled'] + + sv = Sv(module) + changed = False + + if enabled is not None and enabled != sv.enabled: + changed = True + if not module.check_mode: + try: + if enabled: + sv.enable() + else: + sv.disable() + except (OSError, IOError) as e: + module.fail_json(msg="Could not change service link: %s" % to_native(e)) + + if state is not None and state != sv.state: + changed = True + if not module.check_mode: + getattr(sv, state)() + + module.exit_json(changed=changed, sv=sv.report()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py b/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py new file mode 100644 index 000000000..14b347e44 --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Rainer Leber
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sap_task_list_execute
+short_description: Perform SAP Task list execution
+version_added: "3.5.0"
+description:
+    - The C(sap_task_list_execute) module depends on the C(pyrfc) Python library (version 2.4.0 and upwards).
+      Depending on the distribution you are using, you may need to install additional packages to
+      have it available.
+    - Tasks in the task list which require manual activities will be confirmed automatically.
+    - This module will use the RFC package C(STC_TM_API).
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: none
+    diff_mode:
+        support: none
+
+requirements:
+    - pyrfc >= 2.4.0
+    - xmltodict
+
+options:
+    conn_username:
+        description: The required username for the SAP system.
+        required: true
+        type: str
+    conn_password:
+        description: The required password for the SAP system.
+        required: true
+        type: str
+    host:
+        description: The required host for the SAP system. Can be either an FQDN or IP Address.
+        required: true
+        type: str
+    sysnr:
+        description:
+            - The system number of the SAP system.
+            - You must quote the value to ensure retaining the leading zeros.
+        default: '00'
+        type: str
+    client:
+        description:
+            - The client number to connect to.
+            - You must quote the value to ensure retaining the leading zeros.
+        default: '000'
+        type: str
+    task_to_execute:
+        description: The task list which will be executed.
+        required: true
+        type: str
+    task_parameters:
+        description:
+            - The tasks and the parameters for execution.
+            - If the task list does not need any parameters, this can be empty.
+            - If only specific tasks from the task list should be executed, those tasks must be
+              provided here even when they need no parameters, together with the module
+              parameter I(task_skip=true).
+        type: list
+        elements: dict
+        suboptions:
+            TASKNAME:
+                description: The name of the task in the task list.
+                type: str
+                required: true
+            FIELDNAME:
+                description: The name of the field of the task.
+                type: str
+            VALUE:
+                description: The value which has to be set.
+                type: raw
+    task_settings:
+        description:
+            - Settings for the execution of the task list, as described in TCODE SE80.
+              Check Mode C(CHECKRUN), Background Processing Active C(BATCH) (this is the default value),
+              Asynchronous Execution C(ASYNC), Trace Mode C(TRACE), Server Name C(BATCH_TARGET).
+        default: ['BATCH']
+        type: list
+        elements: str
+    task_skip:
+        description:
+            - If C(true), tasks that are not defined in I(task_parameters) are skipped.
+            - This can be used when only certain tasks from the task list should run.
+        default: false
+        type: bool
+
+author:
+    - Rainer Leber (@rainerleber)
+'''
+
+EXAMPLES = r'''
+# Pass in a message
+- name: Test task execution
+  community.general.sap_task_list_execute:
+    conn_username: DDIC
+    conn_password: Passwd1234
+    host: 10.1.8.10
+    sysnr: '01'
+    client: '000'
+    task_to_execute: SAP_BASIS_SSL_CHECK
+    task_settings: batch
+
+- name: Pass in input parameters
+  community.general.sap_task_list_execute:
+    conn_username: DDIC
+    conn_password: Passwd1234
+    host: 10.1.8.10
+    sysnr: '00'
+    client: '000'
+    task_to_execute: SAP_BASIS_SSL_CHECK
+    task_parameters:
+      - { 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', 'FIELDNAME': 'P_OPT2', 'VALUE': 'X' }
+      - TASKNAME: CL_STCT_CHECK_SEC_CRYPTO
+        FIELDNAME: P_OPT3
+        VALUE: X
+    task_settings: batch
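+
+# Illustrative example (not from the original module documentation): run only
+# selected tasks from the task list by listing them in task_parameters and
+# skipping everything else with task_skip, as described in the option docs.
+- name: Run only specific tasks from the task list
+  community.general.sap_task_list_execute:
+    conn_username: DDIC
+    conn_password: Passwd1234
+    host: 10.1.8.10
+    sysnr: '00'
+    client: '000'
+    task_to_execute: SAP_BASIS_SSL_CHECK
+    task_parameters:
+      - TASKNAME: CL_STCT_CHECK_SEC_CRYPTO
+    task_skip: true
+    task_settings: batch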
+
+# Exported environment variables.
+- name: Hint if the module fails with an error message like ImportError libsapnwrfc.so...
+  community.general.sap_task_list_execute:
+    conn_username: DDIC
+    conn_password: Passwd1234
+    host: 10.1.8.10
+    sysnr: '00'
+    client: '000'
+    task_to_execute: SAP_BASIS_SSL_CHECK
+    task_settings: batch
+  environment:
+    SAPNWRFC_HOME: /usr/local/sap/nwrfcsdk
+    LD_LIBRARY_PATH: /usr/local/sap/nwrfcsdk/lib
+'''
+
+RETURN = r'''
+msg:
+    description: A small execution description.
+    type: str
+    returned: always
+    sample: 'Successful'
+out:
+    description: A complete description of the executed tasks, if available.
+    type: list
+    elements: dict
+    returned: on success
+    sample: [...,{
+        "LOG": {
+            "STCTM_S_LOG": [
+                {
+                    "ACTIVITY": "U_CONFIG",
+                    "ACTIVITY_DESCR": "Configuration changed",
+                    "DETAILS": null,
+                    "EXEC_ID": "20210728184903.815739",
+                    "FIELD": null,
+                    "ID": "STC_TASK",
+                    "LOG_MSG_NO": "000000",
+                    "LOG_NO": null,
+                    "MESSAGE": "For radiobutton group ICM too many options are set; choose only one option",
+                    "MESSAGE_V1": "ICM",
+                    "MESSAGE_V2": null,
+                    "MESSAGE_V3": null,
+                    "MESSAGE_V4": null,
+                    "NUMBER": "048",
+                    "PARAMETER": null,
+                    "PERIOD": "M",
+                    "PERIOD_DESCR": "Maintenance",
+                    "ROW": "0",
+                    "SRC_LINE": "170",
+                    "SRC_OBJECT": "CL_STCTM_REPORT_UI IF_STCTM_UI_TASK~SET_PARAMETERS",
+                    "SYSTEM": null,
+                    "TIMESTMP": "20210728184903",
+                    "TSTPNM": "DDIC",
+                    "TYPE": "E"
+                },...
+            ]}}]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import traceback
+try:
+    from pyrfc import Connection
+except ImportError:
+    HAS_PYRFC_LIBRARY = False
+    PYRFC_LIBRARY_IMPORT_ERROR = traceback.format_exc()
+else:
+    HAS_PYRFC_LIBRARY = True
+    PYRFC_LIBRARY_IMPORT_ERROR = None
+try:
+    import xmltodict
+except ImportError:
+    HAS_XMLTODICT_LIBRARY = False
+    XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc()
+else:
+    HAS_XMLTODICT_LIBRARY = True
+    XMLTODICT_LIBRARY_IMPORT_ERROR = None
+
+
+def call_rfc_method(connection, method_name, kwargs):
+    # PyRFC call function
+    return connection.call(method_name, **kwargs)
+
+
+def process_exec_settings(task_settings):
+    # Convert the list of setting names into the flag dict the API expects
+    exec_settings = {}
+    for settings in task_settings:
+        exec_settings[settings.upper()] = 'X'
+    return exec_settings
+
+
+def xml_to_dict(xml_raw):
+    try:
+        xml_parsed = xmltodict.parse(xml_raw, dict_constructor=dict)
+        xml_dict = xml_parsed['asx:abap']['asx:values']['SESSION']['TASKLIST']
+    except KeyError:
+        xml_dict = "No logs available."
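+    # The session log arrives as ABAP asXML (see the link in the RETURN docs),
+    # roughly shaped like
+    #   <asx:abap><asx:values><SESSION><TASKLIST>...</TASKLIST></SESSION></asx:values></asx:abap>
+    # which is why the parsed dict is unwrapped along that exact key chain above.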
+    return xml_dict
+
+
+def run_module():
+
+    params_spec = dict(
+        TASKNAME=dict(type='str', required=True),
+        FIELDNAME=dict(type='str'),
+        VALUE=dict(type='raw'),
+    )
+
+    # define available arguments/parameters a user can pass to the module
+    module = AnsibleModule(
+        argument_spec=dict(
+            # values for connection
+            conn_username=dict(type='str', required=True),
+            conn_password=dict(type='str', required=True, no_log=True),
+            host=dict(type='str', required=True),
+            sysnr=dict(type='str', default="00"),
+            client=dict(type='str', default="000"),
+            # values for execution tasks
+            task_to_execute=dict(type='str', required=True),
+            task_parameters=dict(type='list', elements='dict', options=params_spec),
+            task_settings=dict(type='list', elements='str', default=['BATCH']),
+            task_skip=dict(type='bool', default=False),
+        ),
+        supports_check_mode=False,
+    )
+    result = dict(changed=False, msg='', out={})
+
+    params = module.params
+
+    username = params['conn_username'].upper()
+    password = params['conn_password']
+    host = params['host']
+    sysnr = params['sysnr']
+    client = params['client']
+
+    task_parameters = params['task_parameters']
+    task_to_execute = params['task_to_execute']
+    task_settings = params['task_settings']
+    task_skip = params['task_skip']
+
+    if not HAS_PYRFC_LIBRARY:
+        module.fail_json(
+            msg=missing_required_lib('pyrfc'),
+            exception=PYRFC_LIBRARY_IMPORT_ERROR)
+
+    if not HAS_XMLTODICT_LIBRARY:
+        module.fail_json(
+            msg=missing_required_lib('xmltodict'),
+            exception=XMLTODICT_LIBRARY_IMPORT_ERROR)
+
+    # basic RFC connection with pyrfc
+    try:
+        conn = Connection(user=username, passwd=password, ashost=host, sysnr=sysnr, client=client)
+    except Exception as err:
+        result['error'] = str(err)
+        result['msg'] = 'Something went wrong connecting to the SAP system.'
+        module.fail_json(**result)
+
+    try:
+        raw_params = call_rfc_method(conn, 'STC_TM_SCENARIO_GET_PARAMETERS',
+                                     {'I_SCENARIO_ID': task_to_execute})
+    except Exception as err:
+        result['error'] = str(err)
+        result['msg'] = 'The task list does not exist.'
+        module.fail_json(**result)
+    exec_settings = process_exec_settings(task_settings)
+    # initialize session task
+    session_init = call_rfc_method(conn, 'STC_TM_SESSION_BEGIN',
+                                   {'I_SCENARIO_ID': task_to_execute,
+                                    'I_INIT_ONLY': 'X'})
+    # Confirm tasks which require manual activities from the task list run
+    for task in raw_params['ET_PARAMETER']:
+        call_rfc_method(conn, 'STC_TM_TASK_CONFIRM',
+                        {'I_SESSION_ID': session_init['E_SESSION_ID'],
+                         'I_TASKNAME': task['TASKNAME']})
+    if task_skip:
+        for task in raw_params['ET_PARAMETER']:
+            call_rfc_method(conn, 'STC_TM_TASK_SKIP',
+                            {'I_SESSION_ID': session_init['E_SESSION_ID'],
+                             'I_TASKNAME': task['TASKNAME'], 'I_SKIP_DEP_TASKS': 'X'})
+    # unskip defined tasks and set parameters
+    if task_parameters is not None:
+        for task in task_parameters:
+            call_rfc_method(conn, 'STC_TM_TASK_UNSKIP',
+                            {'I_SESSION_ID': session_init['E_SESSION_ID'],
+                             'I_TASKNAME': task['TASKNAME'], 'I_UNSKIP_DEP_TASKS': 'X'})
+
+        call_rfc_method(conn, 'STC_TM_SESSION_SET_PARAMETERS',
+                        {'I_SESSION_ID': session_init['E_SESSION_ID'],
+                         'IT_PARAMETER': task_parameters})
+    # start the task
+    try:
+        session_start = call_rfc_method(conn, 'STC_TM_SESSION_RESUME',
+                                        {'I_SESSION_ID': session_init['E_SESSION_ID'],
+                                         'IS_EXEC_SETTINGS': exec_settings})
+    except Exception as err:
+        result['error'] = str(err)
+        result['msg'] = 'Something went wrong. See error.'
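+        # The 'error' key set above carries the raw pyrfc exception text, so the
+        # failure output below shows both the generic message and the root cause.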
+        module.fail_json(**result)
+    # Get the task logs, because the execution may succeed while individual tasks show errors or warnings.
+    # The returned value is ABAP XML: https://help.sap.com/doc/abapdocu_755_index_htm/7.55/en-US/abenabap_xslt_asxml_general.htm
+    session_log = call_rfc_method(conn, 'STC_TM_SESSION_GET_LOG',
+                                  {'I_SESSION_ID': session_init['E_SESSION_ID']})
+
+    task_list = xml_to_dict(session_log['E_LOG'])
+
+    result['changed'] = True
+    result['msg'] = session_start['E_STATUS_DESCR']
+    result['out'] = task_list
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/sapcar_extract.py b/ansible_collections/community/general/plugins/modules/sapcar_extract.py
new file mode 100644
index 000000000..badd466e1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sapcar_extract.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Rainer Leber
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sapcar_extract
+short_description: Manages SAP SAPCAR archives
+version_added: "3.2.0"
+description:
+    - Provides support for unpacking C(sar)/C(car) files with the SAPCAR binary from SAP and pulling
+      information back into Ansible.
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+  check_mode:
+    support: partial
+    details:
+      - Always returns C(changed=true) in check mode.
+  diff_mode:
+    support: none
+options:
+  path:
+    description: The path to the SAR/CAR file.
+    type: path
+    required: true
+  dest:
+    description:
+      - The destination where SAPCAR extracts the SAR file. Missing folders will be created.
+        If this parameter is not provided it will unpack in the same folder as the SAR file.
+    type: path
+  binary_path:
+    description:
+      - The path to the SAPCAR binary, for example, C(/home/dummy/sapcar) or C(https://myserver/SAPCAR).
+        If this parameter is not provided the module will look in C(PATH).
+    type: path
+  signature:
+    description:
+      - If C(true) the signature will be extracted.
+    default: false
+    type: bool
+  security_library:
+    description:
+      - The path to the security library, for example, C(/usr/sap/hostctrl/exe/libsapcrypto.so), for signature operations.
+    type: path
+  manifest:
+    description:
+      - The name of the manifest.
+    default: "SIGNATURE.SMF"
+    type: str
+  remove:
+    description:
+      - If C(true) the SAR/CAR file will be removed. B(This should be used with caution!)
+    default: false
+    type: bool
+author:
+    - Rainer Leber (@RainerLeber)
+'''
+
+EXAMPLES = """
+- name: Extract SAR file
+  community.general.sapcar_extract:
+    path: "~/source/hana.sar"
+
+- name: Extract SAR file with destination
+  community.general.sapcar_extract:
+    path: "~/source/hana.sar"
+    dest: "~/test/"
+
+- name: Extract SAR file with destination, downloading SAPCAR from a web server (can be a fileshare as well)
+  community.general.sapcar_extract:
+    path: "~/source/hana.sar"
+    dest: "~/dest/"
+    binary_path: "https://myserver/SAPCAR"
+
+- name: Extract SAR file and delete SAR after extract
+  community.general.sapcar_extract:
+    path: "~/source/hana.sar"
+    remove: true
+
+- name: Extract SAR file with manifest
+  community.general.sapcar_extract:
+    path: "~/source/hana.sar"
+    signature: true
+
+- name: Extract SAR file with manifest and rename it
+  community.general.sapcar_extract:
+    path: "~/source/hana.sar"
+    manifest: "MyNewSignature.SMF"
+    signature: true
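+
+# Illustrative combination (not part of the original examples): extract the
+# signature and point SAPCAR at a security library, using the library path
+# from the option documentation above.
+- name: Extract SAR file and verify the signature with a security library
+  community.general.sapcar_extract:
+    path: "~/source/hana.sar"
+    signature: true
+    security_library: "/usr/sap/hostctrl/exe/libsapcrypto.so"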
+"""
+
+import os
+from tempfile import NamedTemporaryFile
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.common.text.converters import to_native
+
+
+def get_list_of_files(dir_name):
+    # create a list of file and directory names in the given directory
+    list_of_file = os.listdir(dir_name)
+    allFiles = list()
+    # Iterate over all the entries
+    for entry in list_of_file:
+        # Create full path
+        fullPath = os.path.join(dir_name, entry)
+        # If entry is a directory then get the list of files in this directory
+        if os.path.isdir(fullPath):
+            allFiles = allFiles + [fullPath]
+            allFiles = allFiles + get_list_of_files(fullPath)
+        else:
+            allFiles.append(fullPath)
+    return allFiles
+
+
+def download_SAPCAR(binary_path, module):
+    bin_path = None
+    # download the sapcar binary if a URL is provided, otherwise the given path is returned
+    if binary_path is not None:
+        if binary_path.startswith('https://') or binary_path.startswith('http://'):
+            random_file = NamedTemporaryFile(delete=False)
+            with open_url(binary_path) as response:
+                with random_file as out_file:
+                    data = response.read()
+                    out_file.write(data)
+            os.chmod(out_file.name, 0o700)
+            bin_path = out_file.name
+            module.add_cleanup_file(bin_path)
+        else:
+            bin_path = binary_path
+    return bin_path
+
+
+def check_if_present(command, path, dest, signature, manifest, module):
+    # manipulate the output of the SAR file listing to compare it with the already extracted files
+    iter_command = [command, '-tvf', path]
+    sar_out = module.run_command(iter_command)[1]
+    sar_raw = sar_out.split("\n")[1:]
+    if dest[-1] != "/":
+        dest = dest + "/"
+    sar_files = [dest + x.split(" ")[-1] for x in sar_raw if x]
+    # remove any SIGNATURE.SMF from the list because it will not be unpacked if signature is false
+    if not signature:
+        sar_files = [item for item in sar_files if '.SMF' not in item]
+    # if the signature file is renamed, adjust the expected file list for the comparison
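+    # (for example, with manifest renamed to 'MyNewSignature.SMF' as in the
+    # EXAMPLES above, the SIGNATURE.SMF entries from the archive listing are
+    # dropped and the renamed manifest is expected instead)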
+    if manifest != "SIGNATURE.SMF":
+        sar_files = [item for item in sar_files if '.SMF' not in item]
+        sar_files = sar_files + [manifest]
+    # get extracted files if present
+    files_extracted = get_list_of_files(dest)
+    # compare extracted files with files in sar file
+    present = all(elem in files_extracted for elem in sar_files)
+    return present
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            path=dict(type='path', required=True),
+            dest=dict(type='path'),
+            binary_path=dict(type='path'),
+            signature=dict(type='bool', default=False),
+            security_library=dict(type='path'),
+            manifest=dict(type='str', default="SIGNATURE.SMF"),
+            remove=dict(type='bool', default=False),
+        ),
+        supports_check_mode=True,
+    )
+    rc, out, err = [0, "", ""]
+    params = module.params
+    check_mode = module.check_mode
+
+    path = params['path']
+    dest = params['dest']
+    signature = params['signature']
+    security_library = params['security_library']
+    manifest = params['manifest']
+    remove = params['remove']
+
+    bin_path = download_SAPCAR(params['binary_path'], module)
+
+    if dest is None:
+        dest_head_tail = os.path.split(path)
+        dest = dest_head_tail[0] + '/'
+    else:
+        if not os.path.exists(dest):
+            os.makedirs(dest, 0o755)
+
+    if bin_path is not None:
+        command = [module.get_bin_path(bin_path, required=True)]
+    else:
+        try:
+            command = [module.get_bin_path('sapcar', required=True)]
+        except Exception as e:
+            module.fail_json(msg='Failed to find SAPCAR at the expected path or URL "{0}". Please check whether it is available: {1}'
+                             .format(bin_path, to_native(e)))
+
+    present = check_if_present(command[0], path, dest, signature, manifest, module)
+
+    if not present:
+        command.extend(['-xvf', path, '-R', dest])
+        if security_library:
+            command.extend(['-L', security_library])
+        if signature:
+            command.extend(['-manifest', manifest])
+        if not check_mode:
+            (rc, out, err) = module.run_command(command, check_rc=True)
+        changed = True
+    else:
+        changed = False
+        out = "already unpacked"
+
+    if remove:
+        os.remove(path)
+
+    module.exit_json(changed=changed, message=rc, stdout=out,
+                     stderr=err, command=' '.join(command))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/say.py b/ansible_collections/community/general/plugins/modules/say.py
new file mode 100644
index 000000000..175e5feb0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/say.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Michael DeHaan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: say
+short_description: Makes a computer speak
+description:
+    - Makes a computer speak! Amuse your friends, annoy your coworkers!
+notes:
+    - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
+    - If you like this module, you may also be interested in the osx_say callback plugin.
+    - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on a Linux host.
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+  msg:
+    type: str
+    description:
+      - What to say.
+    required: true
+  voice:
+    type: str
+    description:
+      - What voice to use.
+    required: false
+requirements: [ say or espeak or espeak-ng ]
+author:
+  - "Ansible Core Team"
+  - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+- name: Make a computer speak
+  community.general.say:
+    msg: '{{ inventory_hostname }} is all done'
+    voice: Zarvox
+  delegate_to: localhost
+'''
+import platform
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def say(module, executable, msg, voice):
+    cmd = [executable, msg]
+    if voice:
+        cmd.extend(('-v', voice))
+    module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            msg=dict(required=True),
+            voice=dict(required=False),
+        ),
+        supports_check_mode=True
+    )
+
+    msg = module.params['msg']
+    voice = module.params['voice']
+    possibles = ('say', 'espeak', 'espeak-ng')
+
+    if platform.system() != 'Darwin':
+        # If a 'say' binary is available on a non-Darwin host, it might be the GNUstep tool, which doesn't support the 'voice' parameter.
+        voice = None
+
+    for possible in possibles:
+        executable = module.get_bin_path(possible)
+        if executable:
+            break
+    else:
+        module.fail_json(msg='Unable to find any of %s' % ', '.join(possibles))
+
+    if module.check_mode:
+        module.exit_json(msg=msg, changed=False)
+
+    say(module, executable, msg, voice)
+
+    module.exit_json(msg=msg, changed=True)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_compute.py b/ansible_collections/community/general/plugins/modules/scaleway_compute.py
new file mode 100644
index 000000000..9bd821807
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_compute.py
@@ -0,0 +1,699 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Compute management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_compute
+short_description: Scaleway compute management module
+author: Remy Leone (@remyleone)
+description:
+  - "This module manages compute instances on Scaleway."
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+
+  public_ip:
+    type: str
+    description:
+      - Manage the public IP of a Scaleway server.
+      - Can be a Scaleway IP address UUID.
+      - C(dynamic) means that the IP is destroyed at the same time the host is destroyed.
+      - C(absent) means no public IP at all.
+    default: absent
+
+  enable_ipv6:
+    description:
+      - Enable public IPv6 connectivity on the instance.
+    default: false
+    type: bool
+
+  image:
+    type: str
+    description:
+      - Image identifier used to start the instance with.
+    required: true
+
+  name:
+    type: str
+    description:
+      - Name of the instance.
+
+  organization:
+    type: str
+    description:
+      - Organization identifier.
+      - Exactly one of I(project) and I(organization) must be specified.
+
+  project:
+    type: str
+    description:
+      - Project identifier.
+      - Exactly one of I(project) and I(organization) must be specified.
+    version_added: 4.3.0
+
+  state:
+    type: str
+    description:
+      - Indicate desired state of the instance.
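+      - C(running), C(stopped) and C(restarted) will first create the instance if it does not exist yet.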
+ default: present + choices: + - present + - absent + - running + - restarted + - stopped + + tags: + type: list + elements: str + description: + - List of tags to apply to the instance (5 max) + required: false + default: [] + + region: + type: str + description: + - Scaleway compute zone + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 + + commercial_type: + type: str + description: + - Commercial name of the compute node + required: true + + wait: + description: + - Wait for the instance to reach its desired state before returning. + type: bool + default: false + + wait_timeout: + type: int + description: + - Time to wait for the server to reach the expected state + required: false + default: 300 + + wait_sleep_time: + type: int + description: + - Time to wait before every attempt to check the state of the server + required: false + default: 3 + + security_group: + type: str + description: + - Security group unique identifier + - If no value provided, the default security group or current security group will be used + required: false +''' + +EXAMPLES = ''' +- name: Create a server + community.general.scaleway_compute: + name: foobar + state: present + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + project: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S + tags: + - test + - www + +- name: Create a server attached to a security group + community.general.scaleway_compute: + name: foobar + state: present + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + project: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S + security_group: 4a31b633-118e-4900-bd52-facf1085fc8d + tags: + - test + - www + +- name: Destroy it right after + community.general.scaleway_compute: + name: foobar + state: absent + image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe + project: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: ams1 + commercial_type: VC1S +''' + +RETURN = ''' +''' + +import datetime +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway + +SCALEWAY_SERVER_STATES = ( + 'stopped', + 'stopping', + 'starting', + 'running', + 'locked' +) + +SCALEWAY_TRANSITIONS_STATES = ( + "stopping", + "starting", + "pending" +) + + +def check_image_id(compute_api, image_id): + response = compute_api.get(path="images/%s" % image_id) + + if not response.ok: + msg = 'Error in getting image %s on %s : %s' % (image_id, compute_api.module.params.get('api_url'), response.json) + compute_api.module.fail_json(msg=msg) + + +def fetch_state(compute_api, server): + compute_api.module.debug("fetch_state of server: %s" % server["id"]) + response = compute_api.get(path="servers/%s" % server["id"]) + + if response.status_code == 404: + return "absent" + + if not response.ok: + msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + try: + compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"])) + return response.json["server"]["state"] + except KeyError: + compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json) + + +def wait_to_complete_state_transition(compute_api, server, wait=None): + if wait is None: + wait = compute_api.module.params["wait"] + if not wait: + return + + wait_timeout = 
compute_api.module.params["wait_timeout"] + wait_sleep_time = compute_api.module.params["wait_sleep_time"] + + start = datetime.datetime.utcnow() + end = start + datetime.timedelta(seconds=wait_timeout) + while datetime.datetime.utcnow() < end: + compute_api.module.debug("We are going to wait for the server to finish its transition") + if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES: + compute_api.module.debug("It seems that the server is not in transition anymore.") + compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server)) + break + time.sleep(wait_sleep_time) + else: + compute_api.module.fail_json(msg="Server takes too long to finish its transition") + + +def public_ip_payload(compute_api, public_ip): + # We don't want a public ip + if public_ip in ("absent",): + return {"dynamic_ip_required": False} + + # IP is only attached to the instance and is released as soon as the instance terminates + if public_ip in ("dynamic", "allocated"): + return {"dynamic_ip_required": True} + + # We check that the IP we want to attach exists, if so its ID is returned + response = compute_api.get("ips") + if not response.ok: + msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + ip_list = [] + try: + ip_list = response.json["ips"] + except KeyError: + compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json) + + lookup = [ip["id"] for ip in ip_list] + if public_ip in lookup: + return {"public_ip": public_ip} + + +def create_server(compute_api, server): + compute_api.module.debug("Starting a create_server") + target_server = None + data = {"enable_ipv6": server["enable_ipv6"], + "tags": server["tags"], + "commercial_type": server["commercial_type"], + "image": server["image"], + "dynamic_ip_required": server["dynamic_ip_required"], + "name": server["name"] + } + + if server["project"]: + data["project"] = server["project"] + + if server["organization"]: + data["organization"] = server["organization"] + + if server["security_group"]: + data["security_group"] = server["security_group"] + + response = compute_api.post(path="servers", data=data) + + if not response.ok: + msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + try: + target_server = response.json["server"] + except KeyError: + compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + return target_server + + +def restart_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="reboot") + + +def stop_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="poweroff") + + +def start_server(compute_api, server): + return perform_action(compute_api=compute_api, server=server, action="poweron") + + +def perform_action(compute_api, server, action): + response = compute_api.post(path="servers/%s/action" % server["id"], + data={"action": action}) + if not response.ok: + msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=server) + + return response + + +def remove_server(compute_api, server): + compute_api.module.debug("Starting remove server strategy") 
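+    # The API only removes stopped servers; absent_strategy stops the server
+    # and waits for the state transition before delegating to this helper.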
+ response = compute_api.delete(path="servers/%s" % server["id"]) + if not response.ok: + msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=server) + + return response + + +def present_strategy(compute_api, wished_server): + compute_api.module.debug("Starting present strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + + if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + return changed, target_server + + +def absent_strategy(compute_api, wished_server): + compute_api.module.debug("Starting absent strategy") + changed = False + target_server = None + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + return changed, {"status": "Server already absent."} + else: + target_server = query_results[0] + + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be made absent." % target_server["id"]} + + # A server MUST be stopped to be deleted. + while fetch_state(compute_api=compute_api, server=target_server) != "stopped": + wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) + response = stop_server(compute_api=compute_api, server=target_server) + + if not response.ok: + err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code, + response.json) + compute_api.module.fail_json(msg=err_msg) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True) + + response = remove_server(compute_api=compute_api, server=target_server) + + if not response.ok: + err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=err_msg) + + return changed, {"status": "Server %s deleted" % target_server["id"]} + + +def running_strategy(compute_api, wished_server): + compute_api.module.debug("Starting running strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being run."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + + if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed before running it." 
% target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + current_state = fetch_state(compute_api=compute_api, server=target_server) + if current_state not in ("running", "starting"): + compute_api.module.debug("running_strategy: Server in state: %s" % current_state) + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s attributes would be changed." % target_server["id"]} + + response = start_server(compute_api=compute_api, server=target_server) + if not response.ok: + msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + return changed, target_server + + +def stop_strategy(compute_api, wished_server): + compute_api.module.debug("Starting stop strategy") + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + changed = False + + if not query_results: + + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being stopped."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + changed = True + else: + target_server = query_results[0] + + compute_api.module.debug("stop_strategy: Servers are found.") + + if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, { + "status": "Server %s attributes would be changed before stopping it." % target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + current_state = fetch_state(compute_api=compute_api, server=target_server) + if current_state not in ("stopped",): + compute_api.module.debug("stop_strategy: Server in state: %s" % current_state) + + changed = True + + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be stopped." % target_server["id"]} + + response = stop_server(compute_api=compute_api, server=target_server) + compute_api.module.debug(response.json) + compute_api.module.debug(response.ok) + + if not response.ok: + msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + return changed, target_server + + +def restart_strategy(compute_api, wished_server): + compute_api.module.debug("Starting restart strategy") + changed = False + query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1) + + if not query_results: + changed = True + if compute_api.module.check_mode: + return changed, {"status": "A server would be created before being rebooted."} + + target_server = create_server(compute_api=compute_api, server=wished_server) + else: + target_server = query_results[0] + + if server_attributes_should_be_changed(compute_api=compute_api, + target_server=target_server, + wished_server=wished_server): + changed = True + + if compute_api.module.check_mode: + return changed, { + "status": "Server %s attributes would be changed before rebooting it." 
% target_server["id"]} + + target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server) + + changed = True + if compute_api.module.check_mode: + return changed, {"status": "Server %s would be rebooted." % target_server["id"]} + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + if fetch_state(compute_api=compute_api, server=target_server) in ("running",): + response = restart_server(compute_api=compute_api, server=target_server) + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + if not response.ok: + msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code, + response.json) + compute_api.module.fail_json(msg=msg) + + if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",): + response = restart_server(compute_api=compute_api, server=target_server) + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + if not response.ok: + msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code, + response.json) + compute_api.module.fail_json(msg=msg) + + return changed, target_server + + +state_strategy = { + "present": present_strategy, + "restarted": restart_strategy, + "stopped": stop_strategy, + "running": running_strategy, + "absent": absent_strategy +} + + +def find(compute_api, wished_server, per_page=1): + compute_api.module.debug("Getting inside find") + # Only the name attribute is accepted in the Compute query API + response = compute_api.get("servers", params={"name": wished_server["name"], + "per_page": per_page}) + + if not response.ok: + msg = 'Error during server search: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + search_results = response.json["servers"] + + return search_results + + +PATCH_MUTABLE_SERVER_ATTRIBUTES = ( + "ipv6", + "tags", + "name", + "dynamic_ip_required", + "security_group", +) + + +def server_attributes_should_be_changed(compute_api, target_server, wished_server): + compute_api.module.debug("Checking if server attributes should be changed") + compute_api.module.debug("Current Server: %s" % target_server) + compute_api.module.debug("Wished Server: %s" % wished_server) + debug_dict = dict((x, (target_server[x], wished_server[x])) + for x in PATCH_MUTABLE_SERVER_ATTRIBUTES + if x in target_server and x in wished_server) + compute_api.module.debug("Debug dict %s" % debug_dict) + try: + for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: + if key in target_server and key in wished_server: + # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook + if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys( + ) and target_server[key]["id"] != wished_server[key]: + return True + # Handling other structure compare simply the two objects content + elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]: + return True + return False + except AttributeError: + compute_api.module.fail_json(msg="Error while checking if attributes should be changed") + + +def server_change_attributes(compute_api, target_server, wished_server): + compute_api.module.debug("Starting patching server attributes") + patch_payload = dict() + + for key in PATCH_MUTABLE_SERVER_ATTRIBUTES: + if key in target_server and key in wished_server: + # When you are working with dict, only ID 
matter as we ask user to put only the resource ID in the playbook + if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]: + # Setting all key to current value except ID + key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id") + # Setting ID to the user specified ID + key_dict["id"] = wished_server[key] + patch_payload[key] = key_dict + elif not isinstance(target_server[key], dict): + patch_payload[key] = wished_server[key] + + response = compute_api.patch(path="servers/%s" % target_server["id"], + data=patch_payload) + if not response.ok: + msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json) + compute_api.module.fail_json(msg=msg) + + try: + target_server = response.json["server"] + except KeyError: + compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json) + + wait_to_complete_state_transition(compute_api=compute_api, server=target_server) + + return target_server + + +def core(module): + region = module.params["region"] + wished_server = { + "state": module.params["state"], + "image": module.params["image"], + "name": module.params["name"], + "commercial_type": module.params["commercial_type"], + "enable_ipv6": module.params["enable_ipv6"], + "tags": module.params["tags"], + "organization": module.params["organization"], + "project": module.params["project"], + "security_group": module.params["security_group"] + } + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + compute_api = Scaleway(module=module) + + check_image_id(compute_api, wished_server["image"]) + + # IP parameters of the wished server depends on the configuration + ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"]) + wished_server.update(ip_payload) + + changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server) + module.exit_json(changed=changed, msg=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + image=dict(required=True), + name=dict(), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + commercial_type=dict(required=True), + enable_ipv6=dict(default=False, type="bool"), + public_ip=dict(default="absent"), + state=dict(choices=list(state_strategy.keys()), default='present'), + tags=dict(type="list", elements="str", default=[]), + organization=dict(), + project=dict(), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + wait_sleep_time=dict(type="int", default=3), + security_group=dict(), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ('organization', 'project'), + ], + required_one_of=[ + ('organization', 'project'), + ], + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py b/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py new file mode 100644 index 000000000..9a9d9adde --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway VPC management module +# +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_compute_private_network
+short_description: Scaleway compute - private network management
+version_added: 5.2.0
+author: Pascal MANGIN (@pastral)
+description:
+  - This module adds a private network to a compute instance, or removes it
+    (U(https://developer.scaleway.com)).
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the VPC.
+    default: present
+    choices:
+      - present
+      - absent
+
+  project:
+    type: str
+    description:
+      - Project identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(par1)).
+    required: true
+    choices:
+      - ams1
+      - EMEA-NL-EVS
+      - par1
+      - EMEA-FR-PAR1
+      - par2
+      - EMEA-FR-PAR2
+      - waw1
+      - EMEA-PL-WAW1
+
+  compute_id:
+    type: str
+    description:
+      - ID of the compute instance (see M(community.general.scaleway_compute)).
+    required: true
+
+  private_network_id:
+    type: str
+    description:
+      - ID of the private network (see M(community.general.scaleway_private_network)).
+    required: true
+
+'''
+
+EXAMPLES = '''
+- name: Plug a VM into a private network
+  community.general.scaleway_compute_private_network:
+    project: '{{ scw_project }}'
+    state: present
+    region: par1
+    compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89"
+    private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89"
+  register: nicsvpc_creation_task
+
+- name: Unplug a VM from a private network
+  community.general.scaleway_compute_private_network:
+    project: '{{ scw_project }}'
+    state: absent
+    region: par1
+    compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89"
+    private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89"
+
+'''
+
+RETURN = '''
+scaleway_compute_private_network:
+  description: Information on the VPC.
+  returned: success when I(state=present)
+  type: dict
+  sample:
+    {
+      "created_at": "2022-01-15T11:11:12.676445Z",
+      "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
+      "name": "network",
+      "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+      "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+      "tags": [
+        "tag1",
+        "tag2",
+        "tag3",
+        "tag4",
+        "tag5"
+      ],
+      "updated_at": "2022-01-15T11:12:04.624837Z",
+      "zone": "fr-par-2"
+    }
+'''
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_nics_info(api, compute_id, private_network_id):
+
+    response = api.get('servers/' + compute_id + '/private_nics')
+    if not response.ok:
+        msg = "Error while getting the server's NIC information: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json)
+        api.module.fail_json(msg=msg)
+
+    # Return the NIC attached to the requested private network, if any.
+    for nic in response.json['private_nics']:
+        if nic['private_network_id'] == private_network_id:
+            return nic
+
+    return None
+
+
+def present_strategy(api, compute_id, private_network_id):
+
+    changed = False
+    nic = get_nics_info(api, compute_id, private_network_id)
+    if nic is not None:
+        return changed, nic
+
+    data = {"private_network_id": private_network_id}
+    changed = True
+    if api.module.check_mode:
+        return changed, {"status": "a private network would be added to the server"}
+
+    response = api.post(path='servers/' + compute_id + '/private_nics', data=data)
+
+    if not response.ok:
+        api.module.fail_json(msg='Error when adding a private network to a server [{0}: {1}]'.format(response.status_code, response.json))
+
+    return changed, response.json
+
+
+def absent_strategy(api, compute_id, private_network_id):
+
+    changed = False
+    nic = get_nics_info(api, compute_id, private_network_id)
+    if nic is None:
+        return changed, {}
+
+    changed = True
+    if api.module.check_mode:
+        return changed, {"status": "the private network would be detached from the server"}
+
+    response = api.delete('servers/' + compute_id + '/private_nics/' + nic['id'])
+
+    if not response.ok:
+        api.module.fail_json(msg='Error deleting private network from server [{0}: {1}]'.format(
+            response.status_code, response.json))
+
+    return changed, response.json
+
+
+def core(module):
+
+    compute_id = module.params['compute_id']
+    pn_id = module.params['private_network_id']
+
+    region = module.params["region"]
+    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+    api = Scaleway(module=module)
+    if module.params["state"] == "absent":
+        changed, summary = absent_strategy(api=api, compute_id=compute_id, private_network_id=pn_id)
+    else:
+        changed, summary = present_strategy(api=api, compute_id=compute_id, private_network_id=pn_id)
+    module.exit_json(changed=changed, scaleway_compute_private_network=summary)
+
+
+def main():
+    argument_spec = scaleway_argument_spec()
+    argument_spec.update(dict(
+        state=dict(default='present', choices=['absent', 'present']),
+        project=dict(required=True),
+        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+        compute_id=dict(required=True),
+        private_network_id=dict(required=True)
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container.py
b/ansible_collections/community/general/plugins/modules/scaleway_container.py
new file mode 100644
index 000000000..19ffae419
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container.py
@@ -0,0 +1,412 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container management module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container
+short_description: Scaleway Container management
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module manages containers on a Scaleway account.
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.scaleway_waitable_resource
+  - community.general.attributes
+requirements:
+  - passlib[argon2] >= 1.7.4
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the container.
+    default: present
+    choices:
+      - present
+      - absent
+
+  namespace_id:
+    type: str
+    description:
+      - Container namespace identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(fr-par)).
+    required: true
+    choices:
+      - fr-par
+      - nl-ams
+      - pl-waw
+
+  name:
+    type: str
+    description:
+      - Name of the container.
+    required: true
+
+  description:
+    description:
+      - Description of the container.
+    type: str
+    default: ''
+
+  min_scale:
+    description:
+      - Minimum number of replicas for the container.
+    type: int
+
+  max_scale:
+    description:
+      - Maximum number of replicas for the container.
+    type: int
+
+  environment_variables:
+    description:
+      - Environment variables of the container.
+      - Injected in the container at runtime.
+    type: dict
+    default: {}
+
+  secret_environment_variables:
+    description:
+      - Secret environment variables of the container.
+      - Updating those values will not output a C(changed) state in Ansible.
+      - Injected in the container at runtime.
+    type: dict
+    default: {}
+
+  memory_limit:
+    description:
+      - Resources define performance characteristics of your container.
+      - They are allocated to your container at runtime.
+    type: int
+
+  container_timeout:
+    description:
+      - The length of time your handler can spend processing a request before being stopped.
+    type: str
+
+  privacy:
+    description:
+      - Privacy policies define whether a container can be executed anonymously.
+      - Choose C(public) to enable anonymous execution, or C(private) to protect your container with an authentication mechanism provided by the Scaleway API.
+    type: str
+    default: public
+    choices:
+      - public
+      - private
+
+  registry_image:
+    description:
+      - The name of the image used for the container.
+    type: str
+    required: true
+
+  max_concurrency:
+    description:
+      - Maximum number of connections per container.
+      - This parameter will be used to trigger autoscaling.
+    type: int
+
+  protocol:
+    description:
+      - Communication protocol of the container.
+    type: str
+    default: http1
+    choices:
+      - http1
+      - h2c
+
+  port:
+    description:
+      - Listen port used to expose the container.
+    type: int
+
+  redeploy:
+    description:
+      - Redeploy the container if an update is required.
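+      - Has no effect during container creation, where the parameter is not supported.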
+ type: bool + default: false +''' + +EXAMPLES = ''' +- name: Create a container + community.general.scaleway_container: + namespace_id: '{{ scw_container_namespace }}' + state: present + region: fr-par + name: my-awesome-container + registry_image: rg.fr-par.scw.cloud/funcscwtestrgy2f9zw/nginx:latest + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: container_creation_task + +- name: Make sure container is deleted + community.general.scaleway_container: + namespace_id: '{{ scw_container_namespace }}' + state: absent + region: fr-par + name: my-awesome-container +''' + +RETURN = ''' +container: + description: The container information. + returned: when I(state=present) + type: dict + sample: + cpu_limit: 140 + description: Container used for testing scaleway_container ansible module + domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + http_option: "" + id: c9070eb0-d7a4-48dd-9af3-4fb139890721 + max_concurrency: 50 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: cn-ansible-test + namespace_id: 75e299f1-d1e5-4e6b-bc6e-4fb51cfe1e69 + port: 80 + privacy: public + protocol: http1 + region: fr-par + registry_image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +''' + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "created", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "min_scale", + "max_scale", + "environment_variables", + "memory_limit", + "timeout", + "privacy", + "registry_image", + "max_concurrency", + "protocol", + "port", + "secret_environment_variables" +) + + +def payload_from_wished_cn(wished_cn): + payload = { + "namespace_id": wished_cn["namespace_id"], + "name": wished_cn["name"], + "description": wished_cn["description"], + "min_scale": wished_cn["min_scale"], + "max_scale": wished_cn["max_scale"], + "environment_variables": wished_cn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]), + "memory_limit": wished_cn["memory_limit"], + "timeout": wished_cn["timeout"], + "privacy": wished_cn["privacy"], + "registry_image": wished_cn["registry_image"], + "max_concurrency": wished_cn["max_concurrency"], + "protocol": wished_cn["protocol"], + "port": wished_cn["port"], + "redeploy": wished_cn["redeploy"] + } + + return payload + + +def absent_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("containers") + cn_lookup = dict((cn["name"], cn) + for cn in cn_list) + + if wished_cn["name"] not in cn_lookup: + return changed, {} + + target_cn = cn_lookup[wished_cn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + 
api.module.fail_json(msg='Error deleting container [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("containers") + cn_lookup = dict((cn["name"], cn) + for cn in cn_list) + + payload_cn = payload_from_wished_cn(wished_cn) + + if wished_cn["name"] not in cn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container would be created."} + + # Creation doesn't support `redeploy` parameter + del payload_cn["redeploy"] + + # Create container + api.warn(payload_cn) + creation_response = api.post(path=api.api_path, + data=payload_cn) + + if not creation_response.ok: + msg = "Error during container creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cn = cn_lookup[wished_cn["name"]] + decoded_target_cn = deepcopy(target_cn) + decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"], + payload_cn["secret_environment_variables"]) + patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn, + wished=payload_cn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_cn + + changed = True + if api.module.check_mode: + return changed, {"status": "Container attributes would be changed."} + + cn_patch_response = api.patch(path=api.api_path + "/%s" % target_cn["id"], + data=patch_payload) + + if not cn_patch_response.ok: + api.module.fail_json(msg='Error during container attributes update: [{0}: {1}]'.format( + cn_patch_response.status_code, cn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_container = { + "state": module.params["state"], + "namespace_id": module.params["namespace_id"], + "name": module.params["name"], + "description": module.params['description'], + "min_scale": module.params["min_scale"], + "max_scale": module.params["max_scale"], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'], + "memory_limit": module.params["memory_limit"], + "timeout": module.params["container_timeout"], + "privacy": module.params["privacy"], + "registry_image": module.params["registry_image"], + "max_concurrency": module.params["max_concurrency"], + "protocol": module.params["protocol"], + "port": module.params["port"], + "redeploy": module.params["redeploy"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/containers" % region + + changed, summary = state_strategy[wished_container["state"]](api=api, wished_cn=wished_container) + + 
module.exit_json(changed=changed, container=summary)
+
+
+def main():
+    argument_spec = scaleway_argument_spec()
+    argument_spec.update(scaleway_waitable_resource_argument_spec())
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        namespace_id=dict(type='str', required=True),
+        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+        name=dict(type='str', required=True),
+        description=dict(type='str', default=''),
+        min_scale=dict(type='int'),
+        max_scale=dict(type='int'),
+        memory_limit=dict(type='int'),
+        container_timeout=dict(type='str'),
+        privacy=dict(type='str', default='public', choices=['public', 'private']),
+        registry_image=dict(type='str', required=True),
+        max_concurrency=dict(type='int'),
+        protocol=dict(type='str', default='http1', choices=['http1', 'h2c']),
+        port=dict(type='int'),
+        redeploy=dict(type='bool', default=False),
+        environment_variables=dict(type='dict', default={}),
+        secret_environment_variables=dict(type='dict', default={}, no_log=True)
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_info.py b/ansible_collections/community/general/plugins/modules/scaleway_container_info.py
new file mode 100644
index 000000000..20ebece21
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_info.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_info
+short_description: Retrieve information on Scaleway Container
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a container on a Scaleway account.
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+options:
+  namespace_id:
+    type: str
+    description:
+      - Container namespace identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(fr-par)).
+    required: true
+    choices:
+      - fr-par
+      - nl-ams
+      - pl-waw
+
+  name:
+    type: str
+    description:
+      - Name of the container.
+    required: true
+'''
+
+EXAMPLES = '''
+- name: Get container info
+  community.general.scaleway_container_info:
+    namespace_id: '{{ scw_container_namespace }}'
+    region: fr-par
+    name: my-awesome-container
+  register: container_info_task
+'''
+
+RETURN = '''
+container:
+  description: The container information.
+ returned: always + type: dict + sample: + cpu_limit: 140 + description: Container used for testing scaleway_container ansible module + domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + http_option: "" + id: c9070eb0-d7a4-48dd-9af3-4fb139890721 + max_concurrency: 50 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: cn-ansible-test + namespace_id: 75e299f1-d1e5-4e6b-bc6e-4fb51cfe1e69 + port: 80 + privacy: public + protocol: http1 + region: fr-par + registry_image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("containers") + cn_lookup = dict((fn["name"], fn) + for fn in cn_list) + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container lookup: Unable to find container named '%s' in namespace '%s'" % (wished_cn["name"], + wished_cn["namespace_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container = { + "namespace_id": module.params["namespace_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/containers" % region + + summary = info_strategy(api=api, wished_cn=wished_container) + + module.exit_json(changed=False, container=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py new file mode 100644 index 000000000..fb01b8672 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py @@ -0,0 +1,296 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway Serverless container namespace management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_container_namespace +short_description: Scaleway Container namespace management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages container namespaces on Scaleway account. 
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.scaleway_waitable_resource
+  - community.general.attributes
+requirements:
+  - passlib[argon2] >= 1.7.4
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the container namespace.
+    default: present
+    choices:
+      - present
+      - absent
+
+  project_id:
+    type: str
+    description:
+      - Project identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(fr-par)).
+    required: true
+    choices:
+      - fr-par
+      - nl-ams
+      - pl-waw
+
+  name:
+    type: str
+    description:
+      - Name of the container namespace.
+    required: true
+
+  description:
+    description:
+      - Description of the container namespace.
+    type: str
+    default: ''
+
+  environment_variables:
+    description:
+      - Environment variables of the container namespace.
+      - Injected in containers at runtime.
+    type: dict
+    default: {}
+
+  secret_environment_variables:
+    description:
+      - Secret environment variables of the container namespace.
+      - Updating those values will not output a C(changed) state in Ansible.
+      - Injected in containers at runtime.
+    type: dict
+    default: {}
+'''
+
+EXAMPLES = '''
+- name: Create a container namespace
+  community.general.scaleway_container_namespace:
+    project_id: '{{ scw_project }}'
+    state: present
+    region: fr-par
+    name: my-awesome-container-namespace
+    environment_variables:
+      MY_VAR: my_value
+    secret_environment_variables:
+      MY_SECRET_VAR: my_secret_value
+  register: container_namespace_creation_task
+
+- name: Make sure container namespace is deleted
+  community.general.scaleway_container_namespace:
+    project_id: '{{ scw_project }}'
+    state: absent
+    region: fr-par
+    name: my-awesome-container-namespace
+'''
+
+RETURN = '''
+container_namespace:
+  description: The container namespace information.
+ returned: when I(state=present) + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: null + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-container-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +''' + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, + resource_attributes_should_be_changed, SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "environment_variables", + "secret_environment_variables" +) + + +def payload_from_wished_cn(wished_cn): + payload = { + "project_id": wished_cn["project_id"], + "name": wished_cn["name"], + "description": wished_cn["description"], + "environment_variables": wished_cn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = dict((cn["name"], cn) + for cn in cn_list) + + if wished_cn["name"] not in cn_lookup: + return changed, {} + + target_cn = cn_lookup[wished_cn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container namespace would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting container namespace [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = dict((cn["name"], cn) + for cn in cn_list) + + payload_cn = payload_from_wished_cn(wished_cn) + + if wished_cn["name"] not in cn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container namespace would be created."} + + # Create container namespace + api.warn(payload_cn) + creation_response = api.post(path=api.api_path, + data=payload_cn) + + if not creation_response.ok: + msg = "Error during container namespace creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cn = cn_lookup[wished_cn["name"]] + decoded_target_cn = deepcopy(target_cn) + decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"], + payload_cn["secret_environment_variables"]) + patch_payload = 
resource_attributes_should_be_changed(target=decoded_target_cn,
+                                                          wished=payload_cn,
+                                                          verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
+                                                          mutable_attributes=MUTABLE_ATTRIBUTES)
+
+    if not patch_payload:
+        return changed, target_cn
+
+    changed = True
+    if api.module.check_mode:
+        return changed, {"status": "Container namespace attributes would be changed."}
+
+    cn_patch_response = api.patch(path=api.api_path + "/%s" % target_cn["id"],
+                                  data=patch_payload)
+
+    if not cn_patch_response.ok:
+        api.module.fail_json(msg='Error during container namespace attributes update: [{0}: {1}]'.format(
+            cn_patch_response.status_code, cn_patch_response.json['message']))
+
+    api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES)
+    # Return the freshly fetched resource rather than the PATCH response.
+    response = api.get(path=api.api_path + "/%s" % target_cn["id"])
+    return changed, response.json
+
+
+state_strategy = {
+    "present": present_strategy,
+    "absent": absent_strategy
+}
+
+
+def core(module):
+    SecretVariables.ensure_scaleway_secret_package(module)
+
+    region = module.params["region"]
+    wished_container_namespace = {
+        "state": module.params["state"],
+        "project_id": module.params["project_id"],
+        "name": module.params["name"],
+        "description": module.params['description'],
+        "environment_variables": module.params['environment_variables'],
+        "secret_environment_variables": module.params['secret_environment_variables']
+    }
+
+    api = Scaleway(module=module)
+    api.api_path = "containers/v1beta1/regions/%s/namespaces" % region
+
+    changed, summary = state_strategy[wished_container_namespace["state"]](api=api, wished_cn=wished_container_namespace)
+
+    module.exit_json(changed=changed, container_namespace=summary)
+
+
+def main():
+    argument_spec = scaleway_argument_spec()
+    argument_spec.update(scaleway_waitable_resource_argument_spec())
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        project_id=dict(type='str', required=True),
+        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+        name=dict(type='str', required=True),
+        description=dict(type='str', default=''),
+        environment_variables=dict(type='dict', default={}),
+        secret_environment_variables=dict(type='dict', default={}, no_log=True)
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_namespace_info.py b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace_info.py
new file mode 100644
index 000000000..758720dd5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace_info.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container namespace info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_namespace_info
+short_description: Retrieve information on Scaleway Container namespace
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a container namespace on a Scaleway account.
+extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +options: + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container namespace. + required: true +''' + +EXAMPLES = ''' +- name: Get a container namespace info + community.general.scaleway_container_namespace_info: + project_id: '{{ scw_project }}' + region: fr-par + name: my-awesome-container-namespace + register: container_namespace_info_task +''' + +RETURN = ''' +container_namespace: + description: The container namespace information. + returned: always + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: null + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-container-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = dict((fn["name"], fn) + for fn in cn_list) + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container namespace lookup: Unable to find container namespace named '%s' in project '%s'" % (wished_cn["name"], + wished_cn["project_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container namespace lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_cn=wished_container_namespace) + + module.exit_json(changed=False, container_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py b/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py new file mode 100644 index 000000000..5eee571ec --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py @@ -0,0 +1,272 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway Container registry 
management module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_registry
+short_description: Scaleway Container registry management module
+version_added: 5.8.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module manages container registries on a Scaleway account.
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.scaleway_waitable_resource
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the container registry.
+    default: present
+    choices:
+      - present
+      - absent
+
+  project_id:
+    type: str
+    description:
+      - Project identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(fr-par)).
+    required: true
+    choices:
+      - fr-par
+      - nl-ams
+      - pl-waw
+
+  name:
+    type: str
+    description:
+      - Name of the container registry.
+    required: true
+
+  description:
+    description:
+      - Description of the container registry.
+    type: str
+    default: ''
+
+  privacy_policy:
+    type: str
+    description:
+      - Default visibility policy.
+      - Everyone will be able to pull images from a C(public) registry.
+    choices:
+      - public
+      - private
+    default: private
+'''
+
+EXAMPLES = '''
+- name: Create a container registry
+  community.general.scaleway_container_registry:
+    project_id: '{{ scw_project }}'
+    state: present
+    region: fr-par
+    name: my-awesome-container-registry
+  register: container_registry_creation_task
+
+- name: Make sure container registry is deleted
+  community.general.scaleway_container_registry:
+    project_id: '{{ scw_project }}'
+    state: absent
+    region: fr-par
+    name: my-awesome-container-registry
+'''
+
+RETURN = '''
+container_registry:
+  description: The container registry information.
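+  # A hedged consumption sketch (the task layout is an assumption; C(endpoint)
+  # is taken from the sample below):
+  #
+  #   - ansible.builtin.debug:
+  #       msg: "Push images to {{ container_registry_creation_task.container_registry.endpoint }}"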
+ returned: when I(state=present) + type: dict + sample: + created_at: "2022-10-14T09:51:07.949716Z" + description: Managed by Ansible + endpoint: rg.fr-par.scw.cloud/my-awesome-registry + id: 0d7d5270-7864-49c2-920b-9fd6731f3589 + image_count: 0 + is_public: false + name: my-awesome-registry + organization_id: 10697b59-5c34-4d24-8d15-9ff2d3b89f58 + project_id: 3da4f0b2-06be-4773-8ec4-5dfa435381be + region: fr-par + size: 0 + status: ready + status_message: "" + updated_at: "2022-10-14T09:51:07.949716Z" +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "is_public" +) + + +def payload_from_wished_cr(wished_cr): + payload = { + "project_id": wished_cr["project_id"], + "name": wished_cr["name"], + "description": wished_cr["description"], + "is_public": wished_cr["privacy_policy"] == "public" + } + + return payload + + +def absent_strategy(api, wished_cr): + changed = False + + cr_list = api.fetch_all_resources("namespaces") + cr_lookup = dict((cr["name"], cr) + for cr in cr_list) + + if wished_cr["name"] not in cr_lookup: + return changed, {} + + target_cr = cr_lookup[wished_cr["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container registry would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cr["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting container registry [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cr): + changed = False + + cr_list = api.fetch_all_resources("namespaces") + cr_lookup = dict((cr["name"], cr) + for cr in cr_list) + + payload_cr = payload_from_wished_cr(wished_cr) + + if wished_cr["name"] not in cr_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container registry would be created."} + + # Create container registry + api.warn(payload_cr) + creation_response = api.post(path=api.api_path, + data=payload_cr) + + if not creation_response.ok: + msg = "Error during container registry creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cr = cr_lookup[wished_cr["name"]] + patch_payload = resource_attributes_should_be_changed(target=target_cr, + wished=payload_cr, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_cr + + changed = True + if api.module.check_mode: + return changed, {"status": "Container registry attributes would be changed."} + + cr_patch_response = api.patch(path=api.api_path + "/%s" % target_cr["id"], + data=patch_payload) + + if not cr_patch_response.ok: + api.module.fail_json(msg='Error during container registry attributes 
update: [{0}: {1}]'.format(
+            cr_patch_response.status_code, cr_patch_response.json['message']))
+
+    api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES)
+    response = api.get(path=api.api_path + "/%s" % target_cr["id"])
+    return changed, response.json
+
+
+state_strategy = {
+    "present": present_strategy,
+    "absent": absent_strategy
+}
+
+
+def core(module):
+    region = module.params["region"]
+    wished_container_registry = {
+        "state": module.params["state"],
+        "project_id": module.params["project_id"],
+        "name": module.params["name"],
+        "description": module.params['description'],
+        "privacy_policy": module.params['privacy_policy']
+    }
+
+    api = Scaleway(module=module)
+    api.api_path = "registry/v1/regions/%s/namespaces" % region
+
+    changed, summary = state_strategy[wished_container_registry["state"]](api=api, wished_cr=wished_container_registry)
+
+    module.exit_json(changed=changed, container_registry=summary)
+
+
+def main():
+    argument_spec = scaleway_argument_spec()
+    argument_spec.update(scaleway_waitable_resource_argument_spec())
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        project_id=dict(type='str', required=True),
+        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+        name=dict(type='str', required=True),
+        description=dict(type='str', default=''),
+        privacy_policy=dict(type='str', default='private', choices=['public', 'private'])
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_registry_info.py b/ansible_collections/community/general/plugins/modules/scaleway_container_registry_info.py
new file mode 100644
index 000000000..9c641edcb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_registry_info.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container registry info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_registry_info
+short_description: Scaleway Container registry info module
+version_added: 5.8.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a container registry on a Scaleway account.
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+options:
+  project_id:
+    type: str
+    description:
+      - Project identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(fr-par)).
+    required: true
+    choices:
+      - fr-par
+      - nl-ams
+      - pl-waw
+
+  name:
+    type: str
+    description:
+      - Name of the container registry.
+    required: true
+'''
+
+EXAMPLES = '''
+- name: Get a container registry info
+  community.general.scaleway_container_registry_info:
+    project_id: '{{ scw_project }}'
+    region: fr-par
+    name: my-awesome-container-registry
+  register: container_registry_info_task
+'''
+
+RETURN = '''
+container_registry:
+  description: The container registry information.
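+  # Note: the API exposes visibility as the boolean C(is_public); in the
+  # scaleway_container_registry module, I(privacy_policy=public) maps to
+  # C(is_public=true). A hedged access sketch, reusing the register name from
+  # the example above:
+  #   "{{ container_registry_info_task.container_registry.is_public }}"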
+ returned: always + type: dict + sample: + created_at: "2022-10-14T09:51:07.949716Z" + description: Managed by Ansible + endpoint: rg.fr-par.scw.cloud/my-awesome-registry + id: 0d7d5270-7864-49c2-920b-9fd6731f3589 + image_count: 0 + is_public: false + name: my-awesome-registry + organization_id: 10697b59-5c34-4d24-8d15-9ff2d3b89f58 + project_id: 3da4f0b2-06be-4773-8ec4-5dfa435381be + region: fr-par + size: 0 + status: ready + status_message: "" + updated_at: "2022-10-14T09:51:07.949716Z" +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = dict((fn["name"], fn) + for fn in cn_list) + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container registries lookup: Unable to find container registry named '%s' in project '%s'" % (wished_cn["name"], + wished_cn["project_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container registry lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "registry/v1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_cn=wished_container_namespace) + + module.exit_json(changed=False, container_registry=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py b/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py new file mode 100644 index 000000000..edc9f6cab --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py @@ -0,0 +1,379 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway database backups management module +# +# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com). +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_database_backup +short_description: Scaleway database backups management module +version_added: 1.2.0 +author: Guillaume Rodriguez (@guillaume_ro_fr) +description: + - "This module manages database backups on Scaleway account U(https://developer.scaleway.com)." +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Indicate desired state of the database backup. 
+ - C(present) creates a backup. + - C(absent) deletes the backup. + - C(exported) creates a download link for the backup. + - C(restored) restores the backup to a new database. + type: str + default: present + choices: + - present + - absent + - exported + - restored + + region: + description: + - Scaleway region to use (for example C(fr-par)). + type: str + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + id: + description: + - UUID used to identify the database backup. + - Required for C(absent), C(exported) and C(restored) states. + type: str + + name: + description: + - Name used to identify the database backup. + - Required for C(present) state. + - Ignored when I(state=absent), I(state=exported) or I(state=restored). + type: str + required: false + + database_name: + description: + - Name used to identify the database. + - Required for C(present) and C(restored) states. + - Ignored when I(state=absent) or I(state=exported). + type: str + required: false + + instance_id: + description: + - UUID of the instance associated to the database backup. + - Required for C(present) and C(restored) states. + - Ignored when I(state=absent) or I(state=exported). + type: str + required: false + + expires_at: + description: + - Expiration datetime of the database backup (ISO 8601 format). + - Ignored when I(state=absent), I(state=exported) or I(state=restored). + type: str + required: false + + wait: + description: + - Wait for the instance to reach its desired state before returning. + type: bool + default: false + + wait_timeout: + description: + - Time to wait for the backup to reach the expected state. + type: int + required: false + default: 300 + + wait_sleep_time: + description: + - Time to wait before every attempt to check the state of the backup. + type: int + required: false + default: 3 +''' + +EXAMPLES = ''' + - name: Create a backup + community.general.scaleway_database_backup: + name: 'my_backup' + state: present + region: 'fr-par' + database_name: 'my-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' + + - name: Export a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: exported + region: 'fr-par' + + - name: Restore a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: restored + region: 'fr-par' + database_name: 'my-new-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' + + - name: Remove a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: absent + region: 'fr-par' +''' + +RETURN = ''' +metadata: + description: Backup metadata. 
+ returned: when I(state=present), I(state=exported) or I(state=restored) + type: dict + sample: { + "metadata": { + "created_at": "2020-08-06T12:42:05.631049Z", + "database_name": "my-database", + "download_url": null, + "download_url_expires_at": null, + "expires_at": null, + "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07", + "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49", + "instance_name": "my-instance", + "name": "backup_name", + "region": "fr-par", + "size": 600000, + "status": "ready", + "updated_at": "2020-08-06T12:42:10.581649Z" + } + } +''' + +import datetime +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + scaleway_argument_spec, + SCALEWAY_REGIONS, +) + +stable_states = ( + 'ready', + 'deleting', +) + + +def wait_to_complete_state_transition(module, account_api, backup=None): + wait_timeout = module.params['wait_timeout'] + wait_sleep_time = module.params['wait_sleep_time'] + + if backup is None or backup['status'] in stable_states: + return backup + + start = datetime.datetime.utcnow() + end = start + datetime.timedelta(seconds=wait_timeout) + while datetime.datetime.utcnow() < end: + module.debug('We are going to wait for the backup to finish its transition') + + response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) + if not response.ok: + module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json)) + break + response_json = response.json + + if response_json['status'] in stable_states: + module.debug('It seems that the backup is not in transition anymore.') + module.debug('Backup in state: %s' % response_json['status']) + return response_json + time.sleep(wait_sleep_time) + else: + module.fail_json(msg='Backup takes too long to finish its transition') + + +def present_strategy(module, account_api, backup): + name = module.params['name'] + database_name = module.params['database_name'] + instance_id = module.params['instance_id'] + expiration_date = module.params['expires_at'] + + if backup is not None: + if (backup['name'] == name or name is None) and ( + backup['expires_at'] == expiration_date or expiration_date is None): + wait_to_complete_state_transition(module, account_api, backup) + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + payload = {} + if name is not None: + payload['name'] = name + if expiration_date is not None: + payload['expires_at'] = expiration_date + + response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']), + payload) + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json)) + + if module.check_mode: + module.exit_json(changed=True) + + payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id} + if expiration_date is not None: + payload['expires_at'] = expiration_date + + response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload) + + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json)) + + +def 
absent_strategy(module, account_api, backup): + if backup is None: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json)) + + +def exported_strategy(module, account_api, backup): + if backup is None: + module.fail_json(msg=('Backup "%s" not found' % module.params['id'])) + + if backup['download_url'] is not None: + module.exit_json(changed=False, metadata=backup) + + if module.check_mode: + module.exit_json(changed=True) + + backup = wait_to_complete_state_transition(module, account_api, backup) + response = account_api.post( + '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {}) + + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json)) + + +def restored_strategy(module, account_api, backup): + if backup is None: + module.fail_json(msg=('Backup "%s" not found' % module.params['id'])) + + database_name = module.params['database_name'] + instance_id = module.params['instance_id'] + + if module.check_mode: + module.exit_json(changed=True) + + backup = wait_to_complete_state_transition(module, account_api, backup) + + payload = {'database_name': database_name, 'instance_id': instance_id} + response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']), + payload) + + if response.ok: + result = wait_to_complete_state_transition(module, account_api, response.json) + module.exit_json(changed=True, metadata=result) + + module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json)) + + +state_strategy = { + 'present': present_strategy, + 'absent': absent_strategy, + 'exported': exported_strategy, + 'restored': restored_strategy, +} + + +def core(module): + state = module.params['state'] + backup_id = module.params['id'] + + account_api = Scaleway(module) + + if backup_id is None: + backup_by_id = None + else: + response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id)) + status_code = response.status_code + backup_json = response.json + backup_by_id = None + if status_code == 404: + backup_by_id = None + elif response.ok: + backup_by_id = backup_json + else: + module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message'])) + + state_strategy[state](module, account_api, backup_by_id) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']), + region=dict(required=True, choices=SCALEWAY_REGIONS), + id=dict(), + name=dict(type='str'), + database_name=dict(required=False), + instance_id=dict(required=False), + expires_at=dict(), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=300), + wait_sleep_time=dict(type='int', default=3), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_together=[ + 
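+        # database_name and instance_id only make sense as a pair: a backup is
+        # always scoped to one database inside one RDB instance. The
+        # required_if rules below add the per-state requirements on top.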
['database_name', 'instance_id'],
+        ],
+        required_if=[
+            ['state', 'present', ['name', 'database_name', 'instance_id']],
+            ['state', 'absent', ['id']],
+            ['state', 'exported', ['id']],
+            ['state', 'restored', ['id', 'database_name', 'instance_id']],
+        ],
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function.py b/ansible_collections/community/general/plugins/modules/scaleway_function.py
new file mode 100644
index 000000000..378545866
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless function management module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_function
+short_description: Scaleway Function management
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module manages functions on a Scaleway account.
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.scaleway_waitable_resource
+  - community.general.attributes
+requirements:
+  - passlib[argon2] >= 1.7.4
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the function.
+    default: present
+    choices:
+      - present
+      - absent
+
+  namespace_id:
+    type: str
+    description:
+      - Function namespace identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(fr-par)).
+    required: true
+    choices:
+      - fr-par
+      - nl-ams
+      - pl-waw
+
+  name:
+    type: str
+    description:
+      - Name of the function.
+    required: true
+
+  description:
+    description:
+      - Description of the function.
+    type: str
+    default: ''
+
+  min_scale:
+    description:
+      - Minimum number of replicas for the function.
+    type: int
+
+  max_scale:
+    description:
+      - Maximum number of replicas for the function.
+    type: int
+
+  environment_variables:
+    description:
+      - Environment variables of the function.
+      - Injected in the function at runtime.
+    type: dict
+    default: {}
+
+  secret_environment_variables:
+    description:
+      - Secret environment variables of the function.
+      - Updating those values will not output a C(changed) state in Ansible.
+      - Injected in the function at runtime.
+    type: dict
+    default: {}
+
+  runtime:
+    description:
+      - Runtime of the function.
+      - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available runtimes.
+    type: str
+    required: true
+
+  memory_limit:
+    description:
+      - Resources define performance characteristics of your function.
+      - They are allocated to your function at runtime.
+    type: int
+
+  function_timeout:
+    description:
+      - The length of time your handler can spend processing a request before being stopped.
+    type: str
+
+  handler:
+    description:
+      - The C(module-name.export) value in your function.
+    type: str
+
+  privacy:
+    description:
+      - Privacy policies define whether a function can be executed anonymously.
+      - Choose C(public) to enable anonymous execution, or C(private) to protect your function with an authentication mechanism provided by the Scaleway API.
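+      # A hedged note: a C(private) function rejects anonymous calls; invoking
+      # it requires a token obtained from the Scaleway API (the exact header
+      # name and token endpoint are assumptions and are not handled by this
+      # module), for example:
+      #   curl -H "X-Auth-Token: <token>" https://<domain_name>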
+ type: str + default: public + choices: + - public + - private + + redeploy: + description: + - Redeploy the function if update is required. + type: bool + default: false +''' + +EXAMPLES = ''' +- name: Create a function + community.general.scaleway_function: + namespace_id: '{{ scw_function_namespace }}' + region: fr-par + state: present + name: my-awesome-function + runtime: python3 + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: function_creation_task + +- name: Make sure function is deleted + community.general.scaleway_function: + namespace_id: '{{ scw_function_namespace }}' + region: fr-par + state: absent + name: my-awesome-function +''' + +RETURN = ''' +function: + description: The function information. + returned: when I(state=present) + type: dict + sample: + cpu_limit: 140 + description: Function used for testing scaleway_function ansible module + domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + handler: handler.handle + http_option: "" + id: ceb64dc4-4464-4196-8e20-ecef705475d3 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: fn-ansible-test + namespace_id: 82737d8d-0ebb-4d89-b0ad-625876eca50d + privacy: public + region: fr-par + runtime: python310 + runtime_message: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +''' + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "created", + "absent" +) + +VERIFIABLE_MUTABLE_ATTRIBUTES = ( + "description", + "min_scale", + "max_scale", + "environment_variables", + "runtime", + "memory_limit", + "timeout", + "handler", + "privacy", + "secret_environment_variables" +) + +MUTABLE_ATTRIBUTES = VERIFIABLE_MUTABLE_ATTRIBUTES + ( + "redeploy", +) + + +def payload_from_wished_fn(wished_fn): + payload = { + "namespace_id": wished_fn["namespace_id"], + "name": wished_fn["name"], + "description": wished_fn["description"], + "min_scale": wished_fn["min_scale"], + "max_scale": wished_fn["max_scale"], + "runtime": wished_fn["runtime"], + "memory_limit": wished_fn["memory_limit"], + "timeout": wished_fn["timeout"], + "handler": wished_fn["handler"], + "privacy": wished_fn["privacy"], + "redeploy": wished_fn["redeploy"], + "environment_variables": wished_fn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("functions") + fn_lookup = dict((fn["name"], fn) + for fn in fn_list) + + if wished_fn["name"] not in fn_lookup: + return changed, {} + + target_fn = fn_lookup[wished_fn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Function would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting 
function [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("functions") + fn_lookup = dict((fn["name"], fn) + for fn in fn_list) + + payload_fn = payload_from_wished_fn(wished_fn) + + if wished_fn["name"] not in fn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A function would be created."} + + # Creation doesn't support `redeploy` parameter + del payload_fn["redeploy"] + + # Create function + api.warn(payload_fn) + creation_response = api.post(path=api.api_path, + data=payload_fn) + + if not creation_response.ok: + msg = "Error during function creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_fn = fn_lookup[wished_fn["name"]] + decoded_target_fn = deepcopy(target_fn) + decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"], + payload_fn["secret_environment_variables"]) + + patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn, + wished=payload_fn, + verifiable_mutable_attributes=VERIFIABLE_MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_fn + + changed = True + if api.module.check_mode: + return changed, {"status": "Function attributes would be changed."} + + fn_patch_response = api.patch(path=api.api_path + "/%s" % target_fn["id"], + data=patch_payload) + + if not fn_patch_response.ok: + api.module.fail_json(msg='Error during function attributes update: [{0}: {1}]'.format( + fn_patch_response.status_code, fn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_function = { + "state": module.params["state"], + "namespace_id": module.params["namespace_id"], + "name": module.params["name"], + "description": module.params['description'], + "min_scale": module.params['min_scale'], + "max_scale": module.params['max_scale'], + "runtime": module.params["runtime"], + "memory_limit": module.params["memory_limit"], + "timeout": module.params["function_timeout"], + "handler": module.params["handler"], + "privacy": module.params["privacy"], + "redeploy": module.params["redeploy"], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/functions" % region + + changed, summary = state_strategy[wished_function["state"]](api=api, wished_fn=wished_function) + + module.exit_json(changed=changed, function=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + 
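+    # The spec is assembled in layers: the shared Scaleway API options, then
+    # the wait/timeout options for resources with state transitions, then the
+    # module-specific options below.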
argument_spec.update(scaleway_waitable_resource_argument_spec())
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        namespace_id=dict(type='str', required=True),
+        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+        name=dict(type='str', required=True),
+        description=dict(type='str', default=''),
+        min_scale=dict(type='int'),
+        max_scale=dict(type='int'),
+        runtime=dict(type='str', required=True),
+        memory_limit=dict(type='int'),
+        function_timeout=dict(type='str'),
+        handler=dict(type='str'),
+        privacy=dict(type='str', default='public', choices=['public', 'private']),
+        redeploy=dict(type='bool', default=False),
+        environment_variables=dict(type='dict', default={}),
+        secret_environment_variables=dict(type='dict', default={}, no_log=True)
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function_info.py b/ansible_collections/community/general/plugins/modules/scaleway_function_info.py
new file mode 100644
index 000000000..c30f0cdb0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function_info.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless function info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_function_info
+short_description: Retrieve information on Scaleway Function
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a function on a Scaleway account.
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+  - community.general.attributes.info_module
+
+options:
+  namespace_id:
+    type: str
+    description:
+      - Function namespace identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(fr-par)).
+    required: true
+    choices:
+      - fr-par
+      - nl-ams
+      - pl-waw
+
+  name:
+    type: str
+    description:
+      - Name of the function.
+    required: true
+'''
+
+EXAMPLES = '''
+- name: Get a function info
+  community.general.scaleway_function_info:
+    namespace_id: '{{ scw_function_namespace }}'
+    region: fr-par
+    name: my-awesome-function
+  register: function_info_task
+'''
+
+RETURN = '''
+function:
+  description: The function information.
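+  # A hedged access sketch, reusing the register name from the example above:
+  #   "{{ function_info_task.function.domain_name }}"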
+ returned: always + type: dict + sample: + cpu_limit: 140 + description: Function used for testing scaleway_function ansible module + domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + handler: handler.handle + http_option: "" + id: ceb64dc4-4464-4196-8e20-ecef705475d3 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: fn-ansible-test + namespace_id: 82737d8d-0ebb-4d89-b0ad-625876eca50d + privacy: public + region: fr-par + runtime: python310 + runtime_message: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_fn): + fn_list = api.fetch_all_resources("functions") + fn_lookup = dict((fn["name"], fn) + for fn in fn_list) + + if wished_fn["name"] not in fn_lookup: + msg = "Error during function lookup: Unable to find function named '%s' in namespace '%s'" % (wished_fn["name"], + wished_fn["namespace_id"]) + + api.module.fail_json(msg=msg) + + target_fn = fn_lookup[wished_fn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + msg = "Error during function lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_function = { + "namespace_id": module.params["namespace_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/functions" % region + + summary = info_strategy(api=api, wished_fn=wished_function) + + module.exit_json(changed=False, function=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py new file mode 100644 index 000000000..f6310b35b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py @@ -0,0 +1,298 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway Serverless function namespace management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_function_namespace +short_description: Scaleway Function namespace management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages function namespaces on Scaleway account. 
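+# Note: the API returns secret values as argon2 hashes (see the RETURN sample
+# below); passlib[argon2] is required so the module can verify wished plaintext
+# secrets against those hashes and keep the result idempotent.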
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.scaleway_waitable_resource
+  - community.general.attributes
+requirements:
+  - passlib[argon2] >= 1.7.4
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the function namespace.
+    default: present
+    choices:
+      - present
+      - absent
+
+  project_id:
+    type: str
+    description:
+      - Project identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(fr-par)).
+    required: true
+    choices:
+      - fr-par
+      - nl-ams
+      - pl-waw
+
+  name:
+    type: str
+    description:
+      - Name of the function namespace.
+    required: true
+
+  description:
+    description:
+      - Description of the function namespace.
+    type: str
+    default: ''
+
+  environment_variables:
+    description:
+      - Environment variables of the function namespace.
+      - Injected in functions at runtime.
+    type: dict
+    default: {}
+
+  secret_environment_variables:
+    description:
+      - Secret environment variables of the function namespace.
+      - Updating those values will not output a C(changed) state in Ansible.
+      - Injected in functions at runtime.
+    type: dict
+    default: {}
+'''
+
+EXAMPLES = '''
+- name: Create a function namespace
+  community.general.scaleway_function_namespace:
+    project_id: '{{ scw_project }}'
+    state: present
+    region: fr-par
+    name: my-awesome-function-namespace
+    environment_variables:
+      MY_VAR: my_value
+    secret_environment_variables:
+      MY_SECRET_VAR: my_secret_value
+  register: function_namespace_creation_task
+
+- name: Make sure function namespace is deleted
+  community.general.scaleway_function_namespace:
+    project_id: '{{ scw_project }}'
+    state: absent
+    region: fr-par
+    name: my-awesome-function-namespace
+'''
+
+RETURN = '''
+function_namespace:
+  description: The function namespace information.
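+  # C(registry_endpoint) and C(registry_namespace_id) reference the container
+  # registry provisioned alongside the namespace; they can be empty while the
+  # namespace is still C(pending), as in the sample below.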
+ returned: when I(state=present) + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: null + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-function-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +''' + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "environment_variables", + "secret_environment_variables", +) + + +def payload_from_wished_fn(wished_fn): + payload = { + "project_id": wished_fn["project_id"], + "name": wished_fn["name"], + "description": wished_fn["description"], + "environment_variables": wished_fn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = dict((fn["name"], fn) + for fn in fn_list) + + if wished_fn["name"] not in fn_lookup: + return changed, {} + + target_fn = fn_lookup[wished_fn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Function namespace would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting function namespace [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = dict((fn["name"], fn) + for fn in fn_list) + + payload_fn = payload_from_wished_fn(wished_fn) + + if wished_fn["name"] not in fn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A function namespace would be created."} + + # Create function namespace + api.warn(payload_fn) + creation_response = api.post(path=api.api_path, + data=payload_fn) + + if not creation_response.ok: + msg = "Error during function namespace creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_fn = fn_lookup[wished_fn["name"]] + decoded_target_fn = deepcopy(target_fn) + decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"], + payload_fn["secret_environment_variables"]) + + patch_payload = 
resource_attributes_should_be_changed(target=decoded_target_fn,
+                                                          wished=payload_fn,
+                                                          verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
+                                                          mutable_attributes=MUTABLE_ATTRIBUTES)
+
+    if not patch_payload:
+        return changed, target_fn
+
+    changed = True
+    if api.module.check_mode:
+        return changed, {"status": "Function namespace attributes would be changed."}
+
+    fn_patch_response = api.patch(path=api.api_path + "/%s" % target_fn["id"],
+                                  data=patch_payload)
+
+    if not fn_patch_response.ok:
+        api.module.fail_json(msg='Error during function namespace attributes update: [{0}: {1}]'.format(
+            fn_patch_response.status_code, fn_patch_response.json['message']))
+
+    api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES)
+    response = api.get(path=api.api_path + "/%s" % target_fn["id"])
+    return changed, response.json
+
+
+state_strategy = {
+    "present": present_strategy,
+    "absent": absent_strategy
+}
+
+
+def core(module):
+    SecretVariables.ensure_scaleway_secret_package(module)
+
+    region = module.params["region"]
+    wished_function_namespace = {
+        "state": module.params["state"],
+        "project_id": module.params["project_id"],
+        "name": module.params["name"],
+        "description": module.params['description'],
+        "environment_variables": module.params['environment_variables'],
+        "secret_environment_variables": module.params['secret_environment_variables']
+    }
+
+    api = Scaleway(module=module)
+    api.api_path = "functions/v1beta1/regions/%s/namespaces" % region
+
+    changed, summary = state_strategy[wished_function_namespace["state"]](api=api, wished_fn=wished_function_namespace)
+
+    module.exit_json(changed=changed, function_namespace=summary)
+
+
+def main():
+    argument_spec = scaleway_argument_spec()
+    argument_spec.update(scaleway_waitable_resource_argument_spec())
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        project_id=dict(type='str', required=True),
+        region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+        name=dict(type='str', required=True),
+        description=dict(type='str', default=''),
+        environment_variables=dict(type='dict', default={}),
+        secret_environment_variables=dict(type='dict', default={}, no_log=True)
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    core(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function_namespace_info.py b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace_info.py
new file mode 100644
index 000000000..f3ea5ddfc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace_info.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless function namespace info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_function_namespace_info
+short_description: Retrieve information on Scaleway Function namespace
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a function namespace on a Scaleway account.
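+# A hedged access sketch, reusing the register name from the example below:
+#   "{{ function_namespace_info_task.function_namespace.status }}"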
+extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +options: + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function namespace. + required: true +''' + +EXAMPLES = ''' +- name: Get a function namespace info + community.general.scaleway_function_namespace_info: + project_id: '{{ scw_project }}' + region: fr-par + name: my-awesome-function-namespace + register: function_namespace_info_task +''' + +RETURN = ''' +function_namespace: + description: The function namespace information. + returned: always + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: null + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-function-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_fn): + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = dict((fn["name"], fn) + for fn in fn_list) + + if wished_fn["name"] not in fn_lookup: + msg = "Error during function namespace lookup: Unable to find function namespace named '%s' in project '%s'" % (wished_fn["name"], + wished_fn["project_id"]) + + api.module.fail_json(msg=msg) + + target_fn = fn_lookup[wished_fn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + msg = "Error during function namespace lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_function_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_fn=wished_function_namespace) + + module.exit_json(changed=False, function_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_image_info.py b/ansible_collections/community/general/plugins/modules/scaleway_image_info.py new file mode 100644 index 000000000..bdae18514 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_image_info.py @@ -0,0 +1,130 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: scaleway_image_info +short_description: Gather information about the Scaleway images available +description: + - Gather information about the Scaleway images available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +options: + region: + type: str + description: + - Scaleway compute zone. + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway images information + community.general.scaleway_image_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_image_info }}" +''' + +RETURN = r''' +--- +scaleway_image_info: + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + returned: success + type: list + elements: dict + sample: + "scaleway_image_info": [ + { + "arch": "x86_64", + "creation_date": "2018-07-17T16:18:49.276456+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": false, + "dtb": "", + "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.9.93 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", + "modification_date": "2018-07-17T16:42:06.319315+00:00", + "name": "Debian Stretch", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", + "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) + + +class ScalewayImageInfo(Scaleway): + + def __init__(self, module): + super(ScalewayImageInfo, self).__init__(module) + self.name = 'images' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_image_info=ScalewayImageInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_ip.py b/ansible_collections/community/general/plugins/modules/scaleway_ip.py new file mode 100644 index 000000000..cf8e2e601 --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/scaleway_ip.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway IP management module
+#
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type

DOCUMENTATION = '''
+---
+module: scaleway_ip
+short_description: Scaleway IP management module
+author: Remy Leone (@remyleone)
+description:
+  - This module manages IPs on a Scaleway account U(https://developer.scaleway.com).
+extends_documentation_fragment:
+  - community.general.scaleway
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  state:
+    type: str
+    description:
+      - Indicate desired state of the IP.
+    default: present
+    choices:
+      - present
+      - absent
+
+  organization:
+    type: str
+    description:
+      - Scaleway organization identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(par1)).
+    required: true
+    choices:
+      - ams1
+      - EMEA-NL-EVS
+      - par1
+      - EMEA-FR-PAR1
+      - par2
+      - EMEA-FR-PAR2
+      - waw1
+      - EMEA-PL-WAW1
+
+  id:
+    type: str
+    description:
+      - ID of the Scaleway IP (UUID).
+
+  server:
+    type: str
+    description:
+      - ID of the server you want to attach an IP to.
+      - To detach an IP, do not specify this option.
+
+  reverse:
+    type: str
+    description:
+      - Reverse DNS to assign to the IP.
+'''
+
+EXAMPLES = '''
+- name: Create an IP
+  community.general.scaleway_ip:
+    organization: '{{ scw_org }}'
+    state: present
+    region: par1
+  register: ip_creation_task
+
+- name: Make sure IP is deleted
+  community.general.scaleway_ip:
+    id: '{{ ip_creation_task.scaleway_ip.id }}'
+    state: absent
+    region: par1
+'''
+
+RETURN = '''
+data:
+  description: This is only present when I(state=present).
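+  # Note the two shapes in this file: the example above reads the created IP
+  # back through C(ip_creation_task.scaleway_ip.id), while this section
+  # documents the raw C(data) payload as returned by the API.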
+ returned: when I(state=present) + type: dict + sample: { + "ips": [ + { + "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", + "reverse": null, + "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", + "server": { + "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", + "name": "ansible_tuto-1" + }, + "address": "212.47.232.136" + } + ] + } +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule + + +def ip_attributes_should_be_changed(api, target_ip, wished_ip): + patch_payload = {} + + if target_ip["reverse"] != wished_ip["reverse"]: + patch_payload["reverse"] = wished_ip["reverse"] + + # IP is assigned to a server + if target_ip["server"] is None and wished_ip["server"]: + patch_payload["server"] = wished_ip["server"] + + # IP is unassigned to a server + try: + if target_ip["server"]["id"] and wished_ip["server"] is None: + patch_payload["server"] = wished_ip["server"] + except (TypeError, KeyError): + pass + + # IP is migrated between 2 different servers + try: + if target_ip["server"]["id"] != wished_ip["server"]: + patch_payload["server"] = wished_ip["server"] + except (TypeError, KeyError): + pass + + return patch_payload + + +def payload_from_wished_ip(wished_ip): + return dict( + (k, v) + for k, v in wished_ip.items() + if k != 'id' and v is not None + ) + + +def present_strategy(api, wished_ip): + changed = False + + response = api.get('ips') + if not response.ok: + api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( + response.status_code, response.json['message'])) + + ips_list = response.json["ips"] + ip_lookup = dict((ip["id"], ip) + for ip in ips_list) + + if wished_ip["id"] not in ip_lookup.keys(): + changed = True + if api.module.check_mode: + return changed, {"status": "An IP would be created."} + + # Create IP + creation_response = api.post('/ips', + data=payload_from_wished_ip(wished_ip)) + + if not creation_response.ok: + msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + return changed, creation_response.json["ip"] + + target_ip = ip_lookup[wished_ip["id"]] + patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip) + + if not patch_payload: + return changed, target_ip + + changed = True + if api.module.check_mode: + return changed, {"status": "IP attributes would be changed."} + + ip_patch_response = api.patch(path="ips/%s" % target_ip["id"], + data=patch_payload) + + if not ip_patch_response.ok: + api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format( + ip_patch_response.status_code, ip_patch_response.json['message'])) + + return changed, ip_patch_response.json["ip"] + + +def absent_strategy(api, wished_ip): + response = api.get('ips') + changed = False + + status_code = response.status_code + ips_json = response.json + ips_list = ips_json["ips"] + + if not response.ok: + api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( + status_code, response.json['message'])) + + ip_lookup = dict((ip["id"], ip) + for ip in ips_list) + if wished_ip["id"] not in ip_lookup.keys(): + return changed, {} + + changed = True + if api.module.check_mode: + return changed, {"status": "IP would be destroyed"} + + response = api.delete('/ips/' + wished_ip["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format( 
+ response.status_code, response.json)) + + return changed, response.json + + +def core(module): + wished_ip = { + "organization": module.params['organization'], + "reverse": module.params["reverse"], + "id": module.params["id"], + "server": module.params["server"] + } + + region = module.params["region"] + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + api = Scaleway(module=module) + if module.params["state"] == "absent": + changed, summary = absent_strategy(api=api, wished_ip=wished_ip) + else: + changed, summary = present_strategy(api=api, wished_ip=wished_ip) + module.exit_json(changed=changed, scaleway_ip=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + organization=dict(required=True), + server=dict(), + reverse=dict(), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + id=dict() + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py b/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py new file mode 100644 index 000000000..1fd4be589 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: scaleway_ip_info +short_description: Gather information about the Scaleway ips available +description: + - Gather information about the Scaleway ips available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway ips information + community.general.scaleway_ip_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_ip_info }}" +''' + +RETURN = r''' +--- +scaleway_ip_info: + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
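core() above picks a strategy function based on the state parameter; scaleway_lb later in this patch expresses the same choice as a dict lookup. A runnable sketch of that dispatch shape, with both strategies stubbed out:

# Each strategy returns (changed, summary); core() just picks one by state.
def present_strategy(api, wished):
    return True, {'status': 'a resource would be created'}  # stub

def absent_strategy(api, wished):
    return False, {}  # stub

state_strategy = {
    'present': present_strategy,
    'absent': absent_strategy,
}

changed, summary = state_strategy['absent'](api=None, wished={})
print(changed, summary)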
+ returned: success + type: list + elements: dict + sample: + "scaleway_ip_info": [ + { + "address": "163.172.170.243", + "id": "ea081794-a581-8899-8451-386ddaf0a451", + "organization": "3f709602-5e6c-4619-b80c-e324324324af", + "reverse": null, + "server": { + "id": "12f19bc7-109c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayIpInfo(Scaleway): + + def __init__(self, module): + super(ScalewayIpInfo, self).__init__(module) + self.name = 'ips' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_ip_info=ScalewayIpInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_lb.py b/ansible_collections/community/general/plugins/modules/scaleway_lb.py new file mode 100644 index 000000000..3e43a8ae2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_lb.py @@ -0,0 +1,366 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway Load-balancer management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_lb +short_description: Scaleway load-balancer management module +author: Remy Leone (@remyleone) +description: + - "This module manages load-balancers on Scaleway." +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + + name: + type: str + description: + - Name of the load-balancer. + required: true + + description: + type: str + description: + - Description of the load-balancer. + required: true + + organization_id: + type: str + description: + - Organization identifier. + required: true + + state: + type: str + description: + - Indicate desired state of the instance. + default: present + choices: + - present + - absent + + region: + type: str + description: + - Scaleway zone. + required: true + choices: + - nl-ams + - fr-par + - pl-waw + + tags: + type: list + elements: str + default: [] + description: + - List of tags to apply to the load-balancer. + + wait: + description: + - Wait for the load-balancer to reach its desired state before returning. + type: bool + default: false + + wait_timeout: + type: int + description: + - Time to wait for the load-balancer to reach the expected state. + required: false + default: 300 + + wait_sleep_time: + type: int + description: + - Time to wait before every attempt to check the state of the load-balancer. 
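Every *_info module in this patch is a thin subclass of the shared Scaleway helper from module_utils: it only sets self.name to the API collection to list and rewrites api_url for the requested region before get_resources() fetches the listing. A sketch of that shape with the helper and location table stubbed out (the endpoint value is illustrative, not authoritative):

# Stand-ins for the real plugins/module_utils/scaleway helpers; only the
# shape matters here.
SCALEWAY_LOCATION = {
    'par1': {'api_endpoint': 'https://cp-par1.scaleway.com'},  # illustrative
}

class Scaleway(object):
    def __init__(self, module):
        self.module = module

    def get_resources(self):
        # The real helper GETs /<self.name> on api_url and returns the list.
        return []

class ScalewayIpInfo(Scaleway):
    def __init__(self, module):
        super(ScalewayIpInfo, self).__init__(module)
        self.name = 'ips'  # which API collection this info module lists
        region = module.params['region']
        self.module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']

class FakeModule(object):
    def __init__(self, params):
        self.params = params

info = ScalewayIpInfo(FakeModule({'region': 'par1'}))
print(info.name, info.module.params['api_url'])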
+ required: false + default: 3 +''' + +EXAMPLES = ''' +- name: Create a load-balancer + community.general.scaleway_lb: + name: foobar + state: present + organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: fr-par + tags: + - hello + +- name: Delete a load-balancer + community.general.scaleway_lb: + name: foobar + state: absent + organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 + region: fr-par +''' + +RETURN = ''' +{ + "scaleway_lb": { + "backend_count": 0, + "frontend_count": 0, + "description": "Description of my load-balancer", + "id": "00000000-0000-0000-0000-000000000000", + "instances": [ + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "10.0.0.1", + "region": "fr-par", + "status": "ready" + }, + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "10.0.0.2", + "region": "fr-par", + "status": "ready" + } + ], + "ip": [ + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "192.168.0.1", + "lb_id": "00000000-0000-0000-0000-000000000000", + "region": "fr-par", + "organization_id": "00000000-0000-0000-0000-000000000000", + "reverse": "" + } + ], + "name": "lb_ansible_test", + "organization_id": "00000000-0000-0000-0000-000000000000", + "region": "fr-par", + "status": "ready", + "tags": [ + "first_tag", + "second_tag" + ] + } +} +''' + +import datetime +import time +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "name", + "description" +) + + +def payload_from_wished_lb(wished_lb): + return { + "organization_id": wished_lb["organization_id"], + "name": wished_lb["name"], + "tags": wished_lb["tags"], + "description": wished_lb["description"] + } + + +def fetch_state(api, lb): + api.module.debug("fetch_state of load-balancer: %s" % lb["id"]) + response = api.get(path=api.api_path + "/%s" % lb["id"]) + + if response.status_code == 404: + return "absent" + + if not response.ok: + msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) + api.module.fail_json(msg=msg) + + try: + api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"])) + return response.json["status"] + except KeyError: + api.module.fail_json(msg="Could not fetch state in %s" % response.json) + + +def wait_to_complete_state_transition(api, lb, force_wait=False): + wait = api.module.params["wait"] + if not (wait or force_wait): + return + wait_timeout = api.module.params["wait_timeout"] + wait_sleep_time = api.module.params["wait_sleep_time"] + + start = datetime.datetime.utcnow() + end = start + datetime.timedelta(seconds=wait_timeout) + while datetime.datetime.utcnow() < end: + api.module.debug("We are going to wait for the load-balancer to finish its transition") + state = fetch_state(api, lb) + if state in STABLE_STATES: + api.module.debug("It seems that the load-balancer is not in transition anymore.") + api.module.debug("load-balancer in state: %s" % fetch_state(api, lb)) + break + time.sleep(wait_sleep_time) + else: + api.module.fail_json(msg="Load-balancer takes too long to finish its transition") + + +def lb_attributes_should_be_changed(target_lb, wished_lb): + diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]) + + if diff: + return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES) + else: + return diff + + +def
present_strategy(api, wished_lb): + changed = False + + response = api.get(path=api.api_path) + if not response.ok: + api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( + response.status_code, response.json['message'])) + + lbs_list = response.json["lbs"] + lb_lookup = dict((lb["name"], lb) + for lb in lbs_list) + + if wished_lb["name"] not in lb_lookup.keys(): + changed = True + if api.module.check_mode: + return changed, {"status": "A load-balancer would be created."} + + # Create Load-balancer + api.warn(payload_from_wished_lb(wished_lb)) + creation_response = api.post(path=api.api_path, + data=payload_from_wished_lb(wished_lb)) + + if not creation_response.ok: + msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + wait_to_complete_state_transition(api=api, lb=creation_response.json) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_lb = lb_lookup[wished_lb["name"]] + patch_payload = lb_attributes_should_be_changed(target_lb=target_lb, + wished_lb=wished_lb) + + if not patch_payload: + return changed, target_lb + + changed = True + if api.module.check_mode: + return changed, {"status": "Load-balancer attributes would be changed."} + + lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"], + data=patch_payload) + + if not lb_patch_response.ok: + api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format( + lb_patch_response.status_code, lb_patch_response.json['message'])) + + wait_to_complete_state_transition(api=api, lb=target_lb) + return changed, lb_patch_response.json + + +def absent_strategy(api, wished_lb): + response = api.get(path=api.api_path) + changed = False + + status_code = response.status_code + lbs_json = response.json + lbs_list = lbs_json["lbs"] + + if not response.ok: + api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( + status_code, response.json['message'])) + + lb_lookup = dict((lb["name"], lb) + for lb in lbs_list) + if wished_lb["name"] not in lb_lookup.keys(): + return changed, {} + + target_lb = lb_lookup[wished_lb["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Load-balancer would be destroyed"} + + wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_lb["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format( + response.status_code, response.json)) + + wait_to_complete_state_transition(api=api, lb=target_lb) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + region = module.params["region"] + wished_load_balancer = { + "state": module.params["state"], + "name": module.params["name"], + "description": module.params["description"], + "tags": module.params["tags"], + "organization_id": module.params["organization_id"] + } + module.params['api_url'] = SCALEWAY_ENDPOINT + api = Scaleway(module=module) + api.api_path = "lb/v1/regions/%s/lbs" % region + + changed, summary = state_strategy[wished_load_balancer["state"]](api=api, + wished_lb=wished_load_balancer) + module.exit_json(changed=changed, scaleway_lb=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + 
name=dict(required=True), + description=dict(required=True), + region=dict(required=True, choices=SCALEWAY_REGIONS), + state=dict(choices=list(state_strategy.keys()), default='present'), + tags=dict(type="list", elements="str", default=[]), + organization_id=dict(required=True), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + wait_sleep_time=dict(type="int", default=3), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py b/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py new file mode 100644 index 000000000..e9e272c98 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py @@ -0,0 +1,108 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: scaleway_organization_info +short_description: Gather information about the Scaleway organizations available +description: + - Gather information about the Scaleway organizations available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +options: + api_url: + description: + - Scaleway API URL. + default: 'https://account.scaleway.com' + aliases: ['base_url'] +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = r''' +- name: Gather Scaleway organizations information + community.general.scaleway_organization_info: + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_organization_info }}" +''' + +RETURN = r''' +--- +scaleway_organization_info: + description: Response from Scaleway API. 
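The wait handling in scaleway_lb above reduces to a deadline loop: poll the state until it is stable or the timeout expires. The same skeleton in isolation, with a fake state source so it runs standalone (the real code calls fail_json instead of raising):

import datetime
import time

STABLE_STATES = ('ready', 'absent')

def wait_for_stable_state(fetch_state, wait_timeout=300, wait_sleep_time=3):
    # Poll until a stable state shows up, or give up at the deadline.
    deadline = datetime.datetime.utcnow() + datetime.timedelta(seconds=wait_timeout)
    while datetime.datetime.utcnow() < deadline:
        state = fetch_state()
        if state in STABLE_STATES:
            return state
        time.sleep(wait_sleep_time)
    raise RuntimeError('resource took too long to finish its transition')

# Fake transition: 'pending' twice, then 'ready'.
states = iter(['pending', 'pending', 'ready'])
print(wait_for_stable_state(lambda: next(states), wait_timeout=10, wait_sleep_time=0))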
+ returned: success + type: list + elements: dict + sample: + "scaleway_organization_info": [ + { + "address_city_name": "Paris", + "address_country_code": "FR", + "address_line1": "42 Rue de l'univers", + "address_line2": null, + "address_postal_code": "75042", + "address_subdivision_code": "FR-75", + "creation_date": "2018-08-06T13:43:28.508575+00:00", + "currency": "EUR", + "customer_class": "individual", + "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", + "locale": "fr_FR", + "modification_date": "2018-08-06T14:56:41.401685+00:00", + "name": "James Bond", + "support_id": "694324", + "support_level": "basic", + "support_pin": "9324", + "users": [], + "vat_number": null, + "warnings": [] + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec +) + + +class ScalewayOrganizationInfo(Scaleway): + + def __init__(self, module): + super(ScalewayOrganizationInfo, self).__init__(module) + self.name = 'organizations' + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_private_network.py b/ansible_collections/community/general/plugins/modules/scaleway_private_network.py new file mode 100644 index 000000000..33fb7381c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_private_network.py @@ -0,0 +1,241 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway VPC management module +# +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_private_network +short_description: Scaleway private network management +version_added: 4.5.0 +author: Pascal MANGIN (@pastral) +description: + - "This module manages private network on Scaleway account (U(https://developer.scaleway.com))." +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + type: str + description: + - Indicate desired state of the VPC. + default: present + choices: + - present + - absent + + project: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 + + name: + type: str + description: + - Name of the VPC. + + tags: + type: list + elements: str + description: + - List of tags to apply to the instance. 
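get_private_network below pages through the listing 10 results at a time and recurses while page * page_size is still below total_count. An iterative rendition of the same walk, driven by a canned pager so it runs standalone:

PAGE_SIZE = 10

def find_by_name(list_page, name):
    # list_page(page) -> {'total_count': int, 'private_networks': [...]}
    page = 1
    while True:
        body = list_page(page)
        for pn in body['private_networks']:
            if pn['name'] == name:
                return pn
        if page * PAGE_SIZE >= body['total_count']:
            return None
        page += 1

# 12 fake networks, so 'pn-11' only shows up on page 2.
networks = [{'name': 'pn-%d' % i} for i in range(12)]
def fake_list_page(page):
    chunk = networks[(page - 1) * PAGE_SIZE:page * PAGE_SIZE]
    return {'total_count': len(networks), 'private_networks': chunk}

print(find_by_name(fake_list_page, 'pn-11'))  # {'name': 'pn-11'}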
+ default: [] + +''' + +EXAMPLES = ''' +- name: Create a private network + community.general.scaleway_private_network: + project: '{{ scw_project }}' + name: 'vpc_one' + state: present + region: par1 + register: vpc_creation_task + +- name: Make sure private network with name 'foo' is deleted in region par1 + community.general.scaleway_private_network: + name: 'foo' + state: absent + region: par1 +''' + +RETURN = ''' +scaleway_private_network: + description: Information on the VPC. + returned: success when I(state=present) + type: dict + sample: + { + "created_at": "2022-01-15T11:11:12.676445Z", + "id": "12345678-f1e6-40ec-83e5-12345d67ed89", + "name": "network", + "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "tags": [ + "tag1", + "tag2", + "tag3", + "tag4", + "tag5" + ], + "updated_at": "2022-01-15T11:12:04.624837Z", + "zone": "fr-par-2" + } +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule + + +def get_private_network(api, name, page=1): + page_size = 10 + response = api.get('private-networks', params={'name': name, 'order_by': 'name_asc', 'page': page, 'page_size': page_size}) + if not response.ok: + msg = "Error getting private networks: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json) + api.module.fail_json(msg=msg) + + if response.json['total_count'] == 0: + return None + + for private_network in response.json['private_networks']: + if private_network['name'] == name: + return private_network + + # search on next page if needed + if (page * page_size) < response.json['total_count']: + return get_private_network(api, name, page + 1) + + return None + + +def present_strategy(api, wished_private_network): + + changed = False + private_network = get_private_network(api, wished_private_network['name']) + if private_network is not None: + if set(wished_private_network['tags']) == set(private_network['tags']): + return changed, private_network + else: + # the private network needs to be updated + data = {'name': wished_private_network['name'], + 'tags': wished_private_network['tags'] + } + changed = True + if api.module.check_mode: + return changed, {"status": "private network would be updated"} + + response = api.patch(path='private-networks/' + private_network['id'], data=data) + if not response.ok: + api.module.fail_json(msg='Error updating private network [{0}: {1}]'.format(response.status_code, response.json)) + + return changed, response.json + + # the private network needs to be created + changed = True + if api.module.check_mode: + return changed, {"status": "private network would be created"} + + data = {'name': wished_private_network['name'], + 'project_id': wished_private_network['project'], + 'tags': wished_private_network['tags'] + } + + response = api.post(path='private-networks/', data=data) + + if not response.ok: + api.module.fail_json(msg='Error creating private network [{0}: {1}]'.format(response.status_code, response.json)) + + return changed, response.json + + +def absent_strategy(api, wished_private_network): + + changed = False + private_network = get_private_network(api, wished_private_network['name']) + if private_network is None: + return changed, {} + + changed = True + if api.module.check_mode: + return changed, {"status": "private network would be destroyed"} + + response =
api.delete('private-networks/' + private_network['id']) + + if not response.ok: + api.module.fail_json(msg='Error deleting private network [{0}: {1}]'.format( + response.status_code, response.json)) + + return changed, response.json + + +def core(module): + + wished_private_network = { + "project": module.params['project'], + "tags": module.params['tags'], + "name": module.params['name'] + } + + region = module.params["region"] + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint_vpc"] + + api = Scaleway(module=module) + if module.params["state"] == "absent": + changed, summary = absent_strategy(api=api, wished_private_network=wished_private_network) + else: + changed, summary = present_strategy(api=api, wished_private_network=wished_private_network) + module.exit_json(changed=changed, scaleway_private_network=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + project=dict(required=True), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + tags=dict(type="list", elements="str", default=[]), + name=dict() + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group.py new file mode 100644 index 000000000..5523da41c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_security_group.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway Security Group management module +# +# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_security_group +short_description: Scaleway Security Group management module +author: Antoine Barbare (@abarbare) +description: + - "This module manages Security Groups on a Scaleway account U(https://developer.scaleway.com)." +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Indicate desired state of the Security Group. + type: str + choices: [ absent, present ] + default: present + + organization: + description: + - Organization identifier. + type: str + required: true + + region: + description: + - Scaleway region to use (for example C(par1)). + type: str + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 + + name: + description: + - Name of the Security Group. + type: str + required: true + + description: + description: + - Description of the Security Group. + type: str + + stateful: + description: + - Create a stateful security group which allows established connections in and out. + type: bool + required: true + + inbound_default_policy: + description: + - Default policy for incoming traffic. + type: str + choices: [ accept, drop ] + + outbound_default_policy: + description: + - Default policy for outgoing traffic.
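The present strategies in these modules share one recipe: list the collection, index it by a natural key such as the name, and create or patch only when the lookup misses, short-circuiting in check mode. A stripped-down sketch of that recipe (the create call is stubbed):

def ensure_present(existing, wished, check_mode=False):
    # Index what exists by name, the natural key these modules use.
    by_name = dict((item['name'], item) for item in existing)
    if wished['name'] in by_name:
        return False, by_name[wished['name']]  # already there, no change
    if check_mode:
        return True, {'status': 'a resource would be created'}
    # A real strategy would POST to the API here; echo the wish instead.
    return True, dict(wished)

print(ensure_present([{'name': 'default'}], {'name': 'web'}))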
+ type: str + choices: [ accept, drop ] + + organization_default: + description: + - Create security group to be the default one. + type: bool +''' + +EXAMPLES = ''' +- name: Create a Security Group + community.general.scaleway_security_group: + state: present + region: par1 + name: security_group + description: "my security group description" + organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9" + stateful: false + inbound_default_policy: accept + outbound_default_policy: accept + organization_default: false + register: security_group_creation_task +''' + +RETURN = ''' +data: + description: This is only present when I(state=present). + returned: when I(state=present) + type: dict + sample: { + "scaleway_security_group": { + "description": "my security group description", + "enable_default_security": true, + "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae", + "inbound_default_policy": "accept", + "name": "security_group", + "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9", + "organization_default": false, + "outbound_default_policy": "accept", + "servers": [], + "stateful": false + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule +from uuid import uuid4 + + +def payload_from_security_group(security_group): + return dict( + (k, v) + for k, v in security_group.items() + if k != 'id' and v is not None + ) + + +def present_strategy(api, security_group): + ret = {'changed': False} + + response = api.get('security_groups') + if not response.ok: + api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) + + security_group_lookup = dict((sg['name'], sg) + for sg in response.json['security_groups']) + + if security_group['name'] not in security_group_lookup.keys(): + ret['changed'] = True + if api.module.check_mode: + # Help user when check mode is enabled by defining id key + ret['scaleway_security_group'] = {'id': str(uuid4())} + return ret + + # Create Security Group + response = api.post('/security_groups', + data=payload_from_security_group(security_group)) + + if not response.ok: + msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json) + api.module.fail_json(msg=msg) + ret['scaleway_security_group'] = response.json['security_group'] + + else: + ret['scaleway_security_group'] = security_group_lookup[security_group['name']] + + return ret + + +def absent_strategy(api, security_group): + response = api.get('security_groups') + ret = {'changed': False} + + if not response.ok: + api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) + + security_group_lookup = dict((sg['name'], sg) + for sg in response.json['security_groups']) + if security_group['name'] not in security_group_lookup.keys(): + return ret + + ret['changed'] = True + if api.module.check_mode: + return ret + + response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id']) + if not response.ok: + api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) + + return ret + + +def core(module): + security_group = { + 'organization': module.params['organization'], + 'name': module.params['name'], + 'description': 
module.params['description'], + 'stateful': module.params['stateful'], + 'inbound_default_policy': module.params['inbound_default_policy'], + 'outbound_default_policy': module.params['outbound_default_policy'], + 'organization_default': module.params['organization_default'], + } + + region = module.params['region'] + module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] + + api = Scaleway(module=module) + if module.params['state'] == 'present': + summary = present_strategy(api=api, security_group=security_group) + else: + summary = absent_strategy(api=api, security_group=security_group) + module.exit_json(**summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + organization=dict(type='str', required=True), + name=dict(type='str', required=True), + description=dict(type='str'), + region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())), + stateful=dict(type='bool', required=True), + inbound_default_policy=dict(type='str', choices=['accept', 'drop']), + outbound_default_policy=dict(type='str', choices=['accept', 'drop']), + organization_default=dict(type='bool'), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]] + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py new file mode 100644 index 000000000..fb28e8774 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py @@ -0,0 +1,118 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: scaleway_security_group_info +short_description: Gather information about the Scaleway security groups available +description: + - Gather information about the Scaleway security groups available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = r''' +- name: Gather Scaleway security groups information + community.general.scaleway_security_group_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_security_group_info }}" +''' + +RETURN = r''' +--- +scaleway_security_group_info: + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
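main() above relies on AnsibleModule's required_if rule: when stateful is true, both default policies must be provided. The same contract expressed by hand, as a sketch rather than the AnsibleModule internals:

def check_required_if(params):
    # Same contract as required_if=[['stateful', True,
    # ['inbound_default_policy', 'outbound_default_policy']]]
    if params.get('stateful') is True:
        missing = [k for k in ('inbound_default_policy', 'outbound_default_policy')
                   if params.get(k) is None]
        if missing:
            raise ValueError('stateful is True but missing: %s' % ', '.join(missing))

check_required_if({'stateful': False})                   # fine, policies optional
check_required_if({'stateful': True,
                   'inbound_default_policy': 'accept',
                   'outbound_default_policy': 'drop'})   # fine, both present
try:
    check_required_if({'stateful': True})
except ValueError as exc:
    print(exc)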
+ returned: success + type: list + elements: dict + sample: + "scaleway_security_group_info": [ + { + "description": "test-ams", + "enable_default_security": true, + "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", + "name": "test-ams", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "organization_default": false, + "servers": [ + { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + ] + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewaySecurityGroupInfo(Scaleway): + + def __init__(self, module): + super(ScalewaySecurityGroupInfo, self).__init__(module) + self.name = 'security_groups' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py new file mode 100644 index 000000000..136631d03 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py @@ -0,0 +1,282 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway Security Group Rule management module +# +# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_security_group_rule +short_description: Scaleway Security Group Rule management module +author: Antoine Barbare (@abarbare) +description: + - "This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com)." +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes +requirements: + - ipaddress + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + type: str + description: + - Indicate desired state of the Security Group Rule. + default: present + choices: + - present + - absent + + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 + + protocol: + type: str + description: + - Network protocol to use. + choices: + - TCP + - UDP + - ICMP + required: true + + port: + description: + - Port related to the rule, null value for all the ports. + required: true + type: int + + ip_range: + type: str + description: + - IPV4 CIDR notation to apply to the rule. + default: 0.0.0.0/0 + + direction: + type: str + description: + - Rule direction. 
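Security group rules carry no user-facing name, so the module decides idempotency by comparing five fields of each existing rule against the wished one (see get_sgr_from_api further down). That comparison reads naturally as tuple equality, as in this sketch:

MATCH_FIELDS = ('ip_range', 'dest_port_from', 'direction', 'action', 'protocol')

def rule_key(rule):
    return tuple(rule[f] for f in MATCH_FIELDS)

def find_matching_rule(existing_rules, wished_rule):
    # Equivalent to the module's field-by-field comparison.
    for rule in existing_rules:
        if rule_key(rule) == rule_key(wished_rule):
            return rule
    return None

existing = [{'ip_range': '0.0.0.0/0', 'dest_port_from': 80,
             'direction': 'inbound', 'action': 'accept',
             'protocol': 'TCP', 'id': 'abc'}]
wished = {'ip_range': '0.0.0.0/0', 'dest_port_from': 80,
          'direction': 'inbound', 'action': 'accept', 'protocol': 'TCP'}
print(find_matching_rule(existing, wished))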
+ choices: + - inbound + - outbound + required: true + + action: + type: str + description: + - Rule action. + choices: + - accept + - drop + required: true + + security_group: + type: str + description: + - Security Group unique identifier. + required: true +''' + +EXAMPLES = ''' + - name: Create a Security Group Rule + community.general.scaleway_security_group_rule: + state: present + region: par1 + protocol: TCP + port: 80 + ip_range: 0.0.0.0/0 + direction: inbound + action: accept + security_group: b57210ee-1281-4820-a6db-329f78596ecb + register: security_group_rule_creation_task +''' + +RETURN = ''' +data: + description: This is only present when I(state=present). + returned: when I(state=present) + type: dict + sample: { + "scaleway_security_group_rule": { + "direction": "inbound", + "protocol": "TCP", + "ip_range": "0.0.0.0/0", + "dest_port_from": 80, + "action": "accept", + "position": 2, + "dest_port_to": null, + "editable": null, + "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" + } + } +''' + +import traceback + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + from ipaddress import ip_network # noqa: F401, pylint: disable=unused-import +except ImportError: + IPADDRESS_IMP_ERR = traceback.format_exc() + HAS_IPADDRESS = False +else: + IPADDRESS_IMP_ERR = None + HAS_IPADDRESS = True + + +def get_sgr_from_api(security_group_rules, security_group_rule): + """ Check if a security_group_rule specs are present in security_group_rules + Return None if no rules match the specs + Return the rule if found + """ + for sgr in security_group_rules: + if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and + sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and + sgr['protocol'] == security_group_rule['protocol']): + return sgr + + return None + + +def present_strategy(api, security_group_id, security_group_rule): + ret = {'changed': False} + + response = api.get('security_groups/%s/rules' % security_group_id) + if not response.ok: + api.module.fail_json( + msg='Error getting security group rules "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + existing_rule = get_sgr_from_api( + response.json['rules'], security_group_rule) + + if not existing_rule: + ret['changed'] = True + if api.module.check_mode: + return ret + + # Create Security Group Rule + response = api.post('/security_groups/%s/rules' % security_group_id, + data=payload_from_object(security_group_rule)) + + if not response.ok: + api.module.fail_json( + msg='Error during security group rule creation: "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + ret['scaleway_security_group_rule'] = response.json['rule'] + + else: + ret['scaleway_security_group_rule'] = existing_rule + + return ret + + +def absent_strategy(api, security_group_id, security_group_rule): + ret = {'changed': False} + + response = api.get('security_groups/%s/rules' % security_group_id) + if not response.ok: + api.module.fail_json( + msg='Error getting security group rules "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + existing_rule = get_sgr_from_api( + response.json['rules'], security_group_rule) + + if not existing_rule: + return ret + + 
ret['changed'] = True + if api.module.check_mode: + return ret + + response = api.delete( + '/security_groups/%s/rules/%s' % + (security_group_id, existing_rule['id'])) + if not response.ok: + api.module.fail_json( + msg='Error deleting security group rule "%s": "%s" (%s)' % + (response.info['msg'], response.json['message'], response.json)) + + return ret + + +def core(module): + api = Scaleway(module=module) + + security_group_rule = { + 'protocol': module.params['protocol'], + 'dest_port_from': module.params['port'], + 'ip_range': module.params['ip_range'], + 'direction': module.params['direction'], + 'action': module.params['action'], + } + + region = module.params['region'] + module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint'] + + if module.params['state'] == 'present': + summary = present_strategy( + api=api, + security_group_id=module.params['security_group'], + security_group_rule=security_group_rule) + else: + summary = absent_strategy( + api=api, + security_group_id=module.params['security_group'], + security_group_rule=security_group_rule) + module.exit_json(**summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update( + state=dict(type='str', default='present', choices=['absent', 'present']), + region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())), + protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']), + port=dict(type='int', required=True), + ip_range=dict(type='str', default='0.0.0.0/0'), + direction=dict(type='str', required=True, choices=['inbound', 'outbound']), + action=dict(type='str', required=True, choices=['accept', 'drop']), + security_group=dict(type='str', required=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + if not HAS_IPADDRESS: + module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_server_info.py b/ansible_collections/community/general/plugins/modules/scaleway_server_info.py new file mode 100644 index 000000000..01e9410da --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_server_info.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: scaleway_server_info +short_description: Gather information about the Scaleway servers available +description: + - Gather information about the Scaleway servers available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). 
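scaleway_security_group_rule above guards its ipaddress import at load time and only fails from main(), so the file still imports cleanly on hosts missing the library. The pattern in isolation (failing with SystemExit here instead of fail_json):

import traceback

try:
    from ipaddress import ip_network  # noqa: F401
except ImportError:
    IPADDRESS_IMP_ERR = traceback.format_exc()
    HAS_IPADDRESS = False
else:
    IPADDRESS_IMP_ERR = None
    HAS_IPADDRESS = True

def main():
    if not HAS_IPADDRESS:
        # A real module would call fail_json(msg=missing_required_lib('ipaddress'),
        # exception=IPADDRESS_IMP_ERR) instead.
        raise SystemExit('missing required library: ipaddress')
    print('ipaddress available:', ip_network(u'0.0.0.0/0'))

if __name__ == '__main__':
    main()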
+ required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway servers information + community.general.scaleway_server_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_server_info }}" +''' + +RETURN = r''' +--- +scaleway_server_info: + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + returned: success + type: list + elements: dict + sample: + "scaleway_server_info": [ + { + "arch": "x86_64", + "boot_type": "local", + "bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "commercial_type": "START1-XS", + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "dynamic_ip_required": false, + "enable_ipv6": false, + "extra_networks": [], + "hostname": "scw-e0d256", + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "image": { + "arch": "x86_64", + "creation_date": "2018-04-26T12:42:21.619844+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", + "modification_date": "2018-04-26T12:49:07.573004+00:00", + "name": "Ubuntu Xenial", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + }, + "ipv6": null, + "location": { + "cluster_id": "5", + "hypervisor_id": "412", + "node_id": "2", + "platform_id": "13", + "zone_id": "par1" + }, + "maintenances": [], + "modification_date": "2018-08-14T21:37:28.630882+00:00", + "name": "scw-e0d256", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "private_ip": "10.14.222.131", + "protected": false, + "public_ip": { + "address": "163.172.170.197", + "dynamic": false, + "id": "ea081794-a581-4495-8451-386ddaf0a451" + }, + "security_group": { + "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", + "name": "Default security group" + }, + "state": "running", + "state_detail": "booted", + "tags": [], + "volumes": { + "0": { + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "export_uri": "device://dev/vda", + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "modification_date": "2018-08-14T21:36:56.271545+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", 
+ "name": "scw-e0d256" + }, + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + } + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayServerInfo(Scaleway): + + def __init__(self, module): + super(ScalewayServerInfo, self).__init__(module) + self.name = 'servers' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_server_info=ScalewayServerInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py b/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py new file mode 100644 index 000000000..687f43c85 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py @@ -0,0 +1,119 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: scaleway_snapshot_info +short_description: Gather information about the Scaleway snapshots available +description: + - Gather information about the Scaleway snapshot available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway snapshots information + community.general.scaleway_snapshot_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_snapshot_info }}" +''' + +RETURN = r''' +--- +scaleway_snapshot_info: + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
+ returned: success + type: list + elements: dict + sample: + "scaleway_snapshot_info": [ + { + "base_volume": { + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" + }, + "creation_date": "2018-08-14T22:34:35.299461+00:00", + "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", + "modification_date": "2018-08-14T22:34:54.520560+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION +) + + +class ScalewaySnapshotInfo(Scaleway): + + def __init__(self, module): + super(ScalewaySnapshotInfo, self).__init__(module) + self.name = 'snapshots' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py b/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py new file mode 100644 index 000000000..a39e57aa3 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway SSH keys management module +# +# Copyright (C) 2018 Online SAS. +# https://www.scaleway.com +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_sshkey +short_description: Scaleway SSH keys management module +author: Remy Leone (@remyleone) +description: + - "This module manages SSH keys on Scaleway account U(https://developer.scaleway.com)." +extends_documentation_fragment: +- community.general.scaleway +- community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + type: str + description: + - Indicate desired state of the SSH key. + default: present + choices: + - present + - absent + ssh_pub_key: + type: str + description: + - The public SSH key as a string to add. + required: true + api_url: + type: str + description: + - Scaleway API URL. + default: 'https://account.scaleway.com' + aliases: ['base_url'] +''' + +EXAMPLES = ''' +- name: "Add SSH key" + community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." + state: "present" + +- name: "Delete SSH key" + community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." + state: "absent" + +- name: "Add SSH key with explicit token" + community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." 
+ state: "present" + oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c" +''' + +RETURN = ''' +data: + description: This is only present when I(state=present). + returned: when I(state=present) + type: dict + sample: { + "ssh_public_keys": [ + {"key": "ssh-rsa AAAA...."} + ] + } +''' + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway + + +def extract_present_sshkeys(raw_organization_dict): + ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"] + ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list] + return ssh_key_lookup + + +def extract_user_id(raw_organization_dict): + return raw_organization_dict["organizations"][0]["users"][0]["id"] + + +def sshkey_user_patch(ssh_lookup): + ssh_list = {"ssh_public_keys": [{"key": key} + for key in ssh_lookup]} + return ssh_list + + +def core(module): + ssh_pub_key = module.params['ssh_pub_key'] + state = module.params["state"] + account_api = Scaleway(module) + response = account_api.get('organizations') + + status_code = response.status_code + organization_json = response.json + + if not response.ok: + module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format( + status_code, response.json['message'])) + + user_id = extract_user_id(organization_json) + present_sshkeys = [] + try: + present_sshkeys = extract_present_sshkeys(organization_json) + except (KeyError, IndexError) as e: + module.fail_json(changed=False, data="Error while extracting present SSH keys from API") + + if state in ('present',): + if ssh_pub_key in present_sshkeys: + module.exit_json(changed=False) + + # If key not found create it! + if module.check_mode: + module.exit_json(changed=True) + + present_sshkeys.append(ssh_pub_key) + payload = sshkey_user_patch(present_sshkeys) + + response = account_api.patch('/users/%s' % user_id, data=payload) + + if response.ok: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format( + response.status_code, response.json)) + + elif state in ('absent',): + if ssh_pub_key not in present_sshkeys: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + present_sshkeys.remove(ssh_pub_key) + payload = sshkey_user_patch(present_sshkeys) + + response = account_api.patch('/users/%s' % user_id, data=payload) + + if response.ok: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format( + response.status_code, response.json)) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + ssh_pub_key=dict(required=True), + api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_user_data.py b/ansible_collections/community/general/plugins/modules/scaleway_user_data.py new file mode 100644 index 000000000..08ff86a55 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_user_data.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway user data management module +# +# Copyright (C) 2018 Online 
SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later

+from __future__ import absolute_import, division, print_function

+__metaclass__ = type

+DOCUMENTATION = '''
+---
+module: scaleway_user_data
+short_description: Scaleway user_data management module
+author: Remy Leone (@remyleone)
+description:
+    - This module manages user_data on compute instances on Scaleway.
+    - It can be used, for example, to configure C(cloud-init).
+extends_documentation_fragment:
+- community.general.scaleway
+- community.general.attributes

+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none

+options:

+  server_id:
+    type: str
+    description:
+      - Scaleway Compute instance ID of the server.
+    required: true

+  user_data:
+    type: dict
+    description:
+      - User defined data. Typically used with C(cloud-init).
+      - Pass your C(cloud-init) script here as a string.
+    required: false

+  region:
+    type: str
+    description:
+      - Scaleway compute zone.
+    required: true
+    choices:
+      - ams1
+      - EMEA-NL-EVS
+      - par1
+      - EMEA-FR-PAR1
+      - par2
+      - EMEA-FR-PAR2
+      - waw1
+      - EMEA-PL-WAW1
+'''

+EXAMPLES = '''
+- name: Update the cloud-init
+  community.general.scaleway_user_data:
+    server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
+    region: ams1
+    user_data:
+      cloud-init: 'final_message: "Hello World!"'
+'''

+RETURN = '''
+'''

+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway


+def patch_user_data(compute_api, server_id, key, value):
+    compute_api.module.debug("Starting patching user_data attributes")

+    path = "servers/%s/user_data/%s" % (server_id, key)
+    response = compute_api.patch(path=path, data=value, headers={"Content-Type": "text/plain"})
+    if not response.ok:
+        msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)

+    return response


+def delete_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Starting deletion of user_data attribute: %s" % key)

+    response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))

+    if not response.ok:
+        # Note: the parentheses are required so '%' receives both arguments;
+        # without them 'msg' becomes a tuple and formatting fails at runtime.
+        msg = 'Error during user_data deletion: (%s) %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)

+    return response


+def get_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Starting fetch of user_data attribute: %s" % key)

+    path = "servers/%s/user_data/%s" % (server_id, key)
+    response = compute_api.get(path=path)
+    if not response.ok:
+        msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)

+    return response.json


+def core(module):
+    region = module.params["region"]
+    server_id = module.params["server_id"]
+    user_data = module.params["user_data"]
+    changed = False

+    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+    compute_api = Scaleway(module=module)

+    user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
+    if not user_data_list.ok:
+        msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
+        compute_api.module.fail_json(msg=msg)

+    present_user_data_keys = user_data_list.json["user_data"]
+    present_user_data = dict(
+        (key,
get_user_data(compute_api=compute_api, server_id=server_id, key=key)) + for key in present_user_data_keys + ) + + if present_user_data == user_data: + module.exit_json(changed=changed, msg=user_data_list.json) + + # First we remove keys that are not defined in the wished user_data + for key in present_user_data: + if key not in user_data: + + changed = True + if compute_api.module.check_mode: + module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id}) + + delete_user_data(compute_api=compute_api, server_id=server_id, key=key) + + # Then we patch keys that are different + for key, value in user_data.items(): + if key not in present_user_data or user_data[key] != present_user_data[key]: + + changed = True + if compute_api.module.check_mode: + module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id}) + + patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value) + + module.exit_json(changed=changed, msg=user_data) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + user_data=dict(type="dict"), + server_id=dict(required=True), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_volume.py b/ansible_collections/community/general/plugins/modules/scaleway_volume.py new file mode 100644 index 000000000..2ff09da54 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_volume.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Scaleway volumes management module +# +# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com). +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: scaleway_volume +short_description: Scaleway volumes management module +author: Henryk Konsek (@hekonsek) +description: + - "This module manages volumes on Scaleway account U(https://developer.scaleway.com)." +extends_documentation_fragment: +- community.general.scaleway +- community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + type: str + description: + - Indicate desired state of the volume. + default: present + choices: + - present + - absent + region: + type: str + description: + - Scaleway region to use (for example par1). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 + name: + type: str + description: + - Name used to identify the volume. + required: true + project: + type: str + description: + - Scaleway project ID to which volume belongs. + version_added: 4.3.0 + organization: + type: str + description: + - ScaleWay organization ID to which volume belongs. + size: + type: int + description: + - Size of the volume in bytes. + volume_type: + type: str + description: + - Type of the volume (for example 'l_ssd'). 
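+# Editorial sketch (an assumption, not part of the upstream option docs): with the
+# options above, the module ends up POSTing a JSON payload of the shape
+#   {"name": "my-volume", "project": "<project-uuid>", "size": 10000000000, "volume_type": "l_ssd"}
+# to the /volumes endpoint; note that size is expressed in bytes, not gigabytes.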
+''' + +EXAMPLES = ''' +- name: Create 10GB volume + community.general.scaleway_volume: + name: my-volume + state: present + region: par1 + project: "{{ scw_org }}" + "size": 10000000000 + volume_type: l_ssd + register: server_creation_check_task + +- name: Make sure volume deleted + community.general.scaleway_volume: + name: my-volume + state: absent + region: par1 +''' + +RETURN = ''' +data: + description: This is only present when I(state=present). + returned: when I(state=present) + type: dict + sample: { + "volume": { + "export_uri": null, + "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd", + "name": "volume-0-3", + "project": "000a115d-2852-4b0a-9ce8-47f1134ba95a", + "server": null, + "size": 10000000000, + "volume_type": "l_ssd" + } +} +''' + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule + + +def core(module): + region = module.params["region"] + state = module.params['state'] + name = module.params['name'] + organization = module.params['organization'] + project = module.params['project'] + size = module.params['size'] + volume_type = module.params['volume_type'] + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + account_api = Scaleway(module) + response = account_api.get('volumes') + status_code = response.status_code + volumes_json = response.json + + if project is None: + project = organization + + if not response.ok: + module.fail_json(msg='Error getting volume [{0}: {1}]'.format( + status_code, response.json['message'])) + + volumeByName = None + for volume in volumes_json['volumes']: + if volume['project'] == project and volume['name'] == name: + volumeByName = volume + + if state in ('present',): + if volumeByName is not None: + module.exit_json(changed=False) + + payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type} + + response = account_api.post('/volumes', payload) + + if response.ok: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error creating volume [{0}: {1}]'.format( + response.status_code, response.json)) + + elif state in ('absent',): + if volumeByName is None: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + response = account_api.delete('/volumes/' + volumeByName['id']) + if response.status_code == 204: + module.exit_json(changed=True, data=response.json) + + module.fail_json(msg='Error deleting volume [{0}: {1}]'.format( + response.status_code, response.json)) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + name=dict(required=True), + size=dict(type='int'), + project=dict(), + organization=dict(), + volume_type=dict(), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ('organization', 'project'), + ], + required_one_of=[ + ('organization', 'project'), + ], + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py b/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py new file mode 100644 index 000000000..471845c43 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py @@ -0,0 +1,114 @@ 
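For orientation before the next file, here is a condensed, editorial sketch of the create-side idempotency flow that the scaleway_volume module above implements. The Scaleway helper, the endpoints, and the payload shape are taken from the patch; the function scaffolding and the RuntimeError handling (where the real module calls fail_json) are illustrative.

    # Condensed sketch of core() in scaleway_volume above; `account_api` is the
    # Scaleway(module) helper, the remaining names mirror module.params.
    def find_volume(account_api, project, name):
        # The module identifies an existing volume by matching project AND name.
        response = account_api.get('volumes')
        if not response.ok:
            raise RuntimeError('listing volumes failed: %s' % response.status_code)
        for volume in response.json['volumes']:
            if volume['project'] == project and volume['name'] == name:
                return volume
        return None

    def ensure_present(account_api, project, name, size, volume_type):
        # Returns True when a volume had to be created (the "changed" result).
        if find_volume(account_api, project, name) is not None:
            return False
        payload = {'name': name, 'project': project,
                   'size': size, 'volume_type': volume_type}
        response = account_api.post('/volumes', payload)
        if not response.ok:
            raise RuntimeError('creating volume failed: %s' % response.status_code)
        return True

Matching on both project and name is what makes repeated runs converge instead of accumulating volumes with duplicate names.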
+#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: scaleway_volume_info +short_description: Gather information about the Scaleway volumes available +description: + - Gather information about the Scaleway volumes available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.attributes.info_module + +options: + region: + type: str + description: + - Scaleway region to use (for example C(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - waw1 + - EMEA-PL-WAW1 +''' + +EXAMPLES = r''' +- name: Gather Scaleway volumes information + community.general.scaleway_volume_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_volume_info }}" +''' + +RETURN = r''' +--- +scaleway_volume_info: + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + returned: success + type: list + elements: dict + sample: + "scaleway_volume_info": [ + { + "creation_date": "2018-08-14T20:56:24.949660+00:00", + "export_uri": null, + "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", + "modification_date": "2018-08-14T20:56:24.949660+00:00", + "name": "test-volume", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": null, + "size": 50000000000, + "state": "available", + "volume_type": "l_ssd" + } + ] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, + SCALEWAY_LOCATION) + + +class ScalewayVolumeInfo(Scaleway): + + def __init__(self, module): + super(ScalewayVolumeInfo, self).__init__(module) + self.name = 'volumes' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_volume_info=ScalewayVolumeInfo(module).get_resources() + ) + except ScalewayException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sefcontext.py b/ansible_collections/community/general/plugins/modules/sefcontext.py new file mode 100644 index 000000000..b2fb36767 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sefcontext.py @@ -0,0 +1,385 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: sefcontext +short_description: Manages SELinux file context 
mapping definitions +description: + - Manages SELinux file context mapping definitions. + - Similar to the C(semanage fcontext) command. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.platform +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: linux +options: + target: + description: + - Target path (expression). + type: str + required: true + aliases: [ path ] + ftype: + description: + - The file type that should have SELinux contexts applied. + - "The following file type options are available:" + - C(a) for all files, + - C(b) for block devices, + - C(c) for character devices, + - C(d) for directories, + - C(f) for regular files, + - C(l) for symbolic links, + - C(p) for named pipes, + - C(s) for socket files. + type: str + choices: [ a, b, c, d, f, l, p, s ] + default: a + setype: + description: + - SELinux type for the specified I(target). + type: str + substitute: + description: + - Path to use to substitute file context(s) for the specified I(target). The context labeling for the I(target) subtree is made equivalent to this path. + - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux management tools. + version_added: 6.4.0 + type: str + aliases: [ equal ] + seuser: + description: + - SELinux user for the specified I(target). + - Defaults to C(system_u) for new file contexts and to existing value when modifying file contexts. + type: str + selevel: + description: + - SELinux range for the specified I(target). + - Defaults to C(s0) for new file contexts and to existing value when modifying file contexts. + type: str + aliases: [ serange ] + state: + description: + - Whether the SELinux file context must be C(absent) or C(present). + - Specifying C(absent) without either I(setype) or I(substitute) deletes both SELinux type or path substitution mappings that match I(target). + type: str + choices: [ absent, present ] + default: present + reload: + description: + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. + type: bool + default: true + ignore_selinux_state: + description: + - Useful for scenarios (chrooted environment) that you can't get the real SELinux state. + type: bool + default: false +notes: +- The changes are persistent across reboots. +- I(setype) and I(substitute) are mutually exclusive. +- If I(state=present) then one of I(setype) or I(substitute) is mandatory. +- The M(community.general.sefcontext) module does not modify existing files to the new + SELinux context(s), so it is advisable to first create the SELinux + file contexts before creating files, or run C(restorecon) manually + for the existing files that require the new SELinux file contexts. +- Not applying SELinux fcontexts to existing files is a deliberate + decision as it would be unclear what reported changes would entail + to, and there's no guarantee that applying SELinux fcontext does + not pick up other unrelated prior changes. +requirements: +- libselinux-python +- policycoreutils-python +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- name: Allow apache to modify files in /srv/git_repos + community.general.sefcontext: + target: '/srv/git_repos(/.*)?' 
+ setype: httpd_sys_rw_content_t + state: present + +- name: Substitute file contexts for path /srv/containers with /var/lib/containers + community.general.sefcontext: + target: /srv/containers + substitute: /var/lib/containers + state: present + +- name: Delete file context path substitution for /srv/containers + community.general.sefcontext: + target: /srv/containers + substitute: /var/lib/containers + state: absent + +- name: Delete any file context mappings for path /srv/git + community.general.sefcontext: + target: /srv/git + state: absent + +- name: Apply new SELinux file context to filesystem + ansible.builtin.command: restorecon -irv /srv/git_repos +''' + +RETURN = r''' +# Default return values +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +# Add missing entries (backward compatible) +if HAVE_SEOBJECT: + seobject.file_types.update( + a=seobject.SEMANAGE_FCONTEXT_ALL, + b=seobject.SEMANAGE_FCONTEXT_BLOCK, + c=seobject.SEMANAGE_FCONTEXT_CHAR, + d=seobject.SEMANAGE_FCONTEXT_DIR, + f=seobject.SEMANAGE_FCONTEXT_REG, + l=seobject.SEMANAGE_FCONTEXT_LINK, + p=seobject.SEMANAGE_FCONTEXT_PIPE, + s=seobject.SEMANAGE_FCONTEXT_SOCK, + ) + +# Make backward compatible +option_to_file_type_str = dict( + a='all files', + b='block device', + c='character device', + d='directory', + f='regular file', + l='symbolic link', + p='named pipe', + s='socket', +) + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def semanage_fcontext_exists(sefcontext, target, ftype): + ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + + # Beware that records comprise of a string representation of the file_type + record = (target, option_to_file_type_str[ftype]) + records = sefcontext.get_all() + try: + return records[record] + except KeyError: + return None + + +def semanage_fcontext_substitute_exists(sefcontext, target): + ''' Get the SELinux file context path substitution definition from policy. Return None if it does not exist. ''' + + return sefcontext.equiv_dist.get(target, sefcontext.equiv.get(target)) + + +def semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser, sestore=''): + ''' Add or modify SELinux file context mapping definition to the policy. 
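+
+    Roughly the seobject calls involved (an illustrative summary of the body
+    below, not upstream documentation):
+
+        sefcontext = seobject.fcontextRecords(sestore)
+        sefcontext.add(target, setype, ftype, serange, seuser)      # add a new mapping
+        sefcontext.modify(target, setype, ftype, serange, seuser)   # change an existing one
+        sefcontext.add_equal(target, substitute)                    # new path substitution
+        sefcontext.modify_equal(target, substitute)                 # change a substitution
+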
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + if substitute is None: + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Modify existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if seuser is None: + seuser = orig_seuser + if serange is None: + serange = orig_serange + + if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: + if not module.check_mode: + sefcontext.modify(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) + else: + # Add missing entry + if seuser is None: + seuser = 'system_u' + if serange is None: + serange = 's0' + + if not module.check_mode: + sefcontext.add(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Addition to semanage file context mappings\n' + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) + else: + exists = semanage_fcontext_substitute_exists(sefcontext, target) + if exists: + # Modify existing path substitution entry + orig_substitute = exists + + if substitute != orig_substitute: + if not module.check_mode: + sefcontext.modify_equal(target, substitute) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context path substitutions\n' + prepared_diff += '-%s = %s\n' % (target, orig_substitute) + prepared_diff += '+%s = %s\n' % (target, substitute) + else: + # Add missing path substitution entry + if not module.check_mode: + sefcontext.add_equal(target, substitute) + changed = True + if module._diff: + prepared_diff += '# Addition to semanage file context path substitutions\n' + prepared_diff += '+%s = %s\n' % (target, substitute) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) + + +def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload, sestore=''): + ''' Delete SELinux file context mapping definition from the policy. 
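+
+    Comparable to `semanage fcontext -d '<target>'` on the command line (an
+    editorial note, not taken from upstream docs). As the body below shows,
+    sefcontext.delete() serves both cases: dropping a type mapping and
+    dropping a matching path substitution.
+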
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + substitute_exists = semanage_fcontext_substitute_exists(sefcontext, target) + if exists and substitute is None: + # Remove existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if not module.check_mode: + sefcontext.delete(target, ftype) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) + if substitute_exists and setype is None and ((substitute is not None and substitute_exists == substitute) or substitute is None): + # Remove existing path substitution entry + orig_substitute = substitute_exists + + if not module.check_mode: + sefcontext.delete(target, orig_substitute) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context path substitutions\n' + prepared_diff += '-%s = %s\n' % (target, orig_substitute) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, **result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + target=dict(type='str', required=True, aliases=['path']), + ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())), + setype=dict(type='str'), + substitute=dict(type='str', aliases=['equal']), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange']), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + mutually_exclusive=[ + ('setype', 'substitute'), + ('substitute', 'ftype'), + ('substitute', 'seuser'), + ('substitute', 'selevel'), + ], + required_if=[ + ('state', 'present', ('setype', 'substitute'), True), + ], + + supports_check_mode=True, + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + target = module.params['target'] + ftype = module.params['ftype'] + setype = module.params['setype'] + substitute = module.params['substitute'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = dict(target=target, ftype=ftype, setype=setype, substitute=substitute, state=state) + + if state == 'present': + semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser) + elif state == 'absent': + semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/selinux_permissive.py b/ansible_collections/community/general/plugins/modules/selinux_permissive.py new file mode 100644 index 
000000000..7249a01b8 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/selinux_permissive.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Michael Scherer +# inspired by code of github.com/dandiker/ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: selinux_permissive +short_description: Change permissive domain in SELinux policy +description: + - Add and remove a domain from the list of permissive domains. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + domain: + description: + - The domain that will be added or removed from the list of permissive domains. + type: str + required: true + aliases: [ name ] + permissive: + description: + - Indicate if the domain should or should not be set as permissive. + type: bool + required: true + no_reload: + description: + - Disable reloading of the SELinux policy after making change to a domain's permissive setting. + - The default is C(false), which causes policy to be reloaded when a domain changes state. + - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6." + type: bool + default: false + store: + description: + - Name of the SELinux policy store to use. + type: str + default: '' +notes: + - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer). +requirements: [ policycoreutils-python ] +author: +- Michael Scherer (@mscherer) +''' + +EXAMPLES = r''' +- name: Change the httpd_t domain to permissive + community.general.selinux_permissive: + name: httpd_t + permissive: true +''' + +import traceback + +HAVE_SEOBJECT = False +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str', required=True, aliases=['name']), + store=dict(type='str', default=''), + permissive=dict(type='bool', required=True), + no_reload=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + # global vars + changed = False + store = module.params['store'] + permissive = module.params['permissive'] + domain = module.params['domain'] + no_reload = module.params['no_reload'] + + if not HAVE_SEOBJECT: + module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"), + exception=SEOBJECT_IMP_ERR) + + try: + permissive_domains = seobject.permissiveRecords(store) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + + # not supported on EL 6 + if 'set_reload' in dir(permissive_domains): + permissive_domains.set_reload(not no_reload) + + try: + all_domains = permissive_domains.get_all() + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + + if permissive: + if domain not in all_domains: + if not module.check_mode: + try: + permissive_domains.add(domain) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), 
exception=traceback.format_exc()) + changed = True + else: + if domain in all_domains: + if not module.check_mode: + try: + permissive_domains.delete(domain) + except ValueError as e: + module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc()) + changed = True + + module.exit_json(changed=changed, store=store, + permissive=permissive, domain=domain) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/selogin.py b/ansible_collections/community/general/plugins/modules/selogin.py new file mode 100644 index 000000000..57482b090 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/selogin.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Petr Lautrbach +# Based on seport.py module (c) 2014, Dan Keder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: selogin +short_description: Manages linux user to SELinux user mapping +description: + - Manages linux user to SELinux user mapping +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + login: + type: str + description: + - a Linux user + required: true + seuser: + type: str + description: + - SELinux user name + selevel: + type: str + aliases: [ serange ] + description: + - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range. + default: s0 + state: + type: str + description: + - Desired mapping value. + default: present + choices: [ 'present', 'absent' ] + reload: + description: + - Reload SELinux policy after commit. 
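+    # Editorial note (assumption): reload maps to seobject.loginRecords.set_reload() below;
+    # the command-line counterpart of a mapping change is `semanage login -a -s <seuser> <login>`.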
+ type: bool + default: true + ignore_selinux_state: + description: + - Run independent of selinux runtime state + type: bool + default: false +notes: + - The changes are persistent across reboots + - Not tested on any debian based system +requirements: [ 'libselinux', 'policycoreutils' ] +author: +- Dan Keder (@dankeder) +- Petr Lautrbach (@bachradsusi) +- James Cassell (@jamescassell) +''' + +EXAMPLES = ''' +- name: Modify the default user on the system to the guest_u user + community.general.selogin: + login: __default__ + seuser: guest_u + state: present + +- name: Assign gijoe user on an MLS machine a range and to the staff_u user + community.general.selogin: + login: gijoe + seuser: staff_u + serange: SystemLow-Secret + state: present + +- name: Assign all users in the engineering group to the staff_u user + community.general.selogin: + login: '%engineering' + seuser: staff_u + state: present +''' + +RETURN = r''' +# Default return values +''' + + +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): + """ Add linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux User or a Linux group if it begins with % + + :type seuser: str + :param proto: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type serange: str + :param serange: SELinux MLS/MCS range (defaults to 's0') + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + # for local_login in all_logins: + if login not in all_logins.keys(): + change = True + if not module.check_mode: + selogin.add(login, seuser, serange) + else: + if all_logins[login][0] != seuser or all_logins[login][1] != serange: + change = True + if not module.check_mode: + selogin.modify(login, seuser, serange) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def semanage_login_del(module, login, seuser, do_reload, sestore=''): + """ Delete linux user to SELinux user mapping + + :type module: AnsibleModule + :param module: Ansible module + + :type login: str + :param login: a Linux User or a Linux group if it begins with % + + :type seuser: str + :param proto: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l' + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + try: + selogin = 
seobject.loginRecords(sestore) + selogin.set_reload(do_reload) + change = False + all_logins = selogin.get_all() + # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore)) + if login in all_logins.keys(): + change = True + if not module.check_mode: + selogin.delete(login) + + except (ValueError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + login=dict(type='str', required=True), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange'], default='s0'), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + required_if=[ + ["state", "present", ["seuser"]] + ], + supports_check_mode=True + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + login = module.params['login'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = { + 'login': login, + 'seuser': seuser, + 'serange': serange, + 'state': state, + } + + if state == 'present': + result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange) + elif state == 'absent': + result['changed'] = semanage_login_del(module, login, seuser, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sendgrid.py b/ansible_collections/community/general/plugins/modules/sendgrid.py new file mode 100644 index 000000000..2c0cc9a5b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sendgrid.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Matt Makai +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: sendgrid +short_description: Sends an email with the SendGrid API +description: + - "Sends an email with a SendGrid account through their API, not through + the SMTP service." +notes: + - "This module is non-idempotent because it sends an email through the + external API. It is idempotent only in the case that the module fails." + - "Like the other notification modules, this one requires an external + dependency to work. In this case, you'll need an active SendGrid + account." 
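+  # (editorial, illustrative) the pinned V2-era client can be installed with:
+  #   pip install 'sendgrid<=1.6.22'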
+ - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers + you must pip install sendgrid" + - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)" +requirements: + - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + username: + type: str + description: + - Username for logging into the SendGrid account. + - Since 2.2 it is only required if I(api_key) is not supplied. + password: + type: str + description: + - Password that corresponds to the username. + - Since 2.2 it is only required if I(api_key) is not supplied. + from_address: + type: str + description: + - The address in the "from" field for the email. + required: true + to_addresses: + type: list + elements: str + description: + - A list with one or more recipient email addresses. + required: true + subject: + type: str + description: + - The desired subject for the email. + required: true + api_key: + type: str + description: + - Sendgrid API key to use instead of username/password. + cc: + type: list + elements: str + description: + - A list of email addresses to cc. + bcc: + type: list + elements: str + description: + - A list of email addresses to bcc. + attachments: + type: list + elements: path + description: + - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs). + from_name: + type: str + description: + - The name you want to appear in the from field, i.e 'John Doe'. + html_body: + description: + - Whether the body is html content that should be rendered. + type: bool + default: false + headers: + type: dict + description: + - A dict to pass on as headers. + body: + type: str + description: + - The e-mail body content. + required: true +author: "Matt Makai (@makaimc)" +''' + +EXAMPLES = r''' +- name: Send an email to a single recipient that the deployment was successful + community.general.sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject: "Deployment success." + body: "The most recent Ansible deployment was successful." + delegate_to: localhost + +- name: Send an email to more than one recipient that the build failed + community.general.sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject: "Build failure!." + body: "Unable to pull source repository from Git server." 
+ delegate_to: localhost +''' + +# ======================================= +# sendgrid module support methods +# +import os +import traceback + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +SENDGRID_IMP_ERR = None +try: + import sendgrid + HAS_SENDGRID = True +except ImportError: + SENDGRID_IMP_ERR = traceback.format_exc() + HAS_SENDGRID = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.common.text.converters import to_bytes +from ansible.module_utils.urls import fetch_url + + +def post_sendgrid_api(module, username, password, from_address, to_addresses, + subject, body, api_key=None, cc=None, bcc=None, attachments=None, + html_body=False, from_name=None, headers=None): + + if not HAS_SENDGRID: + SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" + AGENT = "Ansible" + data = {'api_user': username, 'api_key': password, + 'from': from_address, 'subject': subject, 'text': body} + encoded_data = urlencode(data) + to_addresses_api = '' + for recipient in to_addresses: + recipient = to_bytes(recipient, errors='surrogate_or_strict') + to_addresses_api += '&to[]=%s' % recipient + encoded_data += to_addresses_api + + headers = {'User-Agent': AGENT, + 'Content-type': 'application/x-www-form-urlencoded', + 'Accept': 'application/json'} + return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST') + else: + # Remove this check when adding Sendgrid API v3 support + if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"): + module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.") + + if api_key: + sg = sendgrid.SendGridClient(api_key) + else: + sg = sendgrid.SendGridClient(username, password) + + message = sendgrid.Mail() + message.set_subject(subject) + + for recip in to_addresses: + message.add_to(recip) + + if cc: + for recip in cc: + message.add_cc(recip) + if bcc: + for recip in bcc: + message.add_bcc(recip) + + if headers: + message.set_headers(headers) + + if attachments: + for f in attachments: + name = os.path.basename(f) + message.add_attachment(name, f) + + if from_name: + message.set_from('%s <%s.' 
% (from_name, from_address)) + else: + message.set_from(from_address) + + if html_body: + message.set_html(body) + else: + message.set_text(body) + + return sg.send(message) +# ======================================= +# Main +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + username=dict(required=False), + password=dict(required=False, no_log=True), + api_key=dict(required=False, no_log=True), + bcc=dict(required=False, type='list', elements='str'), + cc=dict(required=False, type='list', elements='str'), + headers=dict(required=False, type='dict'), + from_address=dict(required=True), + from_name=dict(required=False), + to_addresses=dict(required=True, type='list', elements='str'), + subject=dict(required=True), + body=dict(required=True), + html_body=dict(required=False, default=False, type='bool'), + attachments=dict(required=False, type='list', elements='path') + ), + supports_check_mode=True, + mutually_exclusive=[ + ['api_key', 'password'], + ['api_key', 'username'] + ], + required_together=[['username', 'password']], + ) + + username = module.params['username'] + password = module.params['password'] + api_key = module.params['api_key'] + bcc = module.params['bcc'] + cc = module.params['cc'] + headers = module.params['headers'] + from_name = module.params['from_name'] + from_address = module.params['from_address'] + to_addresses = module.params['to_addresses'] + subject = module.params['subject'] + body = module.params['body'] + html_body = module.params['html_body'] + attachments = module.params['attachments'] + + sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments] + + if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID: + reason = 'when using any of the following arguments: ' \ + 'api_key, bcc, cc, headers, from_name, html_body, attachments' + module.fail_json(msg=missing_required_lib('sendgrid', reason=reason), + exception=SENDGRID_IMP_ERR) + + response, info = post_sendgrid_api(module, username, password, + from_address, to_addresses, subject, body, attachments=attachments, + bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key) + + if not HAS_SENDGRID: + if info['status'] != 200: + module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg']) + else: + if response != 200: + module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message']) + + module.exit_json(msg=subject, changed=False) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sensu_check.py b/ansible_collections/community/general/plugins/modules/sensu_check.py new file mode 100644 index 000000000..1ac2316a8 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sensu_check.py @@ -0,0 +1,376 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Anders Ingemann +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: sensu_check +short_description: Manage Sensu checks +description: + - Manage the checks that should be run on a machine by I(Sensu). + - Most options do not have a default and will not be added to the check definition unless specified. 
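+  # Editorial sketch (assumption): a minimal resulting entry in the checks file is
+  #   {"checks": {"cpu_load": {"command": "...", "interval": 60, "subscribers": ["common"]}}}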
+ - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module, + - they are simply specified for your convenience. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - The name of the check + - This is the key that is used to determine whether a check exists + required: true + state: + type: str + description: + - Whether the check should be present or not + choices: [ 'present', 'absent' ] + default: present + path: + type: str + description: + - Path to the json file of the check to be added/removed. + - Will be created if it does not exist (unless I(state=absent)). + - The parent folders need to exist when I(state=present), otherwise an error will be thrown + default: /etc/sensu/conf.d/checks.json + backup: + description: + - Create a backup file (if yes), including the timestamp information so + - you can get the original file back if you somehow clobbered it incorrectly. + type: bool + default: false + command: + type: str + description: + - Path to the sensu check to run (not required when I(state=absent)) + handlers: + type: list + elements: str + description: + - List of handlers to notify when the check fails + subscribers: + type: list + elements: str + description: + - List of subscribers/channels this check should run for + - See sensu_subscribers to subscribe a machine to a channel + interval: + type: int + description: + - Check interval in seconds + timeout: + type: int + description: + - Timeout for the check + - If not specified, it defaults to 10. + ttl: + type: int + description: + - Time to live in seconds until the check is considered stale + handle: + description: + - Whether the check should be handled or not + - Default is C(false). + type: bool + subdue_begin: + type: str + description: + - When to disable handling of check failures + subdue_end: + type: str + description: + - When to enable handling of check failures + dependencies: + type: list + elements: str + description: + - Other checks this check depends on, if dependencies fail handling of this check will be disabled + metric: + description: + - Whether the check is a metric + type: bool + default: false + standalone: + description: + - Whether the check should be scheduled by the sensu client or server + - This option obviates the need for specifying the I(subscribers) option + - Default is C(false). + type: bool + publish: + description: + - Whether the check should be scheduled at all. + - You can still issue it via the sensu api + - Default is C(false). + type: bool + occurrences: + type: int + description: + - Number of event occurrences before the handler should take action + - If not specified, defaults to 1. + refresh: + type: int + description: + - Number of seconds handlers should wait before taking second action + aggregate: + description: + - Classifies the check as an aggregate check, + - making it available via the aggregate API + - Default is C(false). + type: bool + low_flap_threshold: + type: int + description: + - The low threshold for flap detection + high_flap_threshold: + type: int + description: + - The high threshold for flap detection + custom: + type: dict + description: + - A hash/dictionary of custom parameters for mixing to the configuration. 
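+      # e.g. (illustrative): custom: { notification: "CPU load is high", playbook: "https://wiki.example.com/cpu" }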
+ - You can't rewrite others module parameters using this + source: + type: str + description: + - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch). +author: "Anders Ingemann (@andsens)" +''' + +EXAMPLES = ''' +# Fetch metrics about the CPU load every 60 seconds, +# the sensu server has a handler called 'relay' which forwards stats to graphite +- name: Get cpu metrics + community.general.sensu_check: + name: cpu_load + command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb + metric: true + handlers: relay + subscribers: common + interval: 60 + +# Check whether nginx is running +- name: Check nginx process + community.general.sensu_check: + name: nginx_running + command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid + handlers: default + subscribers: nginx + interval: 60 + +# Stop monitoring the disk capacity. +# Note that the check will still show up in the sensu dashboard, +# to remove it completely you need to issue a DELETE request to the sensu api. +- name: Check disk + community.general.sensu_check: + name: check_disk_capacity + state: absent +''' + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def sensu_check(module, path, name, state='present', backup=False): + changed = False + reasons = [] + + stream = None + try: + try: + stream = open(path, 'r') + config = json.load(stream) + except IOError as e: + if e.errno == 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) + finally: + if stream: + stream.close() + + if 'checks' not in config: + if state == 'absent': + reasons.append('`checks\' section did not exist and state is `absent\'') + return changed, reasons + config['checks'] = {} + changed = True + reasons.append('`checks\' section did not exist') + + if state == 'absent': + if name in config['checks']: + del config['checks'][name] + changed = True + reasons.append('check was present and state is `absent\'') + + if state == 'present': + if name not in config['checks']: + check = {} + config['checks'][name] = check + changed = True + reasons.append('check was absent and state is `present\'') + else: + check = config['checks'][name] + simple_opts = ['command', + 'handlers', + 'subscribers', + 'interval', + 'timeout', + 'ttl', + 'handle', + 'dependencies', + 'standalone', + 'publish', + 'occurrences', + 'refresh', + 'aggregate', + 'low_flap_threshold', + 'high_flap_threshold', + 'source', + ] + for opt in simple_opts: + if module.params[opt] is not None: + if opt not in check or check[opt] != module.params[opt]: + check[opt] = module.params[opt] + changed = True + reasons.append('`{opt}\' did not exist or was different'.format(opt=opt)) + else: + if opt in check: + del check[opt] + changed = True + reasons.append('`{opt}\' was removed'.format(opt=opt)) + + if module.params['custom']: + # Convert to json + custom_params = module.params['custom'] + overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']) + if overwrited_fields: + msg = 'You can\'t overwriting standard module parameters via "custom". 
You are trying overwrite: {opt}'.format(opt=list(overwrited_fields)) + module.fail_json(msg=msg) + + for k, v in custom_params.items(): + if k in config['checks'][name]: + if not config['checks'][name][k] == v: + changed = True + reasons.append('`custom param {opt}\' was changed'.format(opt=k)) + else: + changed = True + reasons.append('`custom param {opt}\' was added'.format(opt=k)) + check[k] = v + simple_opts += custom_params.keys() + + # Remove obsolete custom params + for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']): + changed = True + reasons.append('`custom param {opt}\' was deleted'.format(opt=opt)) + del check[opt] + + if module.params['metric']: + if 'type' not in check or check['type'] != 'metric': + check['type'] = 'metric' + changed = True + reasons.append('`type\' was not defined or not `metric\'') + if not module.params['metric'] and 'type' in check: + del check['type'] + changed = True + reasons.append('`type\' was defined') + + if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None: + subdue = {'begin': module.params['subdue_begin'], + 'end': module.params['subdue_end'], + } + if 'subdue' not in check or check['subdue'] != subdue: + check['subdue'] = subdue + changed = True + reasons.append('`subdue\' did not exist or was different') + else: + if 'subdue' in check: + del check['subdue'] + changed = True + reasons.append('`subdue\' was removed') + + if changed and not module.check_mode: + if backup: + module.backup_local(path) + try: + try: + stream = open(path, 'w') + stream.write(json.dumps(config, indent=2) + '\n') + except IOError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + finally: + if stream: + stream.close() + + return changed, reasons + + +def main(): + + arg_spec = {'name': {'type': 'str', 'required': True}, + 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, + 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, + 'backup': {'type': 'bool', 'default': False}, + 'command': {'type': 'str'}, + 'handlers': {'type': 'list', 'elements': 'str'}, + 'subscribers': {'type': 'list', 'elements': 'str'}, + 'interval': {'type': 'int'}, + 'timeout': {'type': 'int'}, + 'ttl': {'type': 'int'}, + 'handle': {'type': 'bool'}, + 'subdue_begin': {'type': 'str'}, + 'subdue_end': {'type': 'str'}, + 'dependencies': {'type': 'list', 'elements': 'str'}, + 'metric': {'type': 'bool', 'default': False}, + 'standalone': {'type': 'bool'}, + 'publish': {'type': 'bool'}, + 'occurrences': {'type': 'int'}, + 'refresh': {'type': 'int'}, + 'aggregate': {'type': 'bool'}, + 'low_flap_threshold': {'type': 'int'}, + 'high_flap_threshold': {'type': 'int'}, + 'custom': {'type': 'dict'}, + 'source': {'type': 'str'}, + } + + required_together = [['subdue_begin', 'subdue_end']] + + module = AnsibleModule(argument_spec=arg_spec, + required_together=required_together, + supports_check_mode=True) + if module.params['state'] != 'absent' and module.params['command'] is None: + module.fail_json(msg="missing required arguments: %s" % ",".join(['command'])) + + path = module.params['path'] + name = module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_check(module, path, name, state, backup) + + module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/community/general/plugins/modules/sensu_client.py b/ansible_collections/community/general/plugins/modules/sensu_client.py new file mode 100644 index 000000000..2e0bd12ee --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sensu_client.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Red Hat Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: sensu_client +author: "David Moreau Simard (@dmsimard)" +short_description: Manages Sensu client configuration +description: + - Manages Sensu client configuration. + - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + type: str + description: + - Whether the client should be present or not + choices: [ 'present', 'absent' ] + default: present + name: + type: str + description: + - A unique name for the client. The name cannot contain special characters or spaces. + - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu). + address: + type: str + description: + - An address to help identify and reach the client. This is only informational, usually an IP address or hostname. + - If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu). + subscriptions: + type: list + elements: str + description: + - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver). + - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions. + - The subscriptions array items must be strings. + safe_mode: + description: + - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check. + type: bool + default: false + redact: + type: list + elements: str + description: + - Client definition attributes to redact (values) when logging and sending client keepalives. + socket: + type: dict + description: + - The socket definition scope, used to configure the Sensu client socket. + keepalives: + description: + - If Sensu should monitor keepalives for this client. + type: bool + default: true + keepalive: + type: dict + description: + - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc). + registration: + type: dict + description: + - The registration definition scope, used to configure Sensu registration event handlers. + deregister: + description: + - If a deregistration event should be created upon Sensu client process stop. + - Default is C(false). + type: bool + deregistration: + type: dict + description: + - The deregistration definition scope, used to configure automated Sensu client de-registration. + ec2: + type: dict + description: + - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only). 
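Each of these definition scopes is passed through verbatim and nested under a top-level client key in the file the module writes (/etc/sensu/conf.d/client.json, as the source below shows). A minimal sketch of that assembly, mirroring the module's own parameter loop; the parameter values here are hypothetical:

    import json

    # Hypothetical module parameters; only non-None values are copied through,
    # mirroring the "for arg in args" loop in the module source below.
    params = {
        'name': 'web01',
        'address': None,  # None, so omitted from the file
        'subscriptions': ['default', 'webserver'],
        'safe_mode': False,
    }

    config = {'client': {}}
    for arg in ('name', 'address', 'subscriptions', 'safe_mode'):
        if params.get(arg) is not None:
            config['client'][arg] = params[arg]

    print(json.dumps(config, indent=4))
    # {"client": {"name": "web01", "subscriptions": ["default", "webserver"], "safe_mode": false}}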
+ chef: + type: dict + description: + - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only). + puppet: + type: dict + description: + - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only). + servicenow: + type: dict + description: + - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only). +notes: + - Check mode is supported +''' + +EXAMPLES = ''' +# Minimum possible configuration +- name: Configure Sensu client + community.general.sensu_client: + subscriptions: + - default + +# With customization +- name: Configure Sensu client + community.general.sensu_client: + name: "{{ ansible_fqdn }}" + address: "{{ ansible_default_ipv4['address'] }}" + subscriptions: + - default + - webserver + redact: + - password + socket: + bind: 127.0.0.1 + port: 3030 + keepalive: + thresholds: + warning: 180 + critical: 300 + handlers: + - email + custom: + - broadcast: irc + occurrences: 3 + register: client + notify: + - Restart sensu-client + +- name: Secure Sensu client configuration file + ansible.builtin.file: + path: "{{ client['file'] }}" + owner: "sensu" + group: "sensu" + mode: "0600" + +- name: Delete the Sensu client configuration + community.general.sensu_client: + state: "absent" +''' + +RETURN = ''' +config: + description: Effective client configuration, when state is present + returned: success + type: dict + sample: {'name': 'client', 'subscriptions': ['default']} +file: + description: Path to the client configuration file + returned: success + type: str + sample: "/etc/sensu/conf.d/client.json" +''' + +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + supports_check_mode=True, + argument_spec=dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(type='str', ), + address=dict(type='str', ), + subscriptions=dict(type='list', elements="str"), + safe_mode=dict(type='bool', default=False), + redact=dict(type='list', elements="str"), + socket=dict(type='dict'), + keepalives=dict(type='bool', default=True), + keepalive=dict(type='dict'), + registration=dict(type='dict'), + deregister=dict(type='bool'), + deregistration=dict(type='dict'), + ec2=dict(type='dict'), + chef=dict(type='dict'), + puppet=dict(type='dict'), + servicenow=dict(type='dict') + ), + required_if=[ + ['state', 'present', ['subscriptions']] + ] + ) + + state = module.params['state'] + path = "/etc/sensu/conf.d/client.json" + + if state == 'absent': + if os.path.exists(path): + if module.check_mode: + msg = '{path} would have been deleted'.format(path=path) + module.exit_json(msg=msg, changed=True) + else: + try: + os.remove(path) + msg = '{path} deleted successfully'.format(path=path) + module.exit_json(msg=msg, changed=True) + except OSError as e: + msg = 'Exception when trying to delete {path}: {exception}' + module.fail_json( + msg=msg.format(path=path, exception=str(e))) + else: + # Idempotency: it's okay if the file doesn't exist + msg = '{path} already does not exist'.format(path=path) + module.exit_json(msg=msg) + + # Build client configuration from module arguments + config = {'client': {}} + args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact', + 'socket', 'keepalives', 'keepalive', 'registration', 'deregister', + 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow'] + + for arg in args: + if arg in 
module.params and module.params[arg] is not None: + config['client'][arg] = module.params[arg] + + # Load the current config, if there is one, so we can compare + current_config = None + try: + current_config = json.load(open(path, 'r')) + except (IOError, ValueError): + # File either doesn't exist or it's invalid JSON + pass + + if current_config is not None and current_config == config: + # Config is the same, let's not change anything + module.exit_json(msg='Client configuration is already up to date', + config=config['client'], + file=path) + + # Validate that directory exists before trying to write to it + if not module.check_mode and not os.path.exists(os.path.dirname(path)): + try: + os.makedirs(os.path.dirname(path)) + except OSError as e: + module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), + str(e))) + + if module.check_mode: + module.exit_json(msg='Client configuration would have been updated', + changed=True, + config=config['client'], + file=path) + + try: + with open(path, 'w') as client: + client.write(json.dumps(config, indent=4)) + module.exit_json(msg='Client configuration updated', + changed=True, + config=config['client'], + file=path) + except (OSError, IOError) as e: + module.fail_json(msg='Unable to write file {0}: {1}'.format(path, + str(e))) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sensu_handler.py b/ansible_collections/community/general/plugins/modules/sensu_handler.py new file mode 100644 index 000000000..bbb8dc612 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sensu_handler.py @@ -0,0 +1,281 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Red Hat Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: sensu_handler +author: "David Moreau Simard (@dmsimard)" +short_description: Manages Sensu handler configuration +description: + - Manages Sensu handler configuration + - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + type: str + description: + - Whether the handler should be present or not + choices: [ 'present', 'absent' ] + default: present + name: + type: str + description: + - A unique name for the handler. The name cannot contain special characters or spaces. + required: true + type: + type: str + description: + - The handler type + choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ] + filter: + type: str + description: + - The Sensu event filter (name) to use when filtering events for the handler. + filters: + type: list + elements: str + description: + - An array of Sensu event filters (names) to use when filtering events for the handler. + - Each array item must be a string. + severities: + type: list + elements: str + description: + - An array of check result severities the handler will handle. + - 'NOTE: event resolution bypasses this filtering.' + - "Example: [ 'warning', 'critical', 'unknown' ]." + mutator: + type: str + description: + - The Sensu event mutator (name) to use to mutate event data for the handler. 
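Pipe-type handlers and mutators are plain executables: Sensu writes the event JSON to their STDIN, as the command option below notes. A minimal, purely hypothetical pipe handler to illustrate that contract (it is not part of this module):

    #!/usr/bin/env python
    # Hypothetical Sensu 1.x pipe handler: read the event from STDIN and
    # print a one-line summary. A real handler would notify IRC, email, etc.
    import json
    import sys

    event = json.load(sys.stdin)
    client = event.get('client', {}).get('name', 'unknown')
    check = event.get('check', {}).get('name', 'unknown')
    status = event.get('check', {}).get('status', -1)
    print('event: client=%s check=%s status=%s' % (client, check, status))
    sys.exit(0)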
+ timeout: + type: int + description: + - The handler execution duration timeout in seconds (hard stop). + - Only used by pipe and tcp handler types. + default: 10 + handle_silenced: + description: + - If events matching one or more silence entries should be handled. + type: bool + default: false + handle_flapping: + description: + - If events in the flapping state should be handled. + type: bool + default: false + command: + type: str + description: + - The handler command to be executed. + - The event data is passed to the process via STDIN. + - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").' + socket: + type: dict + description: + - The socket definition scope, used to configure the TCP/UDP handler socket. + - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").' + pipe: + type: dict + description: + - The pipe definition scope, used to configure the Sensu transport pipe. + - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").' + handlers: + type: list + elements: str + description: + - An array of Sensu event handlers (names) to use for events using the handler set. + - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").' +notes: + - Check mode is supported +''' + +EXAMPLES = ''' +# Configure a handler that sends event data as STDIN (pipe) +- name: Configure IRC Sensu handler + community.general.sensu_handler: + name: "irc_handler" + type: "pipe" + command: "/usr/local/bin/notify-irc.sh" + severities: + - "ok" + - "critical" + - "warning" + - "unknown" + timeout: 15 + notify: + - Restart sensu-client + - Restart sensu-server + +# Delete a handler +- name: Delete IRC Sensu handler + community.general.sensu_handler: + name: "irc_handler" + state: "absent" + +# Example of a TCP handler +- name: Configure TCP Sensu handler + community.general.sensu_handler: + name: "tcp_handler" + type: "tcp" + timeout: 30 + socket: + host: "10.0.1.99" + port: 4444 + register: handler + notify: + - Restart sensu-client + - Restart sensu-server + +- name: Secure Sensu handler configuration file + ansible.builtin.file: + path: "{{ handler['file'] }}" + owner: "sensu" + group: "sensu" + mode: "0600" +''' + +RETURN = ''' +config: + description: Effective handler configuration, when state is present + returned: success + type: dict + sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'} +file: + description: Path to the handler configuration file + returned: success + type: str + sample: "/etc/sensu/conf.d/handlers/irc.json" +name: + description: Name of the handler + returned: success + type: str + sample: "irc" +''' + +import json +import os + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + supports_check_mode=True, + argument_spec=dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(type='str', required=True), + type=dict(type='str', choices=['pipe', 'tcp', 'udp', 'transport', 'set']), + filter=dict(type='str'), + filters=dict(type='list', elements='str'), + severities=dict(type='list', elements='str'), + mutator=dict(type='str'), + timeout=dict(type='int', default=10), + handle_silenced=dict(type='bool', default=False), + handle_flapping=dict(type='bool', default=False), + command=dict(type='str'), + socket=dict(type='dict'), 
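# NOTE: the required_if rules below tie each type-specific option to its
# handler type: 'pipe' requires command, 'tcp'/'udp' require socket,
# 'transport' requires pipe, and 'set' requires handlers.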
+ pipe=dict(type='dict'), + handlers=dict(type='list', elements='str'), + ), + required_if=[ + ['state', 'present', ['type']], + ['type', 'pipe', ['command']], + ['type', 'tcp', ['socket']], + ['type', 'udp', ['socket']], + ['type', 'transport', ['pipe']], + ['type', 'set', ['handlers']] + ] + ) + + state = module.params['state'] + name = module.params['name'] + path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name) + + if state == 'absent': + if os.path.exists(path): + if module.check_mode: + msg = '{path} would have been deleted'.format(path=path) + module.exit_json(msg=msg, changed=True) + else: + try: + os.remove(path) + msg = '{path} deleted successfully'.format(path=path) + module.exit_json(msg=msg, changed=True) + except OSError as e: + msg = 'Exception when trying to delete {path}: {exception}' + module.fail_json( + msg=msg.format(path=path, exception=str(e))) + else: + # Idempotency: it's okay if the file doesn't exist + msg = '{path} already does not exist'.format(path=path) + module.exit_json(msg=msg) + + # Build handler configuration from module arguments + config = {'handlers': {name: {}}} + args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout', + 'handle_silenced', 'handle_flapping', 'command', 'socket', + 'pipe', 'handlers'] + + for arg in args: + if arg in module.params and module.params[arg] is not None: + config['handlers'][name][arg] = module.params[arg] + + # Load the current config, if there is one, so we can compare + current_config = None + try: + current_config = json.load(open(path, 'r')) + except (IOError, ValueError): + # File either doesn't exist or it's invalid JSON + pass + + if current_config is not None and current_config == config: + # Config is the same, let's not change anything + module.exit_json(msg='Handler configuration is already up to date', + config=config['handlers'][name], + file=path, + name=name) + + # Validate that directory exists before trying to write to it + if not module.check_mode and not os.path.exists(os.path.dirname(path)): + try: + os.makedirs(os.path.dirname(path)) + except OSError as e: + module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path), + str(e))) + + if module.check_mode: + module.exit_json(msg='Handler configuration would have been updated', + changed=True, + config=config['handlers'][name], + file=path, + name=name) + + try: + with open(path, 'w') as handler: + handler.write(json.dumps(config, indent=4)) + module.exit_json(msg='Handler configuration updated', + changed=True, + config=config['handlers'][name], + file=path, + name=name) + except (OSError, IOError) as e: + module.fail_json(msg='Unable to write file {0}: {1}'.format(path, + str(e))) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sensu_silence.py b/ansible_collections/community/general/plugins/modules/sensu_silence.py new file mode 100644 index 000000000..14c664755 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sensu_silence.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Steven Bambling +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: sensu_silence +author: Steven Bambling (@smbambling) +short_description: Manage Sensu silence entries +description: + - 
Create and clear (delete) a silence entries via the Sensu API + for subscriptions and checks. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + check: + type: str + description: + - Specifies the check which the silence entry applies to. + creator: + type: str + description: + - Specifies the entity responsible for this entry. + expire: + type: int + description: + - If specified, the silence entry will be automatically cleared + after this number of seconds. + expire_on_resolve: + description: + - If specified as true, the silence entry will be automatically + cleared once the condition it is silencing is resolved. + type: bool + reason: + type: str + description: + - If specified, this free-form string is used to provide context or + rationale for the reason this silence entry was created. + state: + type: str + description: + - Specifies to create or clear (delete) a silence entry via the Sensu API + default: present + choices: ['present', 'absent'] + subscription: + type: str + description: + - Specifies the subscription which the silence entry applies to. + - To create a silence entry for a client prepend C(client:) to client name. + Example - C(client:server1.example.dev) + required: true + url: + type: str + description: + - Specifies the URL of the Sensu monitoring host server. + required: false + default: http://127.0.01:4567 +''' + +EXAMPLES = ''' +# Silence ALL checks for a given client +- name: Silence server1.example.dev + community.general.sensu_silence: + subscription: client:server1.example.dev + creator: "{{ ansible_user_id }}" + reason: Performing maintenance + +# Silence specific check for a client +- name: Silence CPU_Usage check for server1.example.dev + community.general.sensu_silence: + subscription: client:server1.example.dev + check: CPU_Usage + creator: "{{ ansible_user_id }}" + reason: Investigation alert issue + +# Silence multiple clients from a dict + silence: + server1.example.dev: + reason: 'Deployment in progress' + server2.example.dev: + reason: 'Deployment in progress' + +- name: Silence several clients from a dict + community.general.sensu_silence: + subscription: "client:{{ item.key }}" + reason: "{{ item.value.reason }}" + creator: "{{ ansible_user_id }}" + with_dict: "{{ silence }}" +''' + +RETURN = ''' +''' + +import json + +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +def query(module, url, check, subscription): + headers = { + 'Content-Type': 'application/json', + } + + url = url + '/silenced' + + request_data = { + 'check': check, + 'subscription': subscription, + } + + # Remove keys with None value + for k, v in dict(request_data).items(): + if v is None: + del request_data[k] + + response, info = fetch_url( + module, url, method='GET', + headers=headers, data=json.dumps(request_data) + ) + + if info['status'] == 500: + module.fail_json( + msg="Failed to query silence %s. 
Reason: %s" % (subscription, info) + ) + + try: + json_out = json.loads(to_native(response.read())) + except Exception: + json_out = "" + + return False, json_out, False + + +def clear(module, url, check, subscription): + # Test if silence exists before clearing + (rc, out, changed) = query(module, url, check, subscription) + + d = dict((i['subscription'], i['check']) for i in out) + subscription_exists = subscription in d + if check and subscription_exists: + exists = (check == d[subscription]) + else: + exists = subscription_exists + + # If check/subscription doesn't exist + # exit with changed state of False + if not exists: + return False, out, changed + + # module.check_mode is inherited from the AnsibleMOdule class + if not module.check_mode: + headers = { + 'Content-Type': 'application/json', + } + + url = url + '/silenced/clear' + + request_data = { + 'check': check, + 'subscription': subscription, + } + + # Remove keys with None value + for k, v in dict(request_data).items(): + if v is None: + del request_data[k] + + response, info = fetch_url( + module, url, method='POST', + headers=headers, data=json.dumps(request_data) + ) + + if info['status'] != 204: + module.fail_json( + msg="Failed to silence %s. Reason: %s" % (subscription, info) + ) + + try: + json_out = json.loads(to_native(response.read())) + except Exception: + json_out = "" + + return False, json_out, True + return False, out, True + + +def create( + module, url, check, creator, expire, + expire_on_resolve, reason, subscription): + (rc, out, changed) = query(module, url, check, subscription) + for i in out: + if (i['subscription'] == subscription): + if ( + (check is None or check == i['check']) and + ( + creator == '' or + creator == i['creator']) and + ( + reason == '' or + reason == i['reason']) and + ( + expire is None or expire == i['expire']) and + ( + expire_on_resolve is None or + expire_on_resolve == i['expire_on_resolve'] + ) + ): + return False, out, False + + # module.check_mode is inherited from the AnsibleMOdule class + if not module.check_mode: + headers = { + 'Content-Type': 'application/json', + } + + url = url + '/silenced' + + request_data = { + 'check': check, + 'creator': creator, + 'expire': expire, + 'expire_on_resolve': expire_on_resolve, + 'reason': reason, + 'subscription': subscription, + } + + # Remove keys with None value + for k, v in dict(request_data).items(): + if v is None: + del request_data[k] + + response, info = fetch_url( + module, url, method='POST', + headers=headers, data=json.dumps(request_data) + ) + + if info['status'] != 201: + module.fail_json( + msg="Failed to silence %s. 
Reason: %s" % + (subscription, info['msg']) + ) + + try: + json_out = json.loads(to_native(response.read())) + except Exception: + json_out = "" + + return False, json_out, True + return False, out, True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + check=dict(required=False), + creator=dict(required=False), + expire=dict(type='int', required=False), + expire_on_resolve=dict(type='bool', required=False), + reason=dict(required=False), + state=dict(default='present', choices=['present', 'absent']), + subscription=dict(required=True), + url=dict(required=False, default='http://127.0.01:4567'), + ), + supports_check_mode=True + ) + + url = module.params['url'] + check = module.params['check'] + creator = module.params['creator'] + expire = module.params['expire'] + expire_on_resolve = module.params['expire_on_resolve'] + reason = module.params['reason'] + subscription = module.params['subscription'] + state = module.params['state'] + + if state == 'present': + (rc, out, changed) = create( + module, url, check, creator, + expire, expire_on_resolve, reason, subscription + ) + + if state == 'absent': + (rc, out, changed) = clear(module, url, check, subscription) + + if rc != 0: + module.fail_json(msg="failed", result=out) + module.exit_json(msg="success", result=out, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sensu_subscription.py b/ansible_collections/community/general/plugins/modules/sensu_subscription.py new file mode 100644 index 000000000..0077e2ffa --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sensu_subscription.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Anders Ingemann +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: sensu_subscription +short_description: Manage Sensu subscriptions +description: + - Manage which I(sensu channels) a machine should subscribe to +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - The name of the channel + required: true + state: + type: str + description: + - Whether the machine should subscribe or unsubscribe from the channel + choices: [ 'present', 'absent' ] + required: false + default: present + path: + type: str + description: + - Path to the subscriptions json file + required: false + default: /etc/sensu/conf.d/subscriptions.json + backup: + description: + - Create a backup file (if yes), including the timestamp information so you + - can get the original file back if you somehow clobbered it incorrectly. 
+ type: bool + required: false + default: false +requirements: [ ] +author: Anders Ingemann (@andsens) +''' + +RETURN = ''' +reasons: + description: the reasons why the module changed or did not change something + returned: success + type: list + sample: ["channel subscription was absent and state is `present'"] +''' + +EXAMPLES = ''' +# Subscribe to the nginx channel +- name: Subscribe to nginx checks + community.general.sensu_subscription: name=nginx + +# Unsubscribe from the common checks channel +- name: Unsubscribe from common checks + community.general.sensu_subscription: name=common state=absent +''' + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def sensu_subscription(module, path, name, state='present', backup=False): + changed = False + reasons = [] + + try: + config = json.load(open(path)) + except IOError as e: + if e.errno == 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) + + if 'client' not in config: + if state == 'absent': + reasons.append('`client\' did not exist and state is `absent\'') + return changed, reasons + config['client'] = {} + changed = True + reasons.append('`client\' did not exist') + + if 'subscriptions' not in config['client']: + if state == 'absent': + reasons.append('`client.subscriptions\' did not exist and state is `absent\'') + return changed, reasons + config['client']['subscriptions'] = [] + changed = True + reasons.append('`client.subscriptions\' did not exist') + + if name not in config['client']['subscriptions']: + if state == 'absent': + reasons.append('channel subscription was absent') + return changed, reasons + config['client']['subscriptions'].append(name) + changed = True + reasons.append('channel subscription was absent and state is `present\'') + else: + if state == 'absent': + config['client']['subscriptions'].remove(name) + changed = True + reasons.append('channel subscription was present and state is `absent\'') + + if changed and not module.check_mode: + if backup: + module.backup_local(path) + try: + open(path, 'w').write(json.dumps(config, indent=2) + '\n') + except IOError as e: + module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)), + exception=traceback.format_exc()) + + return changed, reasons + + +def main(): + arg_spec = {'name': {'type': 'str', 'required': True}, + 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'}, + 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, + 'backup': {'type': 'bool', 'default': False}, + } + + module = AnsibleModule(argument_spec=arg_spec, + supports_check_mode=True) + + path = module.params['path'] + name = module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_subscription(module, path, name, state, backup) + + module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/seport.py b/ansible_collections/community/general/plugins/modules/seport.py new file mode 100644 index 000000000..964e8f0ed --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/seport.py @@ -0,0 +1,331 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Dan Keder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: seport +short_description: Manages SELinux network port type definitions +description: + - Manages SELinux network port type definitions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + ports: + description: + - Ports or port ranges. + - Can be a list (since 2.6) or comma separated string. + type: list + elements: str + required: true + proto: + description: + - Protocol for the specified port. + type: str + required: true + choices: [ tcp, udp ] + setype: + description: + - SELinux type for the specified port. + type: str + required: true + state: + description: + - Desired boolean value. + type: str + choices: [ absent, present ] + default: present + reload: + description: + - Reload SELinux policy after commit. + type: bool + default: true + ignore_selinux_state: + description: + - Run independent of selinux runtime state + type: bool + default: false + local: + description: + - Work with local modifications only. + type: bool + default: false + version_added: 5.6.0 +notes: + - The changes are persistent across reboots. + - Not tested on any debian based system. +requirements: +- libselinux-python +- policycoreutils-python +author: +- Dan Keder (@dankeder) +''' + +EXAMPLES = r''' +- name: Allow Apache to listen on tcp port 8888 + community.general.seport: + ports: 8888 + proto: tcp + setype: http_port_t + state: present + +- name: Allow sshd to listen on tcp port 8991 + community.general.seport: + ports: 8991 + proto: tcp + setype: ssh_port_t + state: present + +- name: Allow memcached to listen on tcp ports 10000-10100 and 10112 + community.general.seport: + ports: 10000-10100,10112 + proto: tcp + setype: memcache_port_t + state: present + +- name: Allow memcached to listen on tcp ports 10000-10100 and 10112 + community.general.seport: + ports: + - 10000-10100 + - 10112 + proto: tcp + setype: memcache_port_t + state: present + +- name: Remove tcp port 22 local modification if exists + community.general.seport: + ports: 22 + protocol: tcp + setype: ssh_port_t + state: absent + local: true +''' + +import traceback + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +def get_runtime_status(ignore_selinux_state=False): + return ignore_selinux_state or selinux.is_selinux_enabled() + + +def semanage_port_get_ports(seport, setype, proto, local): + """ Get the list of ports that have the specified type definition. + + :param community.general.seport: Instance of seobject.portRecords + + :type setype: str + :param setype: SELinux type. 
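(For example 'http_port_t' or 'ssh_port_t', as used in the EXAMPLES above.)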
+ + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :rtype: list + :return: List of ports that have the specified SELinux type. + """ + records = seport.get_all_by_type(locallist=local) + if (setype, proto) in records: + return records[(setype, proto)] + else: + return [] + + +def semanage_port_get_type(seport, port, proto): + """ Get the SELinux type of the specified port. + + :param community.general.seport: Instance of seobject.portRecords + + :type port: str + :param port: Port or port range (example: "8080", "8080-9090") + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :rtype: tuple + :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found. + """ + if isinstance(port, str): + ports = port.split('-', 1) + if len(ports) == 1: + ports.extend(ports) + else: + ports = (port, port) + + key = (int(ports[0]), int(ports[1]), proto) + + records = seport.get_all() + return records.get(key) + + +def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore='', local=False): + """ Add SELinux port type definition to the policy. + + :type module: AnsibleModule + :param module: Ansible module + + :type ports: list + :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"]) + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :type setype: str + :param setype: SELinux type + + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type serange: str + :param serange: SELinux MLS/MCS range (defaults to 's0') + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + change = False + try: + seport = seobject.portRecords(sestore) + seport.set_reload(do_reload) + ports_by_type = semanage_port_get_ports(seport, setype, proto, local) + for port in ports: + if port in ports_by_type: + continue + + change = True + if module.check_mode: + continue + port_type = semanage_port_get_type(seport, port, proto) + if port_type is None: + seport.add(port, proto, serange, setype) + else: + seport.modify(port, proto, serange, setype) + + except (ValueError, IOError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def semanage_port_del(module, ports, proto, setype, do_reload, sestore='', local=False): + """ Delete SELinux port type definition from the policy. + + :type module: AnsibleModule + :param module: Ansible module + + :type ports: list + :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"]) + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :type setype: str + :param setype: SELinux type. 
+ + :type do_reload: bool + :param do_reload: Whether to reload SELinux policy after commit + + :type sestore: str + :param sestore: SELinux store + + :rtype: bool + :return: True if the policy was changed, otherwise False + """ + change = False + try: + seport = seobject.portRecords(sestore) + seport.set_reload(do_reload) + ports_by_type = semanage_port_get_ports(seport, setype, proto, local) + for port in ports: + if port in ports_by_type: + change = True + if not module.check_mode: + seport.delete(port, proto) + + except (ValueError, IOError, KeyError, OSError, RuntimeError) as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc()) + + return change + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + ports=dict(type='list', elements='str', required=True), + proto=dict(type='str', required=True, choices=['tcp', 'udp']), + setype=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + local=dict(type='bool', default=False) + ), + supports_check_mode=True, + ) + + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + ports = module.params['ports'] + proto = module.params['proto'] + setype = module.params['setype'] + state = module.params['state'] + do_reload = module.params['reload'] + local = module.params['local'] + + result = { + 'ports': ports, + 'proto': proto, + 'setype': setype, + 'state': state, + } + + if state == 'present': + result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload, local=local) + elif state == 'absent': + result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload, local=local) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/serverless.py b/ansible_collections/community/general/plugins/modules/serverless.py new file mode 100644 index 000000000..67d673d4d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/serverless.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Ryan Scott Brown +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: serverless +short_description: Manages a Serverless Framework project +description: + - Provides support for managing Serverless Framework (U(https://serverless.com/)) project deployments and stacks. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Goal state of given stage/project. 
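- C(present) maps to running C(serverless deploy), and C(absent) to C(serverless remove).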
+ type: str + choices: [ absent, present ] + default: present + serverless_bin_path: + description: + - The path of a serverless framework binary relative to the 'service_path' eg. node_module/.bin/serverless + type: path + service_path: + description: + - The path to the root of the Serverless Service to be operated on. + type: path + required: true + stage: + description: + - The name of the serverless framework project stage to deploy to. + - This uses the serverless framework default "dev". + type: str + default: '' + region: + description: + - AWS region to deploy the service to. + - This parameter defaults to C(us-east-1). + type: str + default: '' + deploy: + description: + - Whether or not to deploy artifacts after building them. + - When this option is C(false) all the functions will be built, but no stack update will be run to send them out. + - This is mostly useful for generating artifacts to be stored/deployed elsewhere. + type: bool + default: true + force: + description: + - Whether or not to force full deployment, equivalent to serverless C(--force) option. + type: bool + default: false + verbose: + description: + - Shows all stack events during deployment, and display any Stack Output. + type: bool + default: false +notes: + - Currently, the C(serverless) command must be in the path of the node executing the task. + In the future this may be a flag. +requirements: +- serverless +- yaml +author: +- Ryan Scott Brown (@ryansb) +''' + +EXAMPLES = r''' +- name: Basic deploy of a service + community.general.serverless: + service_path: '{{ project_dir }}' + state: present + +- name: Deploy a project, then pull its resource list back into Ansible + community.general.serverless: + stage: dev + region: us-east-1 + service_path: '{{ project_dir }}' + register: sls + +# The cloudformation stack is always named the same as the full service, so the +# cloudformation_info module can get a full list of the stack resources, as +# well as stack events and outputs +- cloudformation_info: + region: us-east-1 + stack_name: '{{ sls.service_name }}' + stack_resources: true + +- name: Deploy a project using a locally installed serverless binary + community.general.serverless: + stage: dev + region: us-east-1 + service_path: '{{ project_dir }}' + serverless_bin_path: node_modules/.bin/serverless +''' + +RETURN = r''' +service_name: + type: str + description: The service name specified in the serverless.yml that was just deployed. + returned: always + sample: my-fancy-service-dev +state: + type: str + description: Whether the stack for the serverless project is present/absent. + returned: always +command: + type: str + description: Full C(serverless) command run by this module, in case you want to re-run the command outside the module. + returned: always + sample: serverless deploy --stage production +''' + +import os + +try: + import yaml + HAS_YAML = True +except ImportError: + HAS_YAML = False + +from ansible.module_utils.basic import AnsibleModule + + +def read_serverless_config(module): + path = module.params.get('service_path') + full_path = os.path.join(path, 'serverless.yml') + + try: + with open(full_path) as sls_config: + config = yaml.safe_load(sls_config.read()) + return config + except IOError as e: + module.fail_json(msg="Could not open serverless.yml in {0}. 
err: {1}".format(full_path, str(e))) + + +def get_service_name(module, stage): + config = read_serverless_config(module) + if config.get('service') is None: + module.fail_json(msg="Could not read `service` key from serverless.yml file") + + if stage: + return "{0}-{1}".format(config['service'], stage) + + return "{0}-{1}".format(config['service'], config.get('stage', 'dev')) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + service_path=dict(type='path', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + region=dict(type='str', default=''), + stage=dict(type='str', default=''), + deploy=dict(type='bool', default=True), + serverless_bin_path=dict(type='path'), + force=dict(type='bool', default=False), + verbose=dict(type='bool', default=False), + ), + ) + + if not HAS_YAML: + module.fail_json(msg='yaml is required for this module') + + service_path = module.params.get('service_path') + state = module.params.get('state') + region = module.params.get('region') + stage = module.params.get('stage') + deploy = module.params.get('deploy', True) + force = module.params.get('force', False) + verbose = module.params.get('verbose', False) + serverless_bin_path = module.params.get('serverless_bin_path') + + if serverless_bin_path is not None: + command = serverless_bin_path + " " + else: + command = module.get_bin_path("serverless") + " " + + if state == 'present': + command += 'deploy ' + elif state == 'absent': + command += 'remove ' + else: + module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state)) + + if state == 'present': + if not deploy: + command += '--noDeploy ' + elif force: + command += '--force ' + + if region: + command += '--region {0} '.format(region) + if stage: + command += '--stage {0} '.format(stage) + if verbose: + command += '--verbose ' + + rc, out, err = module.run_command(command, cwd=service_path) + if rc != 0: + if state == 'absent' and "-{0}' does not exist".format(stage) in out: + module.exit_json(changed=False, state='absent', command=command, + out=out, service_name=get_service_name(module, stage)) + + module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err)) + + # gather some facts about the deployment + module.exit_json(changed=True, state='present', out=out, command=command, + service_name=get_service_name(module, stage)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/shutdown.py b/ansible_collections/community/general/plugins/modules/shutdown.py new file mode 100644 index 000000000..5d66fad16 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/shutdown.py @@ -0,0 +1,81 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +DOCUMENTATION = r''' +module: shutdown +short_description: Shut down a machine +notes: + - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use I(search_paths) + to specify locations to search if the default paths do not work. +description: + - Shut downs a machine. 
+version_added: "1.1.0" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.flow +attributes: + check_mode: + support: full + diff_mode: + support: none + action: + support: full + async: + support: full +options: + delay: + description: + - Seconds to wait before shutdown. Passed as a parameter to the shutdown command. + - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0. + - On Solaris and FreeBSD, this will be seconds. + type: int + default: 0 + msg: + description: + - Message to display to users before shutdown. + type: str + default: Shut down initiated by Ansible + search_paths: + description: + - Paths to search on the remote machine for the C(shutdown) command. + - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored in the remote node when searching for the C(shutdown) command. + type: list + elements: path + default: ['/sbin', '/usr/sbin', '/usr/local/sbin'] + +seealso: +- module: ansible.builtin.reboot +author: + - Matt Davis (@nitzmahone) + - Sam Doran (@samdoran) + - Amin Vakil (@aminvakil) +''' + +EXAMPLES = r''' +- name: Unconditionally shut down the machine with all defaults + community.general.shutdown: + +- name: Delay shutting down the remote node + community.general.shutdown: + delay: 60 + +- name: Shut down a machine with shutdown command in unusual place + community.general.shutdown: + search_paths: + - '/lib/molly-guard' +''' + +RETURN = r''' +shutdown: + description: C(true) if the machine has been shut down. + returned: always + type: bool + sample: true +''' diff --git a/ansible_collections/community/general/plugins/modules/sl_vm.py b/ansible_collections/community/general/plugins/modules/sl_vm.py new file mode 100644 index 000000000..94055d1d2 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sl_vm.py @@ -0,0 +1,439 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: sl_vm +short_description: Create or cancel a virtual instance in SoftLayer +description: + - Creates or cancels SoftLayer instances. + - When created, optionally waits for it to be 'running'. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + instance_id: + description: + - Instance Id of the virtual instance to perform action option. + type: str + hostname: + description: + - Hostname to be provided to a virtual instance. + type: str + domain: + description: + - Domain name to be provided to a virtual instance. + type: str + datacenter: + description: + - Datacenter for the virtual instance to be deployed. + type: str + choices: + - ams01 + - ams03 + - che01 + - dal01 + - dal05 + - dal06 + - dal09 + - dal10 + - dal12 + - dal13 + - fra02 + - fra04 + - fra05 + - hkg02 + - hou02 + - lon02 + - lon04 + - lon06 + - mel01 + - mex01 + - mil01 + - mon01 + - osl01 + - par01 + - sao01 + - sea01 + - seo01 + - sjc01 + - sjc03 + - sjc04 + - sng01 + - syd01 + - syd04 + - tok02 + - tor01 + - wdc01 + - wdc04 + - wdc06 + - wdc07 + tags: + description: + - Tag or list of tags to be provided to a virtual instance. 
+ type: str + hourly: + description: + - Flag to determine if the instance should be hourly billed. + type: bool + default: true + private: + description: + - Flag to determine if the instance should be private only. + type: bool + default: false + dedicated: + description: + - Flag to determine if the instance should be deployed in dedicated space. + type: bool + default: false + local_disk: + description: + - Flag to determine if local disk should be used for the new instance. + type: bool + default: true + cpus: + description: + - Count of cpus to be assigned to new virtual instance. + type: int + choices: [1, 2, 4, 8, 16, 32, 56] + memory: + description: + - Amount of memory to be assigned to new virtual instance. + type: int + choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] + flavor: + description: + - Specify which SoftLayer flavor template to use instead of cpus and memory. + version_added: '0.2.0' + type: str + disks: + description: + - List of disk sizes to be assigned to new virtual instance. + default: [ 25 ] + type: list + elements: int + os_code: + description: + - OS Code to be used for new virtual instance. + type: str + image_id: + description: + - Image Template to be used for new virtual instance. + type: str + nic_speed: + description: + - NIC Speed to be assigned to new virtual instance. + choices: [10, 100, 1000] + type: int + public_vlan: + description: + - VLAN by its Id to be assigned to the public NIC. + type: str + private_vlan: + description: + - VLAN by its Id to be assigned to the private NIC. + type: str + ssh_keys: + description: + - List of ssh keys by their Id to be assigned to a virtual instance. + type: list + elements: str + default: [] + post_uri: + description: + - URL of a post provisioning script to be loaded and executed on virtual instance. + type: str + state: + description: + - Create, or cancel a virtual instance. + - Specify C(present) for create, C(absent) to cancel. + choices: [ absent, present ] + default: present + type: str + wait: + description: + - Flag used to wait for active status before returning. + type: bool + default: true + wait_time: + description: + - Time in seconds before wait returns. 
+ default: 600 + type: int +requirements: + - python >= 2.6 + - softlayer >= 4.1.1 +author: +- Matt Colton (@mcltn) +''' + +EXAMPLES = ''' +- name: Build instance + hosts: localhost + gather_facts: false + tasks: + - name: Build instance request + community.general.sl_vm: + hostname: instance-1 + domain: anydomain.com + datacenter: dal09 + tags: ansible-module-test + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: [25] + os_code: UBUNTU_LATEST + wait: false + +- name: Build additional instances + hosts: localhost + gather_facts: false + tasks: + - name: Build instances request + community.general.sl_vm: + hostname: "{{ item.hostname }}" + domain: "{{ item.domain }}" + datacenter: "{{ item.datacenter }}" + tags: "{{ item.tags }}" + hourly: "{{ item.hourly }}" + private: "{{ item.private }}" + dedicated: "{{ item.dedicated }}" + local_disk: "{{ item.local_disk }}" + cpus: "{{ item.cpus }}" + memory: "{{ item.memory }}" + disks: "{{ item.disks }}" + os_code: "{{ item.os_code }}" + ssh_keys: "{{ item.ssh_keys }}" + wait: "{{ item.wait }}" + with_items: + - hostname: instance-2 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true + - hostname: instance-3 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true + +- name: Cancel instances + hosts: localhost + gather_facts: false + tasks: + - name: Cancel by tag + community.general.sl_vm: + state: absent + tags: ansible-module-test +''' + +# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. 
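As the examples above show, I(tags) may be given as a single string or as a YAML list; the module flattens a list into a comma-separated string before handing it to the SoftLayer API (see create_virtual_instance() below). A small sketch of that normalization with hypothetical values:

    # Tag normalization as performed in create_virtual_instance() further down.
    tags = ['ansible-module-test', 'ansible-module-test-replicas']
    if isinstance(tags, list):
        tags = ','.join(map(str, tags))
    print(tags)  # ansible-module-test,ansible-module-test-replicas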
+RETURN = '''# ''' + +import json +import time + +try: + import SoftLayer + from SoftLayer import VSManager + + HAS_SL = True + vsManager = VSManager(SoftLayer.create_client_from_env()) +except ImportError: + HAS_SL = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import string_types + + +# TODO: get this info from API +STATES = ['present', 'absent'] +DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02', + 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01', + 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04', + 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07'] +CPU_SIZES = [1, 2, 4, 8, 16, 32, 56] +MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808] +INITIALDISK_SIZES = [25, 100] +LOCALDISK_SIZES = [25, 100, 150, 200, 300] +SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000] +NIC_SPEEDS = [10, 100, 1000] + + +def create_virtual_instance(module): + + instances = vsManager.list_instances( + hostname=module.params.get('hostname'), + domain=module.params.get('domain'), + datacenter=module.params.get('datacenter') + ) + + if instances: + return False, None + + # Check if OS or Image Template is provided (Can't be both, defaults to OS) + if (module.params.get('os_code') is not None and module.params.get('os_code') != ''): + module.params['image_id'] = '' + elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''): + module.params['os_code'] = '' + module.params['disks'] = [] # Blank out disks since it will use the template + else: + return False, None + + tags = module.params.get('tags') + if isinstance(tags, list): + tags = ','.join(map(str, module.params.get('tags'))) + + instance = vsManager.create_instance( + hostname=module.params.get('hostname'), + domain=module.params.get('domain'), + cpus=module.params.get('cpus'), + memory=module.params.get('memory'), + flavor=module.params.get('flavor'), + hourly=module.params.get('hourly'), + datacenter=module.params.get('datacenter'), + os_code=module.params.get('os_code'), + image_id=module.params.get('image_id'), + local_disk=module.params.get('local_disk'), + disks=module.params.get('disks'), + ssh_keys=module.params.get('ssh_keys'), + nic_speed=module.params.get('nic_speed'), + private=module.params.get('private'), + public_vlan=module.params.get('public_vlan'), + private_vlan=module.params.get('private_vlan'), + dedicated=module.params.get('dedicated'), + post_uri=module.params.get('post_uri'), + tags=tags, + ) + + if instance is not None and instance['id'] > 0: + return True, instance + else: + return False, None + + +def wait_for_instance(module, id): + instance = None + completed = False + wait_timeout = time.time() + module.params.get('wait_time') + while not completed and wait_timeout > time.time(): + try: + completed = vsManager.wait_for_ready(id, 10, 2) + if completed: + instance = vsManager.get_instance(id) + except Exception: + completed = False + + return completed, instance + + +def cancel_instance(module): + canceled = True + if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')): + tags = module.params.get('tags') + if isinstance(tags, string_types): + tags = [module.params.get('tags')] + instances = 
vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
+        for instance in instances:
+            try:
+                vsManager.cancel_instance(instance['id'])
+            except Exception:
+                canceled = False
+    elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+        try:
+            vsManager.cancel_instance(module.params.get('instance_id'))
+        except Exception:
+            canceled = False
+    else:
+        return False, None
+
+    return canceled, None
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            instance_id=dict(type='str'),
+            hostname=dict(type='str'),
+            domain=dict(type='str'),
+            datacenter=dict(type='str', choices=DATACENTERS),
+            tags=dict(type='str'),
+            hourly=dict(type='bool', default=True),
+            private=dict(type='bool', default=False),
+            dedicated=dict(type='bool', default=False),
+            local_disk=dict(type='bool', default=True),
+            cpus=dict(type='int', choices=CPU_SIZES),
+            memory=dict(type='int', choices=MEMORY_SIZES),
+            flavor=dict(type='str'),
+            disks=dict(type='list', elements='int', default=[25]),
+            os_code=dict(type='str'),
+            image_id=dict(type='str'),
+            nic_speed=dict(type='int', choices=NIC_SPEEDS),
+            public_vlan=dict(type='str'),
+            private_vlan=dict(type='str'),
+            ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
+            post_uri=dict(type='str'),
+            state=dict(type='str', default='present', choices=STATES),
+            wait=dict(type='bool', default=True),
+            wait_time=dict(type='int', default=600),
+        )
+    )
+
+    if not HAS_SL:
+        module.fail_json(msg='softlayer python library required for this module')
+
+    if module.params.get('state') == 'absent':
+        (changed, instance) = cancel_instance(module)
+
+    elif module.params.get('state') == 'present':
+        (changed, instance) = create_virtual_instance(module)
+        if module.params.get('wait') is True and instance:
+            (changed, instance) = wait_for_instance(module, instance['id'])
+
+    module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/slack.py b/ansible_collections/community/general/plugins/modules/slack.py
new file mode 100644
index 000000000..4e26f1973
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/slack.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Lee Goolsbee
+# Copyright (c) 2020, Michal Middleton
+# Copyright (c) 2017, Steve Pletcher
+# Copyright (c) 2016, René Moser
+# Copyright (c) 2015, Stefan Berggren
+# Copyright (c) 2014, Ramon de la Fuente
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: slack
+short_description: Send Slack notifications
+description:
+    - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration.
+author: "Ramon de la Fuente (@ramondelafuente)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  domain:
+    type: str
+    description:
+      - Slack (sub)domain for your environment without protocol (e.g.
+        C(example.slack.com)). In 1.8 and beyond, this is deprecated and may
+        be ignored. See the I(token) documentation for details.
+  token:
+    type: str
+    description:
+      - Slack integration token. This authenticates you to the slack service.
+        Make sure to use the correct type of token, depending on what method you use.
+      - "Webhook token:
+        Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
+        1.8 and above, ansible adapts to the new slack API where tokens look
+        like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
+        are in the new format then slack will ignore any value of domain. If
+        the token is in the old format the domain is required. Ansible has no
+        control of when slack will get rid of the old API. When slack does
+        that the old format will stop working. Please keep in mind that these
+        tokens are not the API tokens but the webhook tokens. In slack they are
+        found in the webhook URL, which is obtained under apps and integrations.
+        The incoming webhooks can be added in that area; in some cases this may
+        be locked by your Slack admin and you must request access. The key is
+        on the end of the URL given to you in that section."
+      - "WebAPI token:
+        Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-)
+        or C(xoxa-), e.g. C(xoxb-1234-56789abcdefghijklmnop). A WebAPI token is required if you intend to receive I(thread_id).
+        See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information."
+    required: true
+  msg:
+    type: str
+    description:
+      - Message to send. Note that the module does not handle escaping characters.
+        Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &amp;) before sending.
+        See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more.
+  channel:
+    type: str
+    description:
+      - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+  thread_id:
+    description:
+      - Optional. Timestamp of the parent message to thread this message, see U(https://api.slack.com/docs/message-threading).
+    type: str
+  message_id:
+    description:
+      - Optional. Message ID to edit, instead of posting a new message.
+      - If supplied, I(channel_id) must be in the form C(C0xxxxxxx). Use C({{ slack_response.channel_id }}) to get I(channel_id) from a previous task run.
+      - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)).
+    type: str
+    version_added: 1.2.0
+  username:
+    type: str
+    description:
+      - This is the sender of the message.
+    default: "Ansible"
+  icon_url:
+    type: str
+    description:
+      - URL for the message sender's icon (default C(https://docs.ansible.com/favicon.ico)).
+    default: https://docs.ansible.com/favicon.ico
+  icon_emoji:
+    type: str
+    description:
+      - Emoji for the message sender. See Slack documentation for options.
+        If I(icon_emoji) is set, I(icon_url) will not be used.
+  link_names:
+    type: int
+    description:
+      - Automatically create links for channels and usernames in I(msg).
+    default: 1
+    choices:
+      - 1
+      - 0
+  parse:
+    type: str
+    description:
+      - Setting for the message parser at Slack.
+    choices:
+      - 'full'
+      - 'none'
+  validate_certs:
+    description:
+      - If C(false), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    type: bool
+    default: true
+  color:
+    type: str
+    description:
+      - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
+      - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', or any valid 3-digit or 6-digit hex color value.
+      - Specifying a value in hex is supported since Ansible 2.8.
+    default: 'normal'
+  attachments:
+    type: list
+    elements: dict
+    description:
+      - Define a list of attachments. This list mirrors the Slack JSON API.
+      - For more information, see U(https://api.slack.com/docs/attachments).
+  blocks:
+    description:
+      - Define a list of blocks. This list mirrors the Slack JSON API.
+      - For more information, see U(https://api.slack.com/block-kit).
+    type: list
+    elements: dict
+    version_added: 1.0.0
+  prepend_hash:
+    type: str
+    description:
+      - Setting for automatically prepending a C(#) symbol on the passed in I(channel_id).
+      - The C(auto) method prepends a C(#) unless I(channel_id) starts with one of C(#), C(@), C(C0), C(GF), C(G0), C(CP).
+        These prefixes only cover a small set of the prefixes that should not have a C(#) prepended.
+        Since the exact set of I(channel_id) values that must not have the C(#) prefix is not known,
+        the value C(auto) for this option will be deprecated in the future. It is best to explicitly set
+        I(prepend_hash=always) or I(prepend_hash=never) to obtain the needed behavior.
+    choices:
+      - 'always'
+      - 'never'
+      - 'auto'
+    default: 'auto'
+    version_added: 6.1.0
+"""
+
+EXAMPLES = """
+- name: Send notification message via Slack
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    msg: '{{ inventory_hostname }} completed'
+  delegate_to: localhost
+
+- name: Send notification message via Slack with all options
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    msg: '{{ inventory_hostname }} completed'
+    channel: '#ansible'
+    thread_id: '1539917263.000100'
+    username: 'Ansible on {{ inventory_hostname }}'
+    icon_url: http://www.example.com/some-image-file.png
+    link_names: 0
+    parse: 'none'
+  delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    msg: '{{ inventory_hostname }} is alive!'
+    color: good
+    username: ''
+    icon_url: ''
+
+- name: Insert a color bar in front of the message with a valid hex color value
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    msg: 'This message uses color in hex value'
+    color: '#00aacc'
+    username: ''
+    icon_url: ''
+
+- name: Use the attachments API
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    attachments:
+      - text: Display my system load on host A and B
+        color: '#ff00dd'
+        title: System load
+        fields:
+          - title: System A
+            value: "load average: 0,74, 0,66, 0,63"
+            short: true
+          - title: System B
+            value: 'load average: 5,16, 4,64, 2,43'
+            short: true
+
+- name: Use the blocks API
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    blocks:
+      - type: section
+        text:
+          type: mrkdwn
+          text: |-
+            *System load*
+            Display my system load on host A and B
+      - type: context
+        elements:
+          - type: mrkdwn
+            text: |-
+              *System A*
+              load average: 0,74, 0,66, 0,63
+          - type: mrkdwn
+            text: |-
+              *System B*
+              load average: 5,16, 4,64, 2,43
+
+- name: Send a message with a link using Slack markup
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    msg: We sent this message using <https://www.ansible.com|ansible>!
+
+- name: Send a message with angle brackets and ampersands
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    msg: This message has &lt;brackets&gt; &amp; ampersands in plain text.
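+
+# A minimal sketch of prepend_hash (the token and channel ID below are
+# illustrative): with 'never', the channel value is passed through untouched.
+- name: Send to a channel ID without prepending a hash
+  community.general.slack:
+    token: thetoken/generatedby/slack
+    channel: C012AB3CD
+    prepend_hash: never
+    msg: Posting to a channel by its ID.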
+ +- name: Initial Threaded Slack message + community.general.slack: + channel: '#ansible' + token: xoxb-1234-56789abcdefghijklmnop + msg: 'Starting a thread with my initial post.' + register: slack_response +- name: Add more info to thread + community.general.slack: + channel: '#ansible' + token: xoxb-1234-56789abcdefghijklmnop + thread_id: "{{ slack_response['ts'] }}" + color: good + msg: 'And this is my threaded response!' + +- name: Send a message to be edited later on + community.general.slack: + token: thetoken/generatedby/slack + channel: '#ansible' + msg: Deploying something... + register: slack_response +- name: Edit message + community.general.slack: + token: thetoken/generatedby/slack + # The 'channel' option does not accept the channel name. It must use the 'channel_id', + # which can be retrieved for example from 'slack_response' from the previous task. + channel: "{{ slack_response.channel }}" + msg: Deployment complete! + message_id: "{{ slack_response.ts }}" +""" + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + +OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' +SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' +SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage' +SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update' +SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history' + +# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call. +# We do not escape other characters used as Slack metacharacters (e.g. &, <, >). +escape_table = { + '"': "\"", + "'": "\'", +} + + +def is_valid_hex_color(color_choice): + if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice): + return True + return False + + +def escape_quotes(text): + """Backslash any quotes within text.""" + return "".join(escape_table.get(c, c) for c in text) + + +def recursive_escape_quotes(obj, keys): + """Recursively escape quotes inside supplied keys inside block kit objects""" + if isinstance(obj, dict): + escaped = {} + for k, v in obj.items(): + if isinstance(v, str) and k in keys: + escaped[k] = escape_quotes(v) + else: + escaped[k] = recursive_escape_quotes(v, keys) + elif isinstance(obj, list): + escaped = [recursive_escape_quotes(v, keys) for v in obj] + else: + escaped = obj + return escaped + + +def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, + parse, color, attachments, blocks, message_id, prepend_hash): + payload = {} + if color == "normal" and text is not None: + payload = dict(text=escape_quotes(text)) + elif text is not None: + # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it. 
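+        # For example, text='hi' with color='#ff0000' produces:
+        #   {'attachments': [{'text': 'hi', 'color': '#ff0000', 'mrkdwn_in': ['text']}]}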
+ payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])]) + if channel is not None: + if prepend_hash == 'auto': + if channel.startswith(('#', '@', 'C0', 'GF', 'G0', 'CP')): + payload['channel'] = channel + else: + payload['channel'] = '#' + channel + elif prepend_hash == 'always': + payload['channel'] = '#' + channel + elif prepend_hash == 'never': + payload['channel'] = channel + if thread_id is not None: + payload['thread_ts'] = thread_id + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + if parse is not None: + payload['parse'] = parse + if message_id is not None: + payload['ts'] = message_id + + if attachments is not None: + if 'attachments' not in payload: + payload['attachments'] = [] + + if attachments is not None: + attachment_keys_to_escape = [ + 'title', + 'text', + 'author_name', + 'pretext', + 'fallback', + ] + for attachment in attachments: + for key in attachment_keys_to_escape: + if key in attachment: + attachment[key] = escape_quotes(attachment[key]) + + if 'fallback' not in attachment: + attachment['fallback'] = attachment['text'] + + payload['attachments'].append(attachment) + + if blocks is not None: + block_keys_to_escape = [ + 'text', + 'alt_text' + ] + payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape) + + return payload + + +def get_slack_message(module, token, channel, ts): + headers = { + 'Content-Type': 'application/json; charset=UTF-8', + 'Accept': 'application/json', + 'Authorization': 'Bearer ' + token + } + qs = urlencode({ + 'channel': channel, + 'ts': ts, + 'limit': 1, + 'inclusive': 'true', + }) + url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs + response, info = fetch_url(module=module, url=url, headers=headers, method='GET') + if info['status'] != 200: + module.fail_json(msg="failed to get slack message") + data = module.from_json(response.read()) + if len(data['messages']) < 1: + module.fail_json(msg="no messages matching ts: %s" % ts) + if len(data['messages']) > 1: + module.fail_json(msg="more than 1 message matching ts: %s" % ts) + return data['messages'][0] + + +def do_notify_slack(module, domain, token, payload): + use_webapi = False + if token.count('/') >= 2: + # New style webhook token + slack_uri = SLACK_INCOMING_WEBHOOK % token + elif re.match(r'^xox[abp]-\S+$', token): + slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI + use_webapi = True + else: + if not domain: + module.fail_json(msg="Slack has updated its webhook API. 
You need to specify a token of the form " + "XXXX/YYYY/ZZZZ in your playbook") + slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token) + + headers = { + 'Content-Type': 'application/json; charset=UTF-8', + 'Accept': 'application/json', + } + if use_webapi: + headers['Authorization'] = 'Bearer ' + token + + data = module.jsonify(payload) + response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data) + + if info['status'] != 200: + if use_webapi: + obscured_incoming_webhook = slack_uri + else: + obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]' + module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg'])) + + # each API requires different handling + if use_webapi: + return module.from_json(response.read()) + else: + return {'webhook': 'ok'} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + domain=dict(type='str'), + token=dict(type='str', required=True, no_log=True), + msg=dict(type='str'), + channel=dict(type='str'), + thread_id=dict(type='str'), + username=dict(type='str', default='Ansible'), + icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), + icon_emoji=dict(type='str'), + link_names=dict(type='int', default=1, choices=[0, 1]), + parse=dict(type='str', choices=['none', 'full']), + validate_certs=dict(default=True, type='bool'), + color=dict(type='str', default='normal'), + attachments=dict(type='list', elements='dict'), + blocks=dict(type='list', elements='dict'), + message_id=dict(type='str'), + prepend_hash=dict(type='str', default='auto', choices=['always', 'never', 'auto']), + ), + supports_check_mode=True, + ) + + domain = module.params['domain'] + token = module.params['token'] + text = module.params['msg'] + channel = module.params['channel'] + thread_id = module.params['thread_id'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + parse = module.params['parse'] + color = module.params['color'] + attachments = module.params['attachments'] + blocks = module.params['blocks'] + message_id = module.params['message_id'] + prepend_hash = module.params['prepend_hash'] + + color_choices = ['normal', 'good', 'warning', 'danger'] + if color not in color_choices and not is_valid_hex_color(color): + module.fail_json(msg="Color value specified should be either one of %r " + "or any valid hex value with length 3 or 6." % color_choices) + + changed = True + + # if updating an existing message, we can check if there's anything to update + if message_id is not None: + changed = False + msg = get_slack_message(module, token, channel, message_id) + for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'): + if msg.get(key) != module.params.get(key): + changed = True + break + # if check mode is active, we shouldn't do anything regardless. + # if changed=False, we don't need to do anything, so don't do it. 
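+        # Note: only the keys listed above are compared; the message text itself
+        # is not part of this idempotency check.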
+        if module.check_mode or not changed:
+            module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel'])
+    elif module.check_mode:
+        module.exit_json(changed=changed)
+
+    payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+                                      parse, color, attachments, blocks, message_id, prepend_hash)
+    slack_response = do_notify_slack(module, domain, token, payload)
+
+    if 'ok' in slack_response:
+        # Evaluate WebAPI response
+        if slack_response['ok']:
+            # return payload as a string for backwards compatibility
+            payload_json = module.jsonify(payload)
+            module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'],
+                             api=slack_response, payload=payload_json)
+        else:
+            module.fail_json(msg="Slack API error", error=slack_response['error'])
+    else:
+        # Exit with plain OK from WebHook, since we don't have more information
+        # If we get 200 from webhook, the only answer is OK
+        module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/slackpkg.py b/ansible_collections/community/general/plugins/modules/slackpkg.py
new file mode 100644
index 000000000..208061a4c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/slackpkg.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Kim Nørgaard
+# Written by Kim Nørgaard
+# Based on pkgng module written by bleader
+# that was based on pkgin module written by Shaun Zinck
+# that was based on pacman module written by Afterburn
+# that was based on apt module written by Matthew Williams
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: slackpkg
+short_description: Package manager for Slackware >= 12.2
+description:
+    - Manage binary packages for Slackware using C(slackpkg), which
+      is available in versions after 12.2.
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+    name:
+        description:
+            - Name of the package to install or remove.
+        required: true
+        type: list
+        elements: str
+        aliases: [pkg]
+
+    state:
+        description:
+            - State of the package. C(installed) is an alias for C(present), and C(removed) is an alias for C(absent).
+ choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ] + required: false + default: present + type: str + + update_cache: + description: + - update the package database first + required: false + default: false + type: bool + +author: Kim Nørgaard (@KimNorgaard) +requirements: [ "Slackware >= 12.2" ] +''' + +EXAMPLES = ''' +- name: Install package foo + community.general.slackpkg: + name: foo + state: present + +- name: Remove packages foo and bar + community.general.slackpkg: + name: foo,bar + state: absent + +- name: Make sure that it is the most updated package + community.general.slackpkg: + name: foo + state: latest +''' + +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, slackpkg_path, name): + + import platform + import os + import re + + machine = platform.machine() + # Exception for kernel-headers package on x86_64 + if name == 'kernel-headers' and machine == 'x86_64': + machine = 'x86' + pattern = re.compile('^%s-[^-]+-(%s|noarch|fw)-[^-]+$' % (re.escape(name), re.escape(machine))) + packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)] + + if len(packages) > 0: + return True + + return False + + +def remove_packages(module, slackpkg_path, packages): + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, slackpkg_path, package): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s -default_answer=y -batch=on \ + remove %s" % (slackpkg_path, + package)) + + if not module.check_mode and query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, slackpkg_path, packages): + + install_c = 0 + + for package in packages: + if query_package(module, slackpkg_path, package): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s -default_answer=y -batch=on \ + install %s" % (slackpkg_path, + package)) + + if not module.check_mode and not query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to install %s: %s" % (package, out), + stderr=err) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" + % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def upgrade_packages(module, slackpkg_path, packages): + install_c = 0 + + for package in packages: + if not module.check_mode: + rc, out, err = module.run_command("%s -default_answer=y -batch=on \ + upgrade %s" % (slackpkg_path, + package)) + + if not module.check_mode and not query_package(module, slackpkg_path, + package): + module.fail_json(msg="failed to install %s: %s" % (package, out), + stderr=err) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" + % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def update_cache(module, slackpkg_path): + rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path)) + if rc != 0: + module.fail_json(msg="Could not update package cache") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", 
choices=['installed', 'removed', 'absent', 'present', 'latest']), + name=dict(aliases=["pkg"], required=True, type='list', elements='str'), + update_cache=dict(default=False, type='bool'), + ), + supports_check_mode=True) + + slackpkg_path = module.get_bin_path('slackpkg', True) + + p = module.params + + pkgs = p['name'] + + if p["update_cache"]: + update_cache(module, slackpkg_path) + + if p['state'] == 'latest': + upgrade_packages(module, slackpkg_path, pkgs) + + elif p['state'] in ['present', 'installed']: + install_packages(module, slackpkg_path, pkgs) + + elif p["state"] in ['removed', 'absent']: + remove_packages(module, slackpkg_path, pkgs) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/smartos_image_info.py b/ansible_collections/community/general/plugins/modules/smartos_image_info.py new file mode 100644 index 000000000..e93ffb9ac --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/smartos_image_info.py @@ -0,0 +1,119 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Adam Števko +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: smartos_image_info +short_description: Get SmartOS image details +description: + - Retrieve information about all installed images on SmartOS. + - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts). + Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)! +author: Adam Števko (@xen0l) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + filters: + description: + - Criteria for selecting image. Can be any value from image + manifest and 'published_date', 'published', 'source', 'clones', + and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm) + under 'imgadm list'. 
+        type: str
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+  community.general.smartos_image_info:
+  register: result
+
+- name: Return all private active Linux images
+  community.general.smartos_image_info:
+    filters: "os=linux state=active public=false"
+  register: result
+
+- name: Show how many clones each image has
+  community.general.smartos_image_info:
+  register: result
+
+- name: Print information
+  ansible.builtin.debug:
+    msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+         has {{ result.smartos_images[item]['clones'] }} VM(s)"
+  with_items: "{{ result.smartos_images.keys() | list }}"
+
+- name: Print information
+  ansible.builtin.debug:
+    msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+         has {{ smartos_images[item]['clones'] }} VM(s)"
+  with_items: "{{ smartos_images.keys() | list }}"
'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+    def __init__(self, module):
+        self.module = module
+
+        self.filters = module.params['filters']
+
+    def return_all_installed_images(self):
+        cmd = [self.module.get_bin_path('imgadm'), 'list', '-j']
+
+        if self.filters:
+            cmd.append(self.filters)
+
+        (rc, out, err) = self.module.run_command(cmd)
+
+        if rc != 0:
+            self.module.fail_json(
+                msg='Failed to get all installed images', stderr=err)
+
+        images = json.loads(out)
+
+        result = {}
+        for image in images:
+            result[image['manifest']['uuid']] = image['manifest']
+            # Merge additional attributes with the image manifest.
+            for attrib in ['clones', 'source', 'zpool']:
+                result[image['manifest']['uuid']][attrib] = image[attrib]
+
+        return result
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            filters=dict(type='str'),
+        ),
+        supports_check_mode=True,
+    )
+
+    image_facts = ImageFacts(module)
+
+    data = dict(smartos_images=image_facts.return_all_installed_images())
+
+    module.exit_json(**data)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/snap.py b/ansible_collections/community/general/plugins/modules/snap.py
new file mode 100644
index 000000000..4b798d6e2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/snap.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Alexei Znamensky (russoz)
+# Copyright (c) 2021, Marcus Rickert
+# Copyright (c) 2018, Stanislas Lange (angristan)
+# Copyright (c) 2018, Victor Carceler
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: snap
+short_description: Manages snaps
+description:
+    - "Manages snap packages."
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+    name:
+        description:
+            - Name of the snaps.
+        required: true
+        type: list
+        elements: str
+    state:
+        description:
+            - Desired state of the package.
+        required: false
+        default: present
+        choices: [ absent, present, enabled, disabled ]
+        type: str
+    classic:
+        description:
+            - Confinement policy. The classic confinement allows a snap to have
+              the same level of access to the system as "classic" packages,
+              like those managed by APT. This option corresponds to the C(--classic) argument.
+              This option can only be specified if there is a single snap in the task.
+        type: bool
+        required: false
+        default: false
+    channel:
+        description:
+            - Define which release of a snap is installed and tracked for updates.
+              This option can only be specified if there is a single snap in the task.
+        type: str
+        required: false
+        default: stable
+    options:
+        description:
+            - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied
+              to that snap only. If the snap name is omitted, the options will be applied to all snaps listed in I(name). Options will
+              only be applied to active snaps.
+        required: false
+        type: list
+        elements: str
+        version_added: 4.4.0
+
+author:
+    - Victor Carceler (@vcarceler)
+    - Stanislas Lange (@angristan)
+
+seealso:
+    - module: community.general.snap_alias
+'''
+
+EXAMPLES = '''
+# Install "foo" and "bar" snap
+- name: Install foo
+  community.general.snap:
+    name:
+      - foo
+      - bar
+
+# Install "foo" snap with options par1=A and par2=B
+- name: Install "foo" with options
+  community.general.snap:
+    name:
+      - foo
+    options:
+      - par1=A
+      - par2=B
+
+# Install "foo" and "bar" snaps with common option com=A and specific options fooPar=X and barPar=Y
+- name: Install "foo" and "bar" with options
+  community.general.snap:
+    name:
+      - foo
+      - bar
+    options:
+      - com=A
+      - foo:fooPar=X
+      - bar:barPar=Y
+
+# Remove "foo" snap
+- name: Remove foo
+  community.general.snap:
+    name: foo
+    state: absent
+
+# Install a snap with classic confinement
+- name: Install "foo" with option --classic
+  community.general.snap:
+    name: foo
+    classic: true
+
+# Install a snap from a specific channel
+- name: Install "foo" with option --channel=latest/edge
+  community.general.snap:
+    name: foo
+    channel: latest/edge
+'''
+
+RETURN = '''
+classic:
+    description: Whether or not the snaps were installed with the classic confinement.
+    type: bool
+    returned: When snaps are installed
+channel:
+    description: The channel the snaps were installed from.
+    type: str
+    returned: When snaps are installed
+cmd:
+    description: The command that was executed on the host.
+    type: str
+    returned: When changed is true
+snaps_installed:
+    description: The list of actually installed snaps.
+    type: list
+    returned: When any snaps have been installed
+snaps_removed:
+    description: The list of actually removed snaps.
+    type: list
+    returned: When any snaps have been removed
+options_changed:
+    description: The list of options set/changed in format C(snap:key=value).
+    type: list
+    returned: When any options have been changed/set
+    version_added: 4.4.0
+'''
+
+import re
+import json
+import numbers
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import (
+    CmdStateModuleHelper, ArgFormat
+)
+
+
+__state_map = dict(
+    present='install',
+    absent='remove',
+    enabled='enable',
+    disabled='disable',
+    info='info',  # not public
+    list='list',  # not public
+    set='set',  # not public
+    get='get',  # not public
+)
+
+
+def _state_map(value):
+    return [__state_map[value]]
+
+
+class Snap(CmdStateModuleHelper):
+    __disable_re = re.compile(r'(?:\S+\s+){5}(?P<notes>\S+)')
+    __set_param_re = re.compile(r'(?P<snap_prefix>\S+:)?(?P<key>\S+)\s*=\s*(?P<value>.+)')
+    module = dict(
+        argument_spec={
+            'name': dict(type='list', elements='str', required=True),
+            'state': dict(type='str', default='present',
+                          choices=['absent', 'present', 'enabled', 'disabled']),
+            'classic': dict(type='bool', default=False),
+            'channel': dict(type='str', default='stable'),
+            'options': dict(type='list', elements='str'),
+        },
+        supports_check_mode=True,
+    )
+    command = "snap"
+    command_args_formats = dict(
+        actionable_snaps=dict(fmt=lambda v: v),
+        state=dict(fmt=_state_map),
+        classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN),
+        channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]),
+        options=dict(fmt=list),
+        json_format=dict(fmt="-d", style=ArgFormat.BOOLEAN),
+    )
+    check_rc = False
+
+    @staticmethod
+    def _first_non_zero(a):
+        for elem in a:
+            if elem != 0:
+                return elem
+
+        return 0
+
+    def _run_multiple_commands(self, commands):
+        outputs = [(c,) + self.run_command(params=c) for c in commands]
+        results = ([], [], [], [])
+        for output in outputs:
+            for i in range(4):
+                results[i].append(output[i])
+
+        return [
+            '; '.join([to_native(x) for x in results[0]]),
+            self._first_non_zero(results[1]),
+            '\n'.join(results[2]),
+            '\n'.join(results[3]),
+        ]
+
+    def convert_json_subtree_to_map(self, json_subtree, prefix=None):
+        option_map = {}
+
+        if not isinstance(json_subtree, dict):
+            self.do_raise("Non-dict non-leaf element encountered while parsing option map. "
+                          "The output format of 'snap set' may have changed. Aborting!")
+
+        for key, value in json_subtree.items():
+            full_key = key if prefix is None else prefix + "." + key
+
+            if isinstance(value, (str, float, bool, numbers.Integral)):
+                option_map[full_key] = str(value)
+
+            else:
+                option_map.update(self.convert_json_subtree_to_map(json_subtree=value, prefix=full_key))
+
+        return option_map
+
+    def convert_json_to_map(self, json_string):
+        json_object = json.loads(json_string)
+        return self.convert_json_subtree_to_map(json_object)
+
+    def retrieve_option_map(self, snap_name):
+        params = [{'state': 'get'}, {'name': snap_name}, {'json_format': True}]
+        rc, out, err = self.run_command(params=params)
+
+        if rc != 0:
+            return {}
+
+        result = out.splitlines()
+
+        if "has no configuration" in result[0]:
+            return {}
+
+        try:
+            option_map = self.convert_json_to_map(out)
+
+        except Exception as e:
+            self.do_raise(
+                msg="Parsing option map returned by 'snap get {0}' triggers exception '{1}', output:\n'{2}'".format(snap_name, str(e), out))
+
+        return option_map
+
+    def is_snap_installed(self, snap_name):
+        return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0]
+
+    def is_snap_enabled(self, snap_name):
+        rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': snap_name}])
+        if rc != 0:
+            return None
+        result = out.splitlines()[1]
+        match = self.__disable_re.match(result)
+        if not match:
+            self.do_raise(msg="Unable to parse 'snap list {0}' output:\n{1}".format(snap_name, out))
+        notes = match.group('notes')
+        return "disabled" not in notes.split(',')
+
+    def process_actionable_snaps(self, actionable_snaps):
+        self.changed = True
+        self.vars.snaps_installed = actionable_snaps
+
+        if self.module.check_mode:
+            return
+
+        params = ['state', 'classic', 'channel']  # get base cmd parts
+        has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable'
+        has_multiple_snaps = len(actionable_snaps) > 1
+
+        if has_one_pkg_params and has_multiple_snaps:
+            commands = [params + [{'actionable_snaps': [s]}] for s in actionable_snaps]
+        else:
+            commands = [params + [{'actionable_snaps': actionable_snaps}]]
+        self.vars.cmd, rc, out, err = self._run_multiple_commands(commands)
+
+        if rc == 0:
+            return
+
+        classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P<package_name>\w+)"'
+                                          r' was published using classic confinement')
+        match = classic_snap_pattern.match(err)
+        if match:
+            err_pkg = match.group('package_name')
+            msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg)
+        else:
+            msg = "Ooops! 
Snap installation failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + self.do_raise(msg=msg) + + def state_present(self): + + self.vars.meta('classic').set(output=True) + self.vars.meta('channel').set(output=True) + actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] + + if actionable_snaps: + self.process_actionable_snaps(actionable_snaps) + + self.set_options() + + def set_options(self): + if self.vars.options is None: + return + + actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] + overall_options_changed = [] + + for snap_name in actionable_snaps: + option_map = self.retrieve_option_map(snap_name=snap_name) + + options_changed = [] + + for option_string in self.vars.options: + match = self.__set_param_re.match(option_string) + + if not match: + msg = "Cannot parse set option '{option_string}'".format(option_string=option_string) + self.do_raise(msg) + + snap_prefix = match.group("snap_prefix") + selected_snap_name = snap_prefix[:-1] if snap_prefix else None + + if selected_snap_name is not None and selected_snap_name not in self.vars.name: + msg = "Snap option '{option_string}' refers to snap which is not in the list of snap names".format(option_string=option_string) + self.do_raise(msg) + + if selected_snap_name is None or (snap_name is not None and snap_name == selected_snap_name): + key = match.group("key") + value = match.group("value").strip() + + if key not in option_map or key in option_map and option_map[key] != value: + option_without_prefix = key + "=" + value + option_with_prefix = option_string if selected_snap_name is not None else snap_name + ":" + option_string + options_changed.append(option_without_prefix) + overall_options_changed.append(option_with_prefix) + + if options_changed: + self.changed = True + + if not self.module.check_mode: + params = [{'state': 'set'}, {'name': snap_name}, {'options': options_changed}] + + rc, out, err = self.run_command(params=params) + + if rc != 0: + if 'has no "configure" hook' in err: + msg = "Snap '{snap}' does not have any configurable options".format(snap=snap_name) + self.do_raise(msg) + + msg = "Cannot set options '{options}' for snap '{snap}': error={error}".format( + options=" ".join(options_changed), snap=snap_name, error=err) + self.do_raise(msg) + + if overall_options_changed: + self.vars.options_changed = overall_options_changed + + def _generic_state_action(self, actionable_func, actionable_var, params=None): + actionable_snaps = [s for s in self.vars.name if actionable_func(s)] + if not actionable_snaps: + return + self.changed = True + self.vars[actionable_var] = actionable_snaps + if self.module.check_mode: + return + if params is None: + params = ['state'] + commands = [params + [{'actionable_snaps': actionable_snaps}]] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return + msg = "Ooops! 
Snap operation failed while executing '{cmd}', please examine logs and " \
+              "error output for more details.".format(cmd=self.vars.cmd)
+        self.do_raise(msg=msg)
+
+    def state_absent(self):
+        self._generic_state_action(self.is_snap_installed, "snaps_removed", ['classic', 'channel', 'state'])
+
+    def state_enabled(self):
+        self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state'])
+
+    def state_disabled(self):
+        self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state'])
+
+
+def main():
+    Snap.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/snap_alias.py b/ansible_collections/community/general/plugins/modules/snap_alias.py
new file mode 100644
index 000000000..19fbef003
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/snap_alias.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Alexei Znamensky (russoz)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: snap_alias
+short_description: Manages snap aliases
+version_added: 4.0.0
+description:
+    - "Manages snap aliases."
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: full
+options:
+    state:
+        description:
+            - Desired state of the alias.
+        type: str
+        choices: [ absent, present ]
+        default: present
+    name:
+        description:
+            - Name of the snap.
+        type: str
+    alias:
+        description:
+            - Aliases to be created or removed.
+        type: list
+        elements: str
+        aliases: [aliases]
+
+author:
+    - Alexei Znamensky (@russoz)
+
+seealso:
+    - module: community.general.snap
+'''
+
+EXAMPLES = '''
+# Create an alias "hw" for the "hello-world" snap
+- name: Create snap alias
+  community.general.snap_alias:
+    name: hello-world
+    alias: hw
+
+- name: Create multiple aliases
+  community.general.snap_alias:
+    name: hello-world
+    aliases:
+      - hw
+      - hw2
+      - hw3
+    state: present  # optional
+
+- name: Remove one specific alias
+  community.general.snap_alias:
+    alias: hw
+    state: absent
+
+- name: Remove all aliases for snap
+  community.general.snap_alias:
+    name: hello-world
+    state: absent
+'''
+
+RETURN = '''
+snap_aliases:
+    description: The snap aliases after execution. If called in check mode, then the list represents the state before execution.
+    type: list
+    elements: str
+    returned: always
+'''
+
+
+import re
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+
+_state_map = dict(
+    present='alias',
+    absent='unalias',
+    info='aliases',
+)
+
+
+class SnapAlias(StateModuleHelper):
+    _RE_ALIAS_LIST = re.compile(r"^(?P<snap>\S+)\s+(?P<alias>[\w-]+)\s+.*$")
+
+    module = dict(
+        argument_spec={
+            'state': dict(type='str', choices=['absent', 'present'], default='present'),
+            'name': dict(type='str'),
+            'alias': dict(type='list', elements='str', aliases=['aliases']),
+        },
+        required_if=[
+            ('state', 'present', ['name', 'alias']),
+            ('state', 'absent', ['name', 'alias'], True),
+        ],
+        supports_check_mode=True,
+    )
+
+    command_args_formats = {
+        "state": cmd_runner_fmt.as_map(_state_map),
+        "name": cmd_runner_fmt.as_list(),
+        "alias": cmd_runner_fmt.as_list(),
+    }
+
+    def _aliases(self):
+        n = self.vars.name
+        return {n: self._get_aliases_for(n)} if n else self._get_aliases()
+
+    def __init_module__(self):
+        self.runner = CmdRunner(self.module, "snap", self.command_args_formats, check_rc=False)
+        self.vars.set("snap_aliases", self._aliases(), change=True, diff=True)
+
+    def __quit_module__(self):
+        self.vars.snap_aliases = self._aliases()
+
+    def _get_aliases(self):
+        def process(rc, out, err):
+            if err:
+                return {}
+            aliases = [self._RE_ALIAS_LIST.match(a.strip()) for a in out.splitlines()[1:]]
+            snap_alias_list = [(entry.group("snap"), entry.group("alias")) for entry in aliases]
+            results = {}
+            for snap, alias in snap_alias_list:
+                results[snap] = results.get(snap, []) + [alias]
+            return results
+
+        with self.runner("state name", check_rc=True, output_process=process) as ctx:
+            aliases = ctx.run(state="info")
+            if self.verbosity >= 4:
+                self.vars.get_aliases_run_info = ctx.run_info
+            return aliases
+
+    def _get_aliases_for(self, name):
+        return self._get_aliases().get(name, [])
+
+    def _has_alias(self, name=None, alias=None):
+        if name:
+            if name not in self.vars.snap_aliases:
+                return False
+            if alias is None:
+                return bool(self.vars.snap_aliases[name])
+            return alias in self.vars.snap_aliases[name]
+
+        return any(alias in aliases for aliases in self.vars.snap_aliases.values())
+
+    def state_present(self):
+        for _alias in self.vars.alias:
+            if not self._has_alias(self.vars.name, _alias):
+                self.changed = True
+                with self.runner("state name alias", check_mode_skip=True) as ctx:
+                    ctx.run(alias=_alias)
+                    if self.verbosity >= 4:
+                        self.vars.run_info = ctx.run_info
+
+    def state_absent(self):
+        if not self.vars.alias:
+            if self._has_alias(self.vars.name):
+                self.changed = True
+                with self.runner("state name", check_mode_skip=True) as ctx:
+                    ctx.run()
+                    if self.verbosity >= 4:
+                        self.vars.run_info = ctx.run_info
+        else:
+            for _alias in self.vars.alias:
+                if self._has_alias(self.vars.name, _alias):
+                    self.changed = True
+                    with self.runner("state alias", check_mode_skip=True) as ctx:
+                        ctx.run(alias=_alias)
+                        if self.verbosity >= 4:
+                            self.vars.run_info = ctx.run_info
+
+
+def main():
+    SnapAlias.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/snmp_facts.py b/ansible_collections/community/general/plugins/modules/snmp_facts.py
new file mode 100644
index 000000000..e54473ffa
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/snmp_facts.py
@@ -0,0 +1,475 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Networklore's snmp library for Ansible
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: snmp_facts
+author:
+- Patrick Ogenstad (@ogenstad)
+short_description: Retrieve facts for a device using SNMP
+description:
+    - Retrieve facts for a device using SNMP. The facts will be
+      inserted into the C(ansible_facts) key.
+requirements:
+    - pysnmp
+extends_documentation_fragment:
+- community.general.attributes
+- community.general.attributes.facts
+- community.general.attributes.facts_module
+attributes:
+    check_mode:
+        version_added: 3.3.0
+        # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+  host:
+    description:
+      - Set to target SNMP server (normally C({{ inventory_hostname }})).
+    type: str
+    required: true
+  version:
+    description:
+      - SNMP Version to use, C(v2), C(v2c) or C(v3).
+    type: str
+    required: true
+    choices: [ v2, v2c, v3 ]
+  community:
+    description:
+      - The SNMP community string, required if I(version) is C(v2) or C(v2c).
+    type: str
+  level:
+    description:
+      - Authentication level.
+      - Required if I(version) is C(v3).
+    type: str
+    choices: [ authNoPriv, authPriv ]
+  username:
+    description:
+      - Username for SNMPv3.
+      - Required if I(version) is C(v3).
+    type: str
+  integrity:
+    description:
+      - Hashing algorithm.
+      - Required if I(version) is C(v3).
+    type: str
+    choices: [ md5, sha ]
+  authkey:
+    description:
+      - Authentication key.
+      - Required if I(version) is C(v3).
+    type: str
+  privacy:
+    description:
+      - Encryption algorithm.
+      - Required if I(level) is C(authPriv).
+    type: str
+    choices: [ aes, des ]
+  privkey:
+    description:
+      - Encryption key.
+      - Required if I(level) is C(authPriv).
+    type: str
+  timeout:
+    description:
+      - Response timeout in seconds.
+    type: int
+    version_added: 2.3.0
+  retries:
+    description:
+      - Maximum number of request retries, 0 retries means just a single request.
+    type: int
+    version_added: 2.3.0
+'''
+
+EXAMPLES = r'''
+- name: Gather facts with SNMP version 2
+  community.general.snmp_facts:
+    host: '{{ inventory_hostname }}'
+    version: v2c
+    community: public
+  delegate_to: localhost
+
+- name: Gather facts using SNMP version 3
+  community.general.snmp_facts:
+    host: '{{ inventory_hostname }}'
+    version: v3
+    level: authPriv
+    integrity: sha
+    privacy: aes
+    username: snmp-user
+    authkey: abc12345
+    privkey: def6789
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+ansible_sysdescr:
+  description: A textual description of the entity.
+  returned: success
+  type: str
+  sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
+ansible_sysobjectid:
+  description: The vendor's authoritative identification of the network management subsystem contained in the entity.
+  returned: success
+  type: str
+  sample: 1.3.6.1.4.1.8072.3.2.10
+ansible_sysuptime:
+  description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
+  returned: success
+  type: int
+  sample: 42388
+ansible_syscontact:
+  description: The textual identification of the contact person for this managed node, together with information on how to contact this person.
+ returned: success + type: str + sample: Me +ansible_sysname: + description: An administratively-assigned name for this managed node. + returned: success + type: str + sample: ubuntu-user +ansible_syslocation: + description: The physical location of this node (e.g., C(telephone closet, 3rd floor)). + returned: success + type: str + sample: Sitting on the Dock of the Bay +ansible_all_ipv4_addresses: + description: List of all IPv4 addresses. + returned: success + type: list + sample: ["127.0.0.1", "172.17.0.1"] +ansible_interfaces: + description: Dictionary of each network interface and its metadata. + returned: success + type: dict + sample: { + "1": { + "adminstatus": "up", + "description": "", + "ifindex": "1", + "ipv4": [ + { + "address": "127.0.0.1", + "netmask": "255.0.0.0" + } + ], + "mac": "", + "mtu": "65536", + "name": "lo", + "operstatus": "up", + "speed": "65536" + }, + "2": { + "adminstatus": "up", + "description": "", + "ifindex": "2", + "ipv4": [ + { + "address": "192.168.213.128", + "netmask": "255.255.255.0" + } + ], + "mac": "000a305a52a1", + "mtu": "1500", + "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", + "operstatus": "up", + "speed": "1500" + } + } +''' + +import binascii +from collections import defaultdict +from ansible_collections.community.general.plugins.module_utils import deps +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text + +with deps.declare("pysnmp"): + from pysnmp.entity.rfc3413.oneliner import cmdgen + from pysnmp.proto.rfc1905 import EndOfMibView + + +class DefineOid(object): + + def __init__(self, dotprefix=False): + if dotprefix: + dp = "." + else: + dp = "" + + # From SNMPv2-MIB + self.sysDescr = dp + "1.3.6.1.2.1.1.1.0" + self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0" + self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0" + self.sysContact = dp + "1.3.6.1.2.1.1.4.0" + self.sysName = dp + "1.3.6.1.2.1.1.5.0" + self.sysLocation = dp + "1.3.6.1.2.1.1.6.0" + + # From IF-MIB + self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1" + self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2" + self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4" + self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5" + self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6" + self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7" + self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8" + self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18" + + # From IP-MIB + self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1" + self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2" + self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3" + + +def decode_hex(hexstring): + + if len(hexstring) < 3: + return hexstring + if hexstring[:2] == "0x": + return to_text(binascii.unhexlify(hexstring[2:])) + return hexstring + + +def decode_mac(hexstring): + + if len(hexstring) != 14: + return hexstring + if hexstring[:2] == "0x": + return hexstring[2:] + return hexstring + + +def lookup_adminstatus(int_adminstatus): + adminstatus_options = { + 1: 'up', + 2: 'down', + 3: 'testing' + } + if int_adminstatus in adminstatus_options: + return adminstatus_options[int_adminstatus] + return "" + + +def lookup_operstatus(int_operstatus): + operstatus_options = { + 1: 'up', + 2: 'down', + 3: 'testing', + 4: 'unknown', + 5: 'dormant', + 6: 'notPresent', + 7: 'lowerLayerDown' + } + if int_operstatus in operstatus_options: + return operstatus_options[int_operstatus] + return "" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + version=dict(type='str', required=True, 
choices=['v2', 'v2c', 'v3']), + community=dict(type='str'), + username=dict(type='str'), + level=dict(type='str', choices=['authNoPriv', 'authPriv']), + integrity=dict(type='str', choices=['md5', 'sha']), + privacy=dict(type='str', choices=['aes', 'des']), + authkey=dict(type='str', no_log=True), + privkey=dict(type='str', no_log=True), + timeout=dict(type='int'), + retries=dict(type='int'), + ), + required_together=( + ['username', 'level', 'integrity', 'authkey'], + ['privacy', 'privkey'], + ), + supports_check_mode=True, + ) + + m_args = module.params + + deps.validate(module) + + cmdGen = cmdgen.CommandGenerator() + transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None) + + # Verify that we receive a community when using snmp v2 + if m_args['version'] in ("v2", "v2c"): + if m_args['community'] is None: + module.fail_json(msg='Community not set when using snmp version 2') + + if m_args['version'] == "v3": + if m_args['username'] is None: + module.fail_json(msg='Username not set when using snmp version 3') + + if m_args['level'] == "authPriv" and m_args['privacy'] is None: + module.fail_json(msg='Privacy algorithm not set when using authPriv') + + if m_args['integrity'] == "sha": + integrity_proto = cmdgen.usmHMACSHAAuthProtocol + elif m_args['integrity'] == "md5": + integrity_proto = cmdgen.usmHMACMD5AuthProtocol + + if m_args['privacy'] == "aes": + privacy_proto = cmdgen.usmAesCfb128Protocol + elif m_args['privacy'] == "des": + privacy_proto = cmdgen.usmDESPrivProtocol + + # Use SNMP Version 2 + if m_args['version'] in ("v2", "v2c"): + snmp_auth = cmdgen.CommunityData(m_args['community']) + + # Use SNMP Version 3 with authNoPriv + elif m_args['level'] == "authNoPriv": + snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto) + + # Use SNMP Version 3 with authPriv + else: + snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, + privProtocol=privacy_proto) + + # Use p to prefix OIDs with a dot for polling + p = DefineOid(dotprefix=True) + # Use v without a prefix to use with return values + v = DefineOid(dotprefix=False) + + def Tree(): + return defaultdict(Tree) + + results = Tree() + + errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( + snmp_auth, + cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), + cmdgen.MibVariable(p.sysDescr,), + cmdgen.MibVariable(p.sysObjectId,), + cmdgen.MibVariable(p.sysUpTime,), + cmdgen.MibVariable(p.sysContact,), + cmdgen.MibVariable(p.sysName,), + cmdgen.MibVariable(p.sysLocation,), + lookupMib=False + ) + + if errorIndication: + module.fail_json(msg=str(errorIndication)) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if current_oid == v.sysDescr: + results['ansible_sysdescr'] = decode_hex(current_val) + elif current_oid == v.sysObjectId: + results['ansible_sysobjectid'] = current_val + elif current_oid == v.sysUpTime: + results['ansible_sysuptime'] = current_val + elif current_oid == v.sysContact: + results['ansible_syscontact'] = current_val + elif current_oid == v.sysName: + results['ansible_sysname'] = current_val + elif current_oid == v.sysLocation: + results['ansible_syslocation'] = current_val + + errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( + snmp_auth, + cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), + cmdgen.MibVariable(p.ifIndex,), + 
cmdgen.MibVariable(p.ifDescr,), + cmdgen.MibVariable(p.ifMtu,), + cmdgen.MibVariable(p.ifSpeed,), + cmdgen.MibVariable(p.ifPhysAddress,), + cmdgen.MibVariable(p.ifAdminStatus,), + cmdgen.MibVariable(p.ifOperStatus,), + cmdgen.MibVariable(p.ipAdEntAddr,), + cmdgen.MibVariable(p.ipAdEntIfIndex,), + cmdgen.MibVariable(p.ipAdEntNetMask,), + + cmdgen.MibVariable(p.ifAlias,), + lookupMib=False + ) + + if errorIndication: + module.fail_json(msg=str(errorIndication)) + + interface_indexes = [] + + all_ipv4_addresses = [] + ipv4_networks = Tree() + + for varBinds in varTable: + for oid, val in varBinds: + if isinstance(val, EndOfMibView): + continue + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if v.ifIndex in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['ifindex'] = current_val + interface_indexes.append(ifIndex) + if v.ifDescr in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['name'] = current_val + if v.ifMtu in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['mtu'] = current_val + if v.ifSpeed in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['speed'] = current_val + if v.ifPhysAddress in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val) + if v.ifAdminStatus in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val)) + if v.ifOperStatus in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val)) + if v.ipAdEntAddr in current_oid: + curIPList = current_oid.rsplit('.', 4)[-4:] + curIP = ".".join(curIPList) + ipv4_networks[curIP]['address'] = current_val + all_ipv4_addresses.append(current_val) + if v.ipAdEntIfIndex in current_oid: + curIPList = current_oid.rsplit('.', 4)[-4:] + curIP = ".".join(curIPList) + ipv4_networks[curIP]['interface'] = current_val + if v.ipAdEntNetMask in current_oid: + curIPList = current_oid.rsplit('.', 4)[-4:] + curIP = ".".join(curIPList) + ipv4_networks[curIP]['netmask'] = current_val + + if v.ifAlias in current_oid: + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + results['ansible_interfaces'][ifIndex]['description'] = current_val + + interface_to_ipv4 = {} + for ipv4_network in ipv4_networks: + current_interface = ipv4_networks[ipv4_network]['interface'] + current_network = { + 'address': ipv4_networks[ipv4_network]['address'], + 'netmask': ipv4_networks[ipv4_network]['netmask'] + } + if current_interface not in interface_to_ipv4: + interface_to_ipv4[current_interface] = [] + interface_to_ipv4[current_interface].append(current_network) + else: + interface_to_ipv4[current_interface].append(current_network) + + for interface in interface_to_ipv4: + results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface] + + results['ansible_all_ipv4_addresses'] = all_ipv4_addresses + + module.exit_json(ansible_facts=results) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/solaris_zone.py b/ansible_collections/community/general/plugins/modules/solaris_zone.py new file mode 100644 index 000000000..0f970704e --- /dev/null +++ 
b/ansible_collections/community/general/plugins/modules/solaris_zone.py
@@ -0,0 +1,493 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Paul Markham
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: solaris_zone
+short_description: Manage Solaris zones
+description:
+  - Create, start, stop and delete Solaris zones.
+  - This module does not currently allow changing of options for a zone that has already been created.
+author:
+  - Paul Markham (@pmarkham)
+requirements:
+  - Solaris 10 or 11
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    description:
+      - C(present), configure and install the zone.
+      - C(installed), synonym for C(present).
+      - C(running), if the zone already exists, boot it, otherwise, configure and install
+        the zone first, then boot it.
+      - C(started), synonym for C(running).
+      - C(stopped), shut down a zone.
+      - C(absent), destroy the zone.
+      - C(configured), configure the zone so that it is ready to be attached.
+      - C(attached), attach a zone, but do not boot it.
+      - C(detached), shut down and detach a zone.
+    type: str
+    choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
+    default: present
+  name:
+    description:
+      - Zone name.
+      - A zone name must be a unique name.
+      - A zone name must begin with an alpha-numeric character.
+      - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).
+      - The name cannot be longer than 64 characters.
+    type: str
+    required: true
+  path:
+    description:
+      - The path where the zone will be created. This is required when the zone is created, but not
+        used otherwise.
+    type: str
+  sparse:
+    description:
+      - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
+    type: bool
+    default: false
+  root_password:
+    description:
+      - The password hash for the root account. If not specified, the zone's root account
+        will not have a password.
+    type: str
+  config:
+    description:
+      - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
+        and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
+        "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
+    type: str
+    default: ''
+  create_options:
+    description:
+      - 'Extra options to the zonecfg(1M) create command.'
+    type: str
+    default: ''
+  install_options:
+    description:
+      - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
+        use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
+    type: str
+    default: ''
+  attach_options:
+    description:
+      - 'Extra options to the zoneadm attach command. For example, this can be used to specify
+        whether a minimum or full update of packages is required and if any packages need to
+        be deleted. For valid values, see zoneadm(1M).'
+    type: str
+    default: ''
+  timeout:
+    description:
+      - Timeout, in seconds, for the zone to boot.
+    type: int
+    default: 600
+'''
+
+EXAMPLES = '''
+- name: Create and install a zone, but don't boot it
+  community.general.solaris_zone:
+    name: zone1
+    state: present
+    path: /zones/zone1
+    sparse: true
+    root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' + +- name: Create and install a zone and boot it + community.general.solaris_zone: + name: zone1 + state: running + path: /zones/zone1 + root_password: Be9oX7OSwWoU. + config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' + +- name: Boot an already installed zone + community.general.solaris_zone: + name: zone1 + state: running + +- name: Stop a zone + community.general.solaris_zone: + name: zone1 + state: stopped + +- name: Destroy a zone + community.general.solaris_zone: + name: zone1 + state: absent + +- name: Detach a zone + community.general.solaris_zone: + name: zone1 + state: detached + +- name: Configure a zone, ready to be attached + community.general.solaris_zone: + name: zone1 + state: configured + path: /zones/zone1 + root_password: Be9oX7OSwWoU. + config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' + +- name: Attach zone1 + community.general.solaris_zone: + name: zone1 + state: attached + attach_options: -u +''' + +import os +import platform +import re +import tempfile +import time + +from ansible.module_utils.basic import AnsibleModule + + +class Zone(object): + def __init__(self, module): + self.changed = False + self.msg = [] + + self.module = module + self.path = self.module.params['path'] + self.name = self.module.params['name'] + self.sparse = self.module.params['sparse'] + self.root_password = self.module.params['root_password'] + self.timeout = self.module.params['timeout'] + self.config = self.module.params['config'] + self.create_options = self.module.params['create_options'] + self.install_options = self.module.params['install_options'] + self.attach_options = self.module.params['attach_options'] + + self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True) + self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True) + self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True) + + if self.module.check_mode: + self.msg.append('Running in check mode') + + if platform.system() != 'SunOS': + self.module.fail_json(msg='This module requires Solaris') + + (self.os_major, self.os_minor) = platform.release().split('.') + if int(self.os_minor) < 10: + self.module.fail_json(msg='This module requires Solaris 10 or later') + + match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name) + if not match: + self.module.fail_json(msg="Provided zone name is not a valid zone name. " + "Please refer documentation for correct zone name specifications.") + + def configure(self): + if not self.path: + self.module.fail_json(msg='Missing required argument: path') + + if not self.module.check_mode: + t = tempfile.NamedTemporaryFile(delete=False, mode='wt') + + if self.sparse: + t.write('create %s\n' % self.create_options) + self.msg.append('creating sparse-root zone') + else: + t.write('create -b %s\n' % self.create_options) + self.msg.append('creating whole-root zone') + + t.write('set zonepath=%s\n' % self.path) + t.write('%s\n' % self.config) + t.close() + + cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to create zone. 
%s' % (out + err)) + os.unlink(t.name) + + self.changed = True + self.msg.append('zone configured') + + def install(self): + if not self.module.check_mode: + cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to install zone. %s' % (out + err)) + if int(self.os_minor) == 10: + self.configure_sysid() + self.configure_password() + self.configure_ssh_keys() + self.changed = True + self.msg.append('zone installed') + + def uninstall(self): + if self.is_installed(): + if not self.module.check_mode: + cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone uninstalled') + + def configure_sysid(self): + if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path): + os.unlink('%s/root/etc/.UNCONFIGURED' % self.path) + + open('%s/root/noautoshutdown' % self.path, 'w').close() + + node = open('%s/root/etc/nodename' % self.path, 'w') + node.write(self.name) + node.close() + + id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w') + id.write('1 # System previously configured?\n') + id.write('1 # Bootparams succeeded?\n') + id.write('1 # System is on a network?\n') + id.write('1 # Extended network information gathered?\n') + id.write('0 # Autobinder succeeded?\n') + id.write('1 # Network has subnets?\n') + id.write('1 # root password prompted for?\n') + id.write('1 # locale and term prompted for?\n') + id.write('1 # security policy in place\n') + id.write('1 # NFSv4 domain configured\n') + id.write('0 # Auto Registration Configured\n') + id.write('vt100') + id.close() + + def configure_ssh_keys(self): + rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path + dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path + + if not os.path.isfile(rsa_key_file): + cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err)) + + if not os.path.isfile(dsa_key_file): + cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err)) + + def configure_password(self): + shadow = '%s/root/etc/shadow' % self.path + if self.root_password: + f = open(shadow, 'r') + lines = f.readlines() + f.close() + + for i in range(0, len(lines)): + fields = lines[i].split(':') + if fields[0] == 'root': + fields[1] = self.root_password + lines[i] = ':'.join(fields) + + f = open(shadow, 'w') + for line in lines: + f.write(line) + f.close() + + def boot(self): + if not self.module.check_mode: + cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to boot zone. %s' % (out + err)) + + """ + The boot command can return before the zone has fully booted. This is especially + true on the first boot when the zone initializes the SMF services. Unless the zone + has fully booted, subsequent tasks in the playbook may fail as services aren't running yet. + Wait until the zone's console login is running; once that's running, consider the zone booted. 
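+        For reference, the readiness check below boils down to this shell
+        pipeline (an illustrative rendering of the os.system() call used):
+
+            ps -z <zonename> -o args | grep "ttymon.*-d /dev/console"
+
+        A zero exit status from that pipeline means the console ttymon is
+        running and the zone is treated as booted.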
+ """ + + elapsed = 0 + while True: + if elapsed > self.timeout: + self.module.fail_json(msg='timed out waiting for zone to boot') + rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name) + if rc == 0: + break + time.sleep(10) + elapsed += 10 + self.changed = True + self.msg.append('zone booted') + + def destroy(self): + if self.is_running(): + self.stop() + if self.is_installed(): + self.uninstall() + if not self.module.check_mode: + cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to delete zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone deleted') + + def stop(self): + if not self.module.check_mode: + cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to stop zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone stopped') + + def detach(self): + if not self.module.check_mode: + cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to detach zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone detached') + + def attach(self): + if not self.module.check_mode: + cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options) + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg='Failed to attach zone. %s' % (out + err)) + self.changed = True + self.msg.append('zone attached') + + def exists(self): + cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc == 0: + return True + else: + return False + + def is_running(self): + return self.status() == 'running' + + def is_installed(self): + return self.status() == 'installed' + + def is_configured(self): + return self.status() == 'configured' + + def status(self): + cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name) + (rc, out, err) = self.module.run_command(cmd) + if rc == 0: + return out.split(':')[2] + else: + return 'undefined' + + def state_present(self): + if self.exists(): + self.msg.append('zone already exists') + else: + self.configure() + self.install() + + def state_running(self): + self.state_present() + if self.is_running(): + self.msg.append('zone already running') + else: + self.boot() + + def state_stopped(self): + if self.exists(): + self.stop() + else: + self.module.fail_json(msg='zone does not exist') + + def state_absent(self): + if self.exists(): + if self.is_running(): + self.stop() + self.destroy() + else: + self.msg.append('zone does not exist') + + def state_configured(self): + if self.exists(): + self.msg.append('zone already exists') + else: + self.configure() + + def state_detached(self): + if not self.exists(): + self.module.fail_json(msg='zone does not exist') + if self.is_configured(): + self.msg.append('zone already detached') + else: + self.stop() + self.detach() + + def state_attached(self): + if not self.exists(): + self.msg.append('zone does not exist') + if self.is_configured(): + self.attach() + else: + self.msg.append('zone already attached') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', + choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 
+                                'started', 'stopped']),
+            path=dict(type='str'),
+            sparse=dict(type='bool', default=False),
+            root_password=dict(type='str', no_log=True),
+            timeout=dict(type='int', default=600),
+            config=dict(type='str', default=''),
+            create_options=dict(type='str', default=''),
+            install_options=dict(type='str', default=''),
+            attach_options=dict(type='str', default=''),
+        ),
+        supports_check_mode=True,
+    )
+
+    zone = Zone(module)
+
+    state = module.params['state']
+
+    if state == 'running' or state == 'started':
+        zone.state_running()
+    elif state == 'present' or state == 'installed':
+        zone.state_present()
+    elif state == 'stopped':
+        zone.state_stopped()
+    elif state == 'absent':
+        zone.state_absent()
+    elif state == 'configured':
+        zone.state_configured()
+    elif state == 'detached':
+        zone.state_detached()
+    elif state == 'attached':
+        zone.state_attached()
+    else:
+        module.fail_json(msg='Invalid state: %s' % state)
+
+    module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/sorcery.py b/ansible_collections/community/general/plugins/modules/sorcery.py
new file mode 100644
index 000000000..3278ce0ab
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sorcery.py
@@ -0,0 +1,653 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015-2016, Vlad Glagolev
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sorcery
+short_description: Package manager for Source Mage GNU/Linux
+description:
+  - Manages "spells" on Source Mage GNU/Linux using the I(sorcery) toolchain.
+author: "Vlad Glagolev (@vaygr)"
+notes:
+  - When all three components are selected, the update goes by the sequence --
+    Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
+  - Grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
+    yet supported.
+requirements:
+  - bash
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name of the spell.
+      - Multiple names can be given, separated by commas.
+      - Special value '*' in conjunction with states C(latest) or
+        C(rebuild) will update or rebuild the whole system respectively.
+    aliases: ["spell"]
+    type: list
+    elements: str
+
+  state:
+    description:
+      - Whether to cast, dispel or rebuild a package.
+      - State C(cast) is an equivalent of C(present), not C(latest).
+      - State C(latest) always triggers I(update_cache=true).
+      - State C(rebuild) implies cast of all specified spells, not only
+        those that existed before.
+    choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
+    default: "present"
+    type: str
+
+  depends:
+    description:
+      - Comma-separated list of _optional_ dependencies to build a spell
+        (or make sure it is built) with; use +/- in front of a dependency
+        to turn it on/off ('+' is optional though).
+      - This option is ignored if the C(name) parameter is equal to '*' or
+        contains more than one spell.
+      - Providers must be supplied in the form recognized by Sorcery, e.g.
+ 'openssl(SSL)' + type: str + + update: + description: + - Whether or not to update sorcery scripts at the very first stage + type: bool + default: false + + update_cache: + description: + - Whether or not to update grimoire collection before casting spells + type: bool + default: false + aliases: ["update_codex"] + + cache_valid_time: + description: + - Time in seconds to invalidate grimoire collection on update + - especially useful for SCM and rsync grimoires + - makes sense only in pair with C(update_cache) + type: int + default: 0 +''' + + +EXAMPLES = ''' +- name: Make sure spell foo is installed + community.general.sorcery: + spell: foo + state: present + +- name: Make sure spells foo, bar and baz are removed + community.general.sorcery: + spell: foo,bar,baz + state: absent + +- name: Make sure spell foo with dependencies bar and baz is installed + community.general.sorcery: + spell: foo + depends: bar,baz + state: present + +- name: Make sure spell foo with bar and without baz dependencies is installed + community.general.sorcery: + spell: foo + depends: +bar,-baz + state: present + +- name: Make sure spell foo with libressl (providing SSL) dependency is installed + community.general.sorcery: + spell: foo + depends: libressl(SSL) + state: present + +- name: Make sure spells with/without required dependencies (if any) are installed + community.general.sorcery: + name: "{{ item.spell }}" + depends: "{{ item.depends | default(None) }}" + state: present + loop: + - { spell: 'vifm', depends: '+file,-gtk+2' } + - { spell: 'fwknop', depends: 'gpgme' } + - { spell: 'pv,tnftp,tor' } + +- name: Install the latest version of spell foo using regular glossary + community.general.sorcery: + name: foo + state: latest + +- name: Rebuild spell foo + community.general.sorcery: + spell: foo + state: rebuild + +- name: Rebuild the whole system, but update Sorcery and Codex first + community.general.sorcery: + spell: '*' + state: rebuild + update: true + update_cache: true + +- name: Refresh the grimoire collection if it is 1 day old using native sorcerous alias + community.general.sorcery: + update_codex: true + cache_valid_time: 86400 + +- name: Update only Sorcery itself + community.general.sorcery: + update: true +''' + + +RETURN = ''' +''' + + +import datetime +import fileinput +import os +import re +import shutil +import sys + + +# auto-filled at module init +SORCERY = { + 'sorcery': None, + 'scribe': None, + 'cast': None, + 'dispel': None, + 'gaze': None +} + +SORCERY_LOG_DIR = "/var/log/sorcery" +SORCERY_STATE_DIR = "/var/state/sorcery" + + +def get_sorcery_ver(module): + """ Get Sorcery version. """ + + cmd_sorcery = "%s --version" % SORCERY['sorcery'] + + rc, stdout, stderr = module.run_command(cmd_sorcery) + + if rc != 0 or not stdout: + module.fail_json(msg="unable to get Sorcery version") + + return stdout.strip() + + +def codex_fresh(codex, module): + """ Check if grimoire collection is fresh enough. 
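+
+    A grimoire counts as fresh when the mtime of its <grimoire>.lastupdate
+    stamp under /var/state/sorcery is within 'cache_valid_time' seconds of
+    now; any single stale grimoire invalidates the whole Codex.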
""" + + if not module.params['cache_valid_time']: + return False + + timedelta = datetime.timedelta(seconds=module.params['cache_valid_time']) + + for grimoire in codex: + lastupdate_path = os.path.join(SORCERY_STATE_DIR, + grimoire + ".lastupdate") + + try: + mtime = os.stat(lastupdate_path).st_mtime + except Exception: + return False + + lastupdate_ts = datetime.datetime.fromtimestamp(mtime) + + # if any grimoire is not fresh, we invalidate the Codex + if lastupdate_ts + timedelta < datetime.datetime.now(): + return False + + return True + + +def codex_list(module): + """ List valid grimoire collection. """ + + codex = {} + + cmd_scribe = "%s index" % SORCERY['scribe'] + + rc, stdout, stderr = module.run_command(cmd_scribe) + + if rc != 0: + module.fail_json(msg="unable to list grimoire collection, fix your Codex") + + rex = re.compile(r"^\s*\[\d+\] : (?P[\w\-+.]+) : [\w\-+./]+(?: : (?P[\w\-+.]+))?\s*$") + + # drop 4-line header and empty trailing line + for line in stdout.splitlines()[4:-1]: + match = rex.match(line) + + if match: + codex[match.group('grim')] = match.group('ver') + + if not codex: + module.fail_json(msg="no grimoires to operate on; add at least one") + + return codex + + +def update_sorcery(module): + """ Update sorcery scripts. + + This runs 'sorcery update' ('sorcery -u'). Check mode always returns a + positive change value. + + """ + + changed = False + + if module.check_mode: + if not module.params['name'] and not module.params['update_cache']: + module.exit_json(changed=True, msg="would have updated Sorcery") + else: + sorcery_ver = get_sorcery_ver(module) + + cmd_sorcery = "%s update" % SORCERY['sorcery'] + + rc, stdout, stderr = module.run_command(cmd_sorcery) + + if rc != 0: + module.fail_json(msg="unable to update Sorcery: " + stdout) + + if sorcery_ver != get_sorcery_ver(module): + changed = True + + if not module.params['name'] and not module.params['update_cache']: + module.exit_json(changed=changed, + msg="successfully updated Sorcery") + + +def update_codex(module): + """ Update grimoire collections. + + This runs 'scribe update'. Check mode always returns a positive change + value when 'cache_valid_time' is used. + + """ + + params = module.params + + changed = False + + codex = codex_list(module) + fresh = codex_fresh(codex, module) + + if module.check_mode: + if not params['name']: + if not fresh: + changed = True + + module.exit_json(changed=changed, msg="would have updated Codex") + elif not fresh or params['name'] and params['state'] == 'latest': + # SILENT is required as a workaround for query() in libgpg + module.run_command_environ_update.update(dict(SILENT='1')) + + cmd_scribe = "%s update" % SORCERY['scribe'] + + rc, stdout, stderr = module.run_command(cmd_scribe) + + if rc != 0: + module.fail_json(msg="unable to update Codex: " + stdout) + + if codex != codex_list(module): + changed = True + + if not params['name']: + module.exit_json(changed=changed, + msg="successfully updated Codex") + + +def match_depends(module): + """ Check for matching dependencies. + + This inspects spell's dependencies with the desired states and returns + 'False' if a recast is needed to match them. It also adds required lines + to the system-wide depends file for proper recast procedure. 
+
+    """
+
+    params = module.params
+    spells = params['name']
+
+    depends = {}
+
+    depends_ok = True
+
+    if len(spells) > 1 or not params['depends']:
+        return depends_ok
+
+    spell = spells[0]
+
+    if module.check_mode:
+        sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
+        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
+
+        try:
+            shutil.copy2(sorcery_depends_orig, sorcery_depends)
+        except IOError:
+            module.fail_json(msg="failed to copy depends.check file")
+    else:
+        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
+
+    rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
+
+    for d in params['depends'].split(','):
+        match = rex.match(d)
+
+        if not match:
+            module.fail_json(msg="wrong depends line for spell '%s'" % spell)
+
+        # normalize status
+        if not match.group('status') or match.group('status') == '+':
+            status = 'on'
+        else:
+            status = 'off'
+
+        depends[match.group('depend')] = status
+
+    # drop providers spec
+    depends_list = [s.split('(')[0] for s in depends]
+
+    cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
+
+    rc, stdout, stderr = module.run_command(cmd_gaze)
+
+    if rc != 0:
+        module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
+
+    fi = fileinput.input(sorcery_depends, inplace=True)
+
+    try:
+        try:
+            for line in fi:
+                if line.startswith(spell + ':'):
+                    match = None
+
+                    for d in depends:
+                        # when local status is 'off' and dependency is provider,
+                        # use only provider value
+                        d_offset = d.find('(')
+
+                        if d_offset == -1:
+                            d_p = ''
+                        else:
+                            d_p = re.escape(d[d_offset:])
+
+                        # .escape() is needed mostly for the spells like 'libsigc++'
+                        rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
+                                         (re.escape(spell), re.escape(d), d_p))
+
+                        match = rex.match(line)
+
+                        # we matched the line "spell:dependency:on|off:optional:"
+                        if match:
+                            # if we also matched the local status, mark dependency
+                            # as empty and put it back into depends file
+                            if match.group('lstatus') == depends[d]:
+                                depends[d] = None
+
+                            sys.stdout.write(line)
+
+                            # status is not what we need, so keep this dependency
+                            # in the list for further reverse switching;
+                            # stop and process the next line in both cases
+                            break
+
+                    if not match:
+                        sys.stdout.write(line)
+                else:
+                    sys.stdout.write(line)
+        except IOError:
+            module.fail_json(msg="I/O error on the depends file")
+    finally:
+        fi.close()
+
+    depends_new = [v for v in depends if depends[v]]
+
+    if depends_new:
+        try:
+            try:
+                fl = open(sorcery_depends, 'a')
+
+                for k in depends_new:
+                    fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
+            except IOError:
+                module.fail_json(msg="I/O error on the depends file")
+        finally:
+            fl.close()
+
+        depends_ok = False
+
+    if module.check_mode:
+        try:
+            os.remove(sorcery_depends)
+        except IOError:
+            module.fail_json(msg="failed to clean up depends.check file")
+
+    return depends_ok
+
+
+def manage_spells(module):
+    """ Cast or dispel spells.
+
+    This manages the whole system ('*'), a list of spells, or a single spell.
+    The 'cast' command is used to install or rebuild spells, while 'dispel'
+    takes care of their removal from the system.
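+
+    For the whole-system update ('*' with state 'latest'), the install queue
+    under /var/log/sorcery/queue is backed up, regenerated with 'sorcery
+    queue', and then cast with 'cast --queue' only if it turns out non-empty.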
+
+    """
+
+    params = module.params
+    spells = params['name']
+
+    sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
+
+    if spells == '*':
+        if params['state'] == 'latest':
+            # back up original queue
+            try:
+                os.rename(sorcery_queue, sorcery_queue + ".backup")
+            except IOError:
+                module.fail_json(msg="failed to back up the update queue")
+
+            # see update_codex()
+            module.run_command_environ_update.update(dict(SILENT='1'))
+
+            cmd_sorcery = "%s queue" % SORCERY['sorcery']
+
+            rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+            if rc != 0:
+                module.fail_json(msg="failed to generate the update queue")
+
+            try:
+                queue_size = os.stat(sorcery_queue).st_size
+            except Exception:
+                module.fail_json(msg="failed to read the update queue")
+
+            if queue_size != 0:
+                if module.check_mode:
+                    try:
+                        os.rename(sorcery_queue + ".backup", sorcery_queue)
+                    except IOError:
+                        module.fail_json(msg="failed to restore the update queue")
+
+                    module.exit_json(changed=True, msg="would have updated the system")
+
+                cmd_cast = "%s --queue" % SORCERY['cast']
+
+                rc, stdout, stderr = module.run_command(cmd_cast)
+
+                if rc != 0:
+                    module.fail_json(msg="failed to update the system")
+
+                module.exit_json(changed=True, msg="successfully updated the system")
+            else:
+                module.exit_json(changed=False, msg="the system is already up to date")
+        elif params['state'] == 'rebuild':
+            if module.check_mode:
+                module.exit_json(changed=True, msg="would have rebuilt the system")
+
+            cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
+
+            rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+            if rc != 0:
+                module.fail_json(msg="failed to rebuild the system: " + stdout)
+
+            module.exit_json(changed=True, msg="successfully rebuilt the system")
+        else:
+            module.fail_json(msg="unsupported operation on '*' name value")
+    else:
+        if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+            # extract versions from the 'gaze' command
+            cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
+
+            rc, stdout, stderr = module.run_command(cmd_gaze)
+
+            # fail if any of the spells cannot be found
+            if rc != 0:
+                module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
+                                 ', '.join(spells))
+
+            cast_queue = []
+            dispel_queue = []
+
+            rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
+
+            # drop 2-line header and empty trailing line
+            for line in stdout.splitlines()[2:-1]:
+                match = rex.match(line)
+
+                cast = False
+
+                if params['state'] == 'present':
+                    # spell is not installed..
+                    if match.group('inst_ver') == '-':
+                        # ..so set up depends reqs for it
+                        match_depends(module)
+
+                        cast = True
+                    # spell is installed..
+                    else:
+                        # ..but does not conform depends reqs
+                        if not match_depends(module):
+                            cast = True
+                elif params['state'] == 'latest':
+                    # grimoire and installed versions do not match..
+                    if match.group('grim_ver') != match.group('inst_ver'):
+                        # ..so check for depends reqs first and set them up
+                        match_depends(module)
+
+                        cast = True
+                    # grimoire and installed versions match..
+                    else:
+                        # ..but the spell does not conform depends reqs
+                        if not match_depends(module):
+                            cast = True
+                elif params['state'] == 'rebuild':
+                    cast = True
+                # 'absent'
+                else:
+                    if match.group('inst_ver') != '-':
+                        dispel_queue.append(match.group('spell'))
+
+                if cast:
+                    cast_queue.append(match.group('spell'))
+
+            if cast_queue:
+                if module.check_mode:
+                    module.exit_json(changed=True, msg="would have cast spell(s)")
+
+                cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
+
+                rc, stdout, stderr = module.run_command(cmd_cast)
+
+                if rc != 0:
+                    module.fail_json(msg="failed to cast spell(s): %s" % stdout)
+
+                module.exit_json(changed=True, msg="successfully cast spell(s)")
+            elif params['state'] != 'absent':
+                module.exit_json(changed=False, msg="spell(s) are already cast")
+
+            if dispel_queue:
+                if module.check_mode:
+                    module.exit_json(changed=True, msg="would have dispelled spell(s)")
+
+                cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
+
+                rc, stdout, stderr = module.run_command(cmd_dispel)
+
+                if rc != 0:
+                    module.fail_json(msg="failed to dispel spell(s): %s" % stdout)
+
+                module.exit_json(changed=True, msg="successfully dispelled spell(s)")
+            else:
+                module.exit_json(changed=False, msg="spell(s) are already dispelled")
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(default=None, aliases=['spell'], type='list', elements='str'),
+            state=dict(default='present', choices=['present', 'latest',
+                                                   'absent', 'cast', 'dispelled', 'rebuild']),
+            depends=dict(default=None),
+            update=dict(default=False, type='bool'),
+            update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
+            cache_valid_time=dict(default=0, type='int')
+        ),
+        required_one_of=[['name', 'update', 'update_cache']],
+        supports_check_mode=True
+    )
+
+    if os.geteuid() != 0:
+        module.fail_json(msg="root privileges are required for this operation")
+
+    for c in SORCERY:
+        SORCERY[c] = module.get_bin_path(c, True)
+
+    # prepare environment: run sorcery commands without asking questions
+    module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')
+
+    params = module.params
+
+    # normalize 'state' parameter
+    if params['state'] in ('present', 'cast'):
+        params['state'] = 'present'
+    elif params['state'] in ('absent', 'dispelled'):
+        params['state'] = 'absent'
+
+    if params['update']:
+        update_sorcery(module)
+
+    if params['update_cache'] or params['state'] == 'latest':
+        update_codex(module)
+
+    if params['name']:
+        manage_spells(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/spectrum_device.py b/ansible_collections/community/general/plugins/modules/spectrum_device.py
new file mode 100644
index 000000000..5cfc07664
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/spectrum_device.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Renato Orgito
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: spectrum_device
+short_description: Creates/deletes devices in CA Spectrum
+description:
+  - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html).
+ - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1 +author: "Renato Orgito (@orgito)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + device: + type: str + aliases: [ host, name ] + required: true + description: + - IP address of the device. + - If a hostname is given, it will be resolved to the IP address. + community: + type: str + description: + - SNMP community used for device discovery. + - Required when I(state=present). + required: true + landscape: + type: str + required: true + description: + - Landscape handle of the SpectroServer to which add or remove the device. + state: + type: str + description: + - On C(present) creates the device when it does not exist. + - On C(absent) removes the device when it exists. + choices: ['present', 'absent'] + default: 'present' + url: + type: str + aliases: [ oneclick_url ] + required: true + description: + - HTTP, HTTPS URL of the Oneclick server in the form C((http|https)://host.domain[:port]). + url_username: + type: str + aliases: [ oneclick_user ] + required: true + description: + - Oneclick user name. + url_password: + type: str + aliases: [ oneclick_password ] + required: true + description: + - Oneclick user password. + use_proxy: + description: + - if C(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + type: bool + validate_certs: + description: + - If C(false), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + default: true + type: bool + agentport: + type: int + required: false + description: + - UDP port used for SNMP discovery. + default: 161 +notes: + - The devices will be created inside the I(Universe) container of the specified landscape. + - All the operations will be performed only on the specified landscape. 
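+  - The landscape handle must be given in hexadecimal notation (for example C(0x100000)); it is parsed as base 16.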
+'''
+
+EXAMPLES = '''
+- name: Add device to CA Spectrum
+  local_action:
+    module: spectrum_device
+    device: '{{ ansible_host }}'
+    community: secret
+    landscape: '0x100000'
+    oneclick_url: http://oneclick.example.com:8080
+    oneclick_user: username
+    oneclick_password: password
+    state: present
+
+
+- name: Remove device from CA Spectrum
+  local_action:
+    module: spectrum_device
+    device: '{{ ansible_host }}'
+    landscape: '{{ landscape_handle }}'
+    oneclick_url: http://oneclick.example.com:8080
+    oneclick_user: username
+    oneclick_password: password
+    use_proxy: false
+    state: absent
+'''
+
+RETURN = '''
+device:
+  description: Device data when I(state=present).
+  returned: success
+  type: dict
+  sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'}
+'''
+
+from socket import gethostbyname, gaierror
+import xml.etree.ElementTree as ET
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(resource, xml=None, method=None):
+    headers = {
+        "Content-Type": "application/xml",
+        "Accept": "application/xml"
+    }
+
+    url = module.params['oneclick_url'] + '/spectrum/restful/' + resource
+
+    response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45)
+
+    if info['status'] == 401:
+        module.fail_json(msg="failed to authenticate to Oneclick server")
+
+    if info['status'] not in (200, 201, 204):
+        module.fail_json(msg=info['msg'])
+
+    return response.read()
+
+
+def post(resource, xml=None):
+    return request(resource, xml=xml, method='POST')
+
+
+def delete(resource):
+    return request(resource, xml=None, method='DELETE')
+
+
+def get_ip():
+    try:
+        device_ip = gethostbyname(module.params.get('device'))
+    except gaierror:
+        module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device'))
+
+    return device_ip
+
+
+def get_device(device_ip):
+    """Query OneClick for the device using the IP Address"""
+    resource = '/models'
+    landscape_min = "0x%x" % int(module.params.get('landscape'), 16)
+    landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000)
+
+    xml = """<?xml version="1.0" encoding="UTF-8"?>
+    <rs:model-request throttlesize="5"
+        xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+        xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+        <rs:target-models>
+            <rs:models-search>
+                <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+                    <action-models>
+                        <filtered-models>
+                            <and>
+                                <equals>
+                                    <model-type>SearchManager</model-type>
+                                </equals>
+                                <greater-than>
+                                    <attribute id="0x129fa">
+                                        <value>{mh_min}</value>
+                                    </attribute>
+                                </greater-than>
+                                <less-than>
+                                    <attribute id="0x129fa">
+                                        <value>{mh_max}</value>
+                                    </attribute>
+                                </less-than>
+                            </and>
+                        </filtered-models>
+                        <action>FIND_DEV_MODELS_BY_IP</action>
+                        <attribute id="0x12d7f">
+                            <value>{search_ip}</value>
+                        </attribute>
+                    </action-models>
+                </rs:search-criteria>
+            </rs:models-search>
+        </rs:target-models>
+        <rs:requested-attribute id="0x12d7f"/>
+    </rs:model-request>
+    """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max)
+
+    result = post(resource, xml=xml)
+
+    root = ET.fromstring(result)
+
+    if root.get('total-models') == '0':
+        return None
+
+    namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+
+    # get the first device
+    model = root.find('ca:model-responses', namespace).find('ca:model', namespace)
+
+    if model.get('error'):
+        module.fail_json(msg="error checking device: %s" % model.get('error'))
+
+    # get the attributes
+    model_handle = model.get('mh')
+
+    model_address = model.find('./*[@id="0x12d7f"]').text
+
+    # derive the landscape handler from the model handler of the device
+    model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+    device = dict(
+        model_handle=model_handle,
+        address=model_address,
+        landscape=model_landscape)
+
+    return device
+
+
+def add_device():
+    device_ip = get_ip()
+    device = get_device(device_ip)
+
+    if device:
+        module.exit_json(changed=False, device=device)
+
+    if module.check_mode:
+        device = dict(
+            model_handle=None,
+            address=device_ip,
+            landscape="0x%x" % int(module.params.get('landscape'), 16))
+        module.exit_json(changed=True, device=device)
+
+    resource = 'model?ipaddress=' + device_ip +
'&commstring=' + module.params.get('community') + resource += '&landscapeid=' + module.params.get('landscape') + + if module.params.get('agentport', None): + resource += '&agentport=' + str(module.params.get('agentport', 161)) + + result = post(resource) + root = ET.fromstring(result) + + if root.get('error') != 'Success': + module.fail_json(msg=root.get('error-message')) + + namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') + model = root.find('ca:model', namespace) + + model_handle = model.get('mh') + model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000) + + device = dict( + model_handle=model_handle, + address=device_ip, + landscape=model_landscape, + ) + + module.exit_json(changed=True, device=device) + + +def remove_device(): + device_ip = get_ip() + device = get_device(device_ip) + + if device is None: + module.exit_json(changed=False) + + if module.check_mode: + module.exit_json(changed=True) + + resource = '/model/' + device['model_handle'] + result = delete(resource) + + root = ET.fromstring(result) + + namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response') + error = root.find('ca:error', namespace).text + + if error != 'Success': + error_message = root.find('ca:error-message', namespace).text + module.fail_json(msg="%s %s" % (error, error_message)) + + module.exit_json(changed=True) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + device=dict(required=True, aliases=['host', 'name']), + landscape=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ? + agentport=dict(type='int', default=161), + url=dict(required=True, aliases=['oneclick_url']), + url_username=dict(required=True, aliases=['oneclick_user']), + url_password=dict(required=True, no_log=True, aliases=['oneclick_password']), + use_proxy=dict(type='bool', default=True), + validate_certs=dict(type='bool', default=True), + ), + required_if=[('state', 'present', ['community'])], + supports_check_mode=True + ) + + if module.params.get('state') == 'present': + add_device() + else: + remove_device() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py b/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py new file mode 100644 index 000000000..028ad7f9f --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py @@ -0,0 +1,536 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021, Tyler Gates +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: spectrum_model_attrs +short_description: Enforce a model's attributes in CA Spectrum +description: + - This module can be used to enforce a model's attributes in CA Spectrum. +version_added: 2.5.0 +author: + - Tyler Gates (@tgates81) +notes: + - Tested on CA Spectrum version 10.4.2.0.189. + - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. 
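+  - In check mode, requested values are only compared against those currently reported by OneClick; no update request is sent.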
+requirements: + - 'python >= 2.7' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + url: + description: + - URL of OneClick server. + type: str + required: true + url_username: + description: + - OneClick username. + type: str + required: true + aliases: [username] + url_password: + description: + - OneClick password. + type: str + required: true + aliases: [password] + use_proxy: + description: + - if C(false), it will not use a proxy, even if one is defined in + an environment variable on the target hosts. + default: true + required: false + type: bool + name: + description: + - Model name. + type: str + required: true + type: + description: + - Model type. + type: str + required: true + validate_certs: + description: + - Validate SSL certificates. Only change this to C(false) if you can guarantee that you are talking to the correct endpoint and there is no + man-in-the-middle attack happening. + type: bool + default: true + required: false + attributes: + description: + - A list of attribute names and values to enforce. + - All values and parameters are case sensitive and must be provided as strings only. + required: true + type: list + elements: dict + suboptions: + name: + description: + - Attribute name OR hex ID. + - 'Currently defined names are:' + - ' C(App_Manufacturer) (C(0x230683))' + - ' C(CollectionsModelNameString) (C(0x12adb))' + - ' C(Condition) (C(0x1000a))' + - ' C(Criticality) (C(0x1290c))' + - ' C(DeviceType) (C(0x23000e))' + - ' C(isManaged) (C(0x1295d))' + - ' C(Model_Class) (C(0x11ee8))' + - ' C(Model_Handle) (C(0x129fa))' + - ' C(Model_Name) (C(0x1006e))' + - ' C(Modeltype_Handle) (C(0x10001))' + - ' C(Modeltype_Name) (C(0x10000))' + - ' C(Network_Address) (C(0x12d7f))' + - ' C(Notes) (C(0x11564))' + - ' C(ServiceDesk_Asset_ID) (C(0x12db9))' + - ' C(TopologyModelNameString) (C(0x129e7))' + - ' C(sysDescr) (C(0x10052))' + - ' C(sysName) (C(0x10b5b))' + - ' C(Vendor_Name) (C(0x11570))' + - ' C(Description) (C(0x230017))' + - Hex IDs are the direct identifiers in Spectrum and will always work. + - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' + type: str + required: true + value: + description: + - Attribute value. Empty strings should be C("") or C(null). + type: str + required: true +''' + +EXAMPLES = r''' +- name: Enforce maintenance mode for modelxyz01 with a note about why + community.general.spectrum_model_attrs: + url: "http://oneclick.url.com" + username: "{{ oneclick_username }}" + password: "{{ oneclick_password }}" + name: "modelxyz01" + type: "Host_Device" + validate_certs: true + attributes: + - name: "isManaged" + value: "false" + - name: "Notes" + value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}" + delegate_to: localhost + register: spectrum_model_attrs_status +''' + +RETURN = r''' +msg: + description: Informational message on the job result. + type: str + returned: always + sample: 'Success' +changed_attrs: + description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. 
+ type: dict + returned: always + sample: { + "Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", + "isManaged": "true" + } +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import quote +import json +import re +import xml.etree.ElementTree as ET + + +class spectrum_model_attrs: + def __init__(self, module): + self.module = module + self.url = module.params['url'] + # If the user did not define a full path to the restul space in url: + # params, add what we believe it to be. + if not re.search('\\/.+', self.url.split('://')[1]): + self.url = "%s/spectrum/restful" % self.url.rstrip('/') + # Align these with what is defined in OneClick's UI under: + # Locator -> Devices -> By Model Name -> -> + # Attributes tab. + self.attr_map = dict(App_Manufacturer=hex(0x230683), + CollectionsModelNameString=hex(0x12adb), + Condition=hex(0x1000a), + Criticality=hex(0x1290c), + DeviceType=hex(0x23000e), + isManaged=hex(0x1295d), + Model_Class=hex(0x11ee8), + Model_Handle=hex(0x129fa), + Model_Name=hex(0x1006e), + Modeltype_Handle=hex(0x10001), + Modeltype_Name=hex(0x10000), + Network_Address=hex(0x12d7f), + Notes=hex(0x11564), + ServiceDesk_Asset_ID=hex(0x12db9), + TopologyModelNameString=hex(0x129e7), + sysDescr=hex(0x10052), + sysName=hex(0x10b5b), + Vendor_Name=hex(0x11570), + Description=hex(0x230017)) + self.search_qualifiers = [ + "and", "or", "not", "greater-than", "greater-than-or-equals", + "less-than", "less-than-or-equals", "equals", "equals-ignore-case", + "does-not-equal", "does-not-equal-ignore-case", "has-prefix", + "does-not-have-prefix", "has-prefix-ignore-case", + "does-not-have-prefix-ignore-case", "has-substring", + "does-not-have-substring", "has-substring-ignore-case", + "does-not-have-substring-ignore-case", "has-suffix", + "does-not-have-suffix", "has-suffix-ignore-case", + "does-not-have-suffix-ignore-case", "has-pcre", + "has-pcre-ignore-case", "has-wildcard", "has-wildcard-ignore-case", + "is-derived-from", "not-is-derived-from"] + + self.resp_namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response") + + self.result = dict(msg="", changed_attrs=dict()) + self.success_msg = "Success" + + def build_url(self, path): + """ + Build a sane Spectrum restful API URL + :param path: The path to append to the restful base + :type path: str + :returns: Complete restful API URL + :rtype: str + """ + + return "%s/%s" % (self.url.rstrip('/'), path.lstrip('/')) + + def attr_id(self, name): + """ + Get attribute hex ID + :param name: The name of the attribute to retrieve the hex ID for + :type name: str + :returns: Translated hex ID of name, or None if no translation found + :rtype: str or None + """ + + try: + return self.attr_map[name] + except KeyError: + return None + + def attr_name(self, _id): + """ + Get attribute name from hex ID + :param _id: The hex ID to lookup a name for + :type _id: str + :returns: Translated name of hex ID, or None if no translation found + :rtype: str or None + """ + + for name, m_id in list(self.attr_map.items()): + if _id == m_id: + return name + return None + + def urlencode(self, string): + """ + URL Encode a string + :param: string: The string to URL encode + :type string: str + :returns: URL encode version of supplied string + :rtype: str + """ + + return quote(string, "<>%-_.!*'():?#/@&+,;=") + + def update_model(self, model_handle, attrs): + """ + 
Update a model's attributes
+        :param model_handle: The model's handle ID
+        :type model_handle: str
+        :param attrs: Model's attributes to update. {'<name or hex ID>': '<value>'}
+        :type attrs: dict
+        :returns: Nothing; exits on error or updates self.results
+        :rtype: None
+        """
+
+        # Build the update URL
+        update_url = self.build_url("/model/%s?" % model_handle)
+        for name, val in list(attrs.items()):
+            if val is None:
+                # None values should be converted to empty strings
+                val = ""
+            val = self.urlencode(str(val))
+            if not update_url.endswith('?'):
+                update_url += "&"
+
+            update_url += "attr=%s&val=%s" % (self.attr_id(name) or name, val)
+
+        # PUT to /model to update the attributes, or fail.
+        resp, info = fetch_url(self.module, update_url, method="PUT",
+                               headers={"Content-Type": "application/json",
+                                        "Accept": "application/json"},
+                               use_proxy=self.module.params['use_proxy'])
+        status_code = info["status"]
+        if status_code >= 400:
+            body = info['body']
+        else:
+            body = "" if resp is None else resp.read()
+        if status_code != 200:
+            self.result['msg'] = "HTTP PUT error %s: %s: %s" % (status_code, update_url, body)
+            self.module.fail_json(**self.result)
+
+        # Load and parse the JSON response and either fail or set results.
+        json_resp = json.loads(body)
+        """
+        Example success response:
+            {'model-update-response-list':{'model-responses':{'model':{'@error':'Success','@mh':'0x1010e76','attribute':{'@error':'Success','@id':'0x1295d'}}}}}
+        Example failure response:
+            {'model-update-response-list': {'model-responses': {'model': {'@error': 'PartialFailure', '@mh': '0x1010e76', 'attribute': {'@error-message': 'brn0vlappua001: You do not have permission to set attribute Network_Address for this model.', '@error': 'Error', '@id': '0x12d7f'}}}}}
+        """  # noqa
+        model_resp = json_resp['model-update-response-list']['model-responses']['model']
+        if model_resp['@error'] != "Success":
+            # I'm not 100% confident on the expected failure structure so just
+            # dump all of ['attribute'].
+            self.result['msg'] = str(model_resp['attribute'])
+            self.module.fail_json(**self.result)
+
+        # Should be OK if we get to here, set results.
+        self.result['msg'] = self.success_msg
+        self.result['changed_attrs'].update(attrs)
+        self.result['changed'] = True
+
+    def find_model(self, search_criteria, ret_attrs=None):
+        """
+        Search for a model in /models
+        :param search_criteria: The XML <filtered-models> search criteria block
+        :type search_criteria: str
+        :param ret_attrs: List of attributes by name or ID to return back
+                          (default is Model_Handle)
+        :type ret_attrs: list
+        returns: Dictionary mapping of ret_attrs to values: {ret_attr: ret_val}
+        rtype: dict
+        """
+
+        # If no return attributes were asked for, return Model_Handle.
+        if ret_attrs is None:
+            ret_attrs = ['Model_Handle']
+
+        # Set the XML <rs:requested-attribute> tags. If no hex ID
+        # is found for the name, assume it is already in hex. {name: hex ID}
+        rqstd_attrs = ""
+        for ra in ret_attrs:
+            _id = self.attr_id(ra) or ra
+            rqstd_attrs += '<rs:requested-attribute id="%s"/>' % _id
+
+        # Build the complete XML search query for HTTP POST.
+        xml = """<?xml version="1.0" encoding="UTF-8"?>
+        <rs:model-request throttlesize="5"
+            xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+            xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+            <rs:target-models>
+                <rs:models-search>
+                    <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+                        {0}
+                    </rs:search-criteria>
+                </rs:models-search>
+            </rs:target-models>
+            {1}
+        </rs:model-request>
+        """.format(search_criteria, rqstd_attrs)
+
+        # POST to /models and fail on errors.
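+        # (fetch_url performs HTTP basic auth using the standard
+        # url_username/url_password module parameters and honors
+        # validate_certs.)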
+        url = self.build_url("/models")
+        resp, info = fetch_url(self.module, url, data=xml, method="POST",
+                               use_proxy=self.module.params['use_proxy'],
+                               headers={"Content-Type": "application/xml",
+                                        "Accept": "application/xml"})
+        status_code = info["status"]
+        if status_code >= 400:
+            body = info['body']
+        else:
+            body = "" if resp is None else resp.read()
+        if status_code != 200:
+            self.result['msg'] = "HTTP POST error %s: %s: %s" % (status_code, url, body)
+            self.module.fail_json(**self.result)
+
+        # Parse through the XML response and fail on any detected errors.
+        root = ET.fromstring(body)
+        total_models = int(root.attrib['total-models'])
+        error = root.attrib['error']
+        model_responses = root.find('ca:model-responses', self.resp_namespace)
+        if total_models < 1:
+            self.result['msg'] = "No models found matching search criteria `%s'" % search_criteria
+            self.module.fail_json(**self.result)
+        elif total_models > 1:
+            self.result['msg'] = "More than one model found (%s): `%s'" % (total_models, ET.tostring(model_responses,
+                                                                                                      encoding='unicode'))
+            self.module.fail_json(**self.result)
+        if error != "EndOfResults":
+            self.result['msg'] = "Unexpected search response `%s': %s" % (error, ET.tostring(model_responses,
+                                                                                             encoding='unicode'))
+            self.module.fail_json(**self.result)
+        model = model_responses.find('ca:model', self.resp_namespace)
+        attrs = model.findall('ca:attribute', self.resp_namespace)
+        if not attrs:
+            self.result['msg'] = "No attributes returned."
+            self.module.fail_json(**self.result)
+
+        # XML response should be successful. Iterate and set each returned
+        # attribute ID/name and value for return.
+        ret = dict()
+        for attr in attrs:
+            attr_id = attr.get('id')
+            attr_name = self.attr_name(attr_id)
+            # Note: all values except empty strings (None) are strings only!
+            attr_val = attr.text
+            key = attr_name if attr_name in ret_attrs else attr_id
+            ret[key] = attr_val
+            ret_attrs.remove(key)
+        return ret
+
+    def find_model_by_name_type(self, mname, mtype, ret_attrs=None):
+        """
+        Find a model by name and type
+        :param mname: Model name
+        :type mname: str
+        :param mtype: Model type
+        :type mtype: str
+        :param ret_attrs: List of attributes by name or ID to return back
+                          (default is Model_Handle)
+        :type ret_attrs: list
+        returns: find_model(): Dictionary mapping of ret_attrs to values:
+                 {ret_attr: ret_val}
+        rtype: dict
+        """
+
+        # If no return attributes were asked for, return Model_Handle.
+        if ret_attrs is None:
+            ret_attrs = ['Model_Handle']
+
+        """This is basically as follows:
+        <filtered-models>
+            <and>
+                <equals>
+                    <attribute id="0x1006e">
+                        <value>mname</value>
+                    </attribute>
+                </equals>
+                <equals>
+                    <attribute id="0x10000">
+                        <value>mtype</value>
+                    </attribute>
+                </equals>
+            </and>
+        </filtered-models>
+        """
+
+        # Parent filter tag
+        filtered_models = ET.Element('filtered-models')
+        # Logically and
+        _and = ET.SubElement(filtered_models, 'and')
+
+        # Model Name
+        MN_equals = ET.SubElement(_and, 'equals')
+        Model_Name = ET.SubElement(MN_equals, 'attribute',
+                                   {'id': self.attr_map['Model_Name']})
+        MN_value = ET.SubElement(Model_Name, 'value')
+        MN_value.text = mname
+
+        # Model Type Name
+        MTN_equals = ET.SubElement(_and, 'equals')
+        Modeltype_Name = ET.SubElement(MTN_equals, 'attribute',
+                                       {'id': self.attr_map['Modeltype_Name']})
+        MTN_value = ET.SubElement(Modeltype_Name, 'value')
+        MTN_value.text = mtype
+
+        return self.find_model(ET.tostring(filtered_models,
+                                           encoding='unicode'),
+                               ret_attrs)
+
+    def ensure_model_attrs(self):
+
+        # Get a list of all requested attribute names/IDs plus Model_Handle and
+        # use them to query the values currently set. Store findings in a
+        # dictionary.
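+        # Flow, for reference: one search (find_model_by_name_type) reads the
+        # current values, each differing attribute is then pushed individually
+        # via update_model(), and in check mode the would-be changes are only
+        # recorded in the results.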
+        req_attrs = []
+        for attr in self.module.params['attributes']:
+            req_attrs.append(attr['name'])
+        if 'Model_Handle' not in req_attrs:
+            req_attrs.append('Model_Handle')
+
+        # Survey attributes currently set and store in a dict.
+        cur_attrs = self.find_model_by_name_type(self.module.params['name'],
+                                                 self.module.params['type'],
+                                                 req_attrs)
+
+        # Iterate through the requested attribute name/value pairs and compare
+        # with those currently set. If different, attempt to change.
+        Model_Handle = cur_attrs.pop("Model_Handle")
+        for attr in self.module.params['attributes']:
+            req_name = attr['name']
+            req_val = attr['value']
+            if req_val == "":
+                # The API will return None on empty string
+                req_val = None
+            if cur_attrs[req_name] != req_val:
+                if self.module.check_mode:
+                    self.result['changed_attrs'][req_name] = req_val
+                    self.result['msg'] = self.success_msg
+                    self.result['changed'] = True
+                    continue
+                self.update_model(Model_Handle, {req_name: req_val})
+
+        self.module.exit_json(**self.result)
+
+
+def run_module():
+    argument_spec = dict(
+        url=dict(type='str', required=True),
+        url_username=dict(type='str', required=True, aliases=['username']),
+        url_password=dict(type='str', required=True, aliases=['password'],
+                          no_log=True),
+        validate_certs=dict(type='bool', default=True),
+        use_proxy=dict(type='bool', default=True),
+        name=dict(type='str', required=True),
+        type=dict(type='str', required=True),
+        attributes=dict(type='list',
+                        required=True,
+                        elements='dict',
+                        options=dict(
+                            name=dict(type='str', required=True),
+                            value=dict(type='str', required=True)
+                        )),
+    )
+    module = AnsibleModule(
+        supports_check_mode=True,
+        argument_spec=argument_spec,
+    )
+
+    try:
+        sm = spectrum_model_attrs(module)
+        sm.ensure_model_attrs()
+    except Exception as e:
+        module.fail_json(msg="Failed to ensure attribute(s) on `%s' with "
+                             "exception: %s" % (module.params['name'],
+                                                to_native(e)))
+
+
+def main():
+    run_module()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py b/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
new file mode 100644
index 000000000..02f2d3c5c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
@@ -0,0 +1,1595 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+
+DOCUMENTATION = '''
+---
+module: spotinst_aws_elastigroup
+short_description: Create, update or delete Spotinst AWS Elastigroups
+author: Spotinst (@talzur)
+description:
+  - Can create, update, or delete Spotinst AWS Elastigroups.
+    Launch configuration is part of the elastigroup configuration,
+    so no additional modules are necessary for handling the launch configuration.
+    You will have to have a credentials file in this location - <home>/.spotinst/credentials
+    The credentials file must contain a row that looks like this
+    token = <YOUR TOKEN>
+    Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-
+requirements:
+  - python >= 2.7
+  - spotinst_sdk >= 1.0.38
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+
+  credentials_path:
+    description:
+      - Optional parameter that allows setting a non-default credentials path.
+    default: ~/.spotinst/credentials
+    type: path
+
+  account_id:
+    description:
+      - Optional parameter that allows setting an account-id inside the module configuration.
+        By default this is retrieved from the credentials path.
+    type: str
+
+  token:
+    description:
+      - A Personal API Access Token issued by Spotinst.
+      - >-
+        When not specified, the module will try to obtain it, in that order, from: environment variable C(SPOTINST_TOKEN), or from the credentials path.
+    type: str
+
+  availability_vs_cost:
+    description:
+      - The strategy orientation.
+      - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)."
+    required: true
+    type: str
+
+  availability_zones:
+    description:
+      - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are
+        name (String),
+        subnet_id (String),
+        placement_group_name (String).
+    required: true
+    type: list
+    elements: dict
+
+  block_device_mappings:
+    description:
+      - A list of hash/dictionaries of Block Device Mappings for elastigroup instances;
+        You can specify virtual devices and EBS volumes.;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are
+        device_name (List of Strings),
+        virtual_name (String),
+        no_device (String),
+        ebs (Object, expects the following keys -
+        delete_on_termination (Boolean),
+        encrypted (Boolean),
+        iops (Integer),
+        snapshot_id (Integer),
+        volume_type (String),
+        volume_size (Integer))
+    type: list
+    elements: dict
+
+  chef:
+    description:
+      - The Chef integration configuration.;
+        Expects the following keys - chef_server (String),
+        organization (String),
+        user (String),
+        pem_key (String),
+        chef_version (String)
+    type: dict
+
+  draining_timeout:
+    description:
+      - Time for instance to be drained from incoming requests and deregistered from ELB before termination.
+    type: int
+
+  ebs_optimized:
+    description:
+      - Enable EBS optimization for supported instances which are not enabled by default.;
+        Note - additional charges will be applied.
+    type: bool
+
+  ebs_volume_pool:
+    description:
+      - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        volume_ids (List of Strings),
+        device_name (String)
+    type: list
+    elements: dict
+
+  ecs:
+    description:
+      - The ECS integration configuration.;
+        Expects the following key -
+        cluster_name (String)
+    type: dict
+
+  elastic_ips:
+    description:
+      - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate with the group instances.
+    type: list
+    elements: str
+
+  fallback_to_od:
+    description:
+      - In case no Spot instances are available, Elastigroup will launch an On-demand instance instead.
+    type: bool
+
+  health_check_grace_period:
+    description:
+      - The amount of time, in seconds, after the instance has launched to start and check its health.
+      - If not specified, it defaults to C(300).
+    type: int
+
+  health_check_unhealthy_duration_before_replacement:
+    description:
+      - Minimal amount of time an instance should be unhealthy for us to consider it unhealthy.
+    type: int
+
+  health_check_type:
+    description:
+      - The service to use for the health check.
+      - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)."
+    type: str
+
+  iam_role_name:
+    description:
+      - The instance profile iamRole name.
+      - Only use one of iam_role_arn or iam_role_name.
+    type: str
+
+  iam_role_arn:
+    description:
+      - The instance profile iamRole arn.
+      - Only use one of iam_role_arn or iam_role_name.
+    type: str
+
+  id:
+    description:
+      - The group ID if it already exists and you want to update or delete it.
+        This will not work unless the uniqueness_by field is set to id.
+        When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
+    type: str
+
+  image_id:
+    description:
+      - The image ID used to launch the instance.;
+        In case of conflict between Instance type and image type, an error will be returned.
+    required: true
+    type: str
+
+  key_pair:
+    description:
+      - Specify a Key Pair to attach to the instances.
+    type: str
+
+  kubernetes:
+    description:
+      - The Kubernetes integration configuration.
+        Expects the following keys -
+        api_server (String),
+        token (String)
+    type: dict
+
+  lifetime_period:
+    description:
+      - Lifetime period.
+    type: int
+
+  load_balancers:
+    description:
+      - List of classic ELB names.
+    type: list
+    elements: str
+
+  max_size:
+    description:
+      - The upper limit number of instances that you can scale up to.
+    required: true
+    type: int
+
+  mesosphere:
+    description:
+      - The Mesosphere integration configuration.
+        Expects the following key -
+        api_server (String)
+    type: dict
+
+  min_size:
+    description:
+      - The lower limit number of instances that you can scale down to.
+    required: true
+    type: int
+
+  monitoring:
+    description:
+      - Describes whether instance Enhanced Monitoring is enabled.
+    type: str
+
+  name:
+    description:
+      - Unique name for elastigroup to be created, updated or deleted.
+    required: true
+    type: str
+
+  network_interfaces:
+    description:
+      - A list of hash/dictionaries of network interfaces to add to the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        description (String),
+        device_index (Integer),
+        secondary_private_ip_address_count (Integer),
+        associate_public_ip_address (Boolean),
+        delete_on_termination (Boolean),
+        groups (List of Strings),
+        network_interface_id (String),
+        private_ip_address (String),
+        subnet_id (String),
+        associate_ipv6_address (Boolean),
+        private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
+    type: list
+    elements: dict
+
+  on_demand_count:
+    description:
+      - Required if risk is not set.
+      - Number of on demand instances to launch. All other instances will be spot instances.;
+        Either set this parameter or the risk parameter.
+    type: int
+
+  on_demand_instance_type:
+    description:
+      - On-demand instance type that will be provisioned.
+    type: str
+
+  opsworks:
+    description:
+      - The elastigroup OpsWorks integration configuration.;
+        Expects the following key -
+        layer_id (String)
+    type: dict
+
+  persistence:
+    description:
+      - The Stateful elastigroup configuration.;
+        Accepts the following keys -
+        should_persist_root_device (Boolean),
+        should_persist_block_devices (Boolean),
+        should_persist_private_ip (Boolean)
+    type: dict
+
+  product:
+    description:
+      - Operating system type.
+ - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))." + required: true + type: str + + rancher: + description: + - The Rancher integration configuration.; + Expects the following keys - + version (String), + access_key (String), + secret_key (String), + master_host (String) + type: dict + + right_scale: + description: + - The Rightscale integration configuration.; + Expects the following keys - + account_id (String), + refresh_token (String) + type: dict + + risk: + description: + - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100). + type: int + + roll_config: + description: + - Roll configuration.; + If you would like the group to roll after updating, please use this feature. + Accepts the following keys - + batch_size_percentage(Integer, Required), + grace_period - (Integer, Required), + health_check_type(String, Optional) + type: dict + + scheduled_tasks: + description: + - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup; + '[{"key":"value", "key":"value"}]'; + keys allowed are - + adjustment (Integer), + scale_target_capacity (Integer), + scale_min_capacity (Integer), + scale_max_capacity (Integer), + adjustment_percentage (Integer), + batch_size_percentage (Integer), + cron_expression (String), + frequency (String), + grace_period (Integer), + task_type (String, required), + is_enabled (Boolean) + type: list + elements: dict + + security_group_ids: + description: + - One or more security group IDs. ; + In case of update it will override the existing Security Group with the new given array + required: true + type: list + elements: str + + shutdown_script: + description: + - The Base64-encoded shutdown script that executes prior to instance termination. + Encode before setting. + type: str + + signals: + description: + - A list of hash/dictionaries of signals to configure in the elastigroup; + keys allowed are - + name (String, required), + timeout (Integer) + type: list + elements: dict + + spin_up_time: + description: + - Spin up time, in seconds, for the instance + type: int + + spot_instance_types: + description: + - Spot instance type that will be provisioned. + required: true + type: list + elements: str + + state: + choices: + - present + - absent + description: + - Create or delete the elastigroup + default: present + type: str + + tags: + description: + - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); + type: list + elements: dict + + target: + description: + - The number of instances to launch + required: true + type: int + + target_group_arns: + description: + - List of target group arns instances should be registered to + type: list + elements: str + + tenancy: + description: + - Dedicated vs shared tenancy. + - "The available choices are: C(default), C(dedicated)." + type: str + + terminate_at_end_of_billing_hour: + description: + - Terminate at the end of billing hour + type: bool + + unit: + description: + - The capacity unit to launch instances by. + - "The available choices are: C(instance), C(weight)." 
+    type: str
+
+  up_scaling_policies:
+    description:
+      - A list of hash/dictionaries of scaling policies to configure in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        policy_name (String, required),
+        namespace (String, required),
+        metric_name (String, required),
+        dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+        statistic (String, required),
+        evaluation_periods (String, required),
+        period (String, required),
+        threshold (String, required),
+        cooldown (String, required),
+        unit (String, required),
+        operator (String, required),
+        action_type (String, required),
+        adjustment (String),
+        min_target_capacity (String),
+        target (String),
+        maximum (String),
+        minimum (String)
+    type: list
+    elements: dict
+
+  down_scaling_policies:
+    description:
+      - A list of hash/dictionaries of scaling policies to configure in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        policy_name (String, required),
+        namespace (String, required),
+        metric_name (String, required),
+        dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+        statistic (String, required),
+        evaluation_periods (String, required),
+        period (String, required),
+        threshold (String, required),
+        cooldown (String, required),
+        unit (String, required),
+        operator (String, required),
+        action_type (String, required),
+        adjustment (String),
+        max_target_capacity (String),
+        target (String),
+        maximum (String),
+        minimum (String)
+    type: list
+    elements: dict
+
+  target_tracking_policies:
+    description:
+      - A list of hash/dictionaries of target tracking policies to configure in the elastigroup;
+        '[{"key":"value", "key":"value"}]';
+        keys allowed are -
+        policy_name (String, required),
+        namespace (String, required),
+        source (String, required),
+        metric_name (String, required),
+        statistic (String, required),
+        unit (String, required),
+        cooldown (String, required),
+        target (String, required)
+    type: list
+    elements: dict
+
+  uniqueness_by:
+    choices:
+      - id
+      - name
+    description:
+      - If your group names are not unique, you may use this feature to update or delete a specific group.
+        Whenever this property is set to C(id), you must set a group ID via the I(id) option in order to update or delete a group, otherwise a group will be created.
+    default: name
+    type: str
+
+  user_data:
+    description:
+      - Base64-encoded MIME user data. Encode before setting the value.
+    type: str
+
+  utilize_reserved_instances:
+    description:
+      - In case of any available Reserved Instances,
+        Elastigroup will utilize your reservations before purchasing Spot instances.
+    type: bool
+
+  wait_for_instances:
+    description:
+      - Whether or not the elastigroup creation / update actions should wait for the instances to spin up.
+    type: bool
+    default: false
+
+  wait_timeout:
+    description:
+      - How long the module should wait for instances before failing the action.;
+        Only works if wait_for_instances is true.
+    type: int
+
+  do_not_update:
+    description:
+      - TODO document.
+    type: list
+    elements: str
+    default: []
+
+  multai_token:
+    description:
+      - Token used for Multai configuration.
+    type: str
+
+  multai_load_balancers:
+    description:
+      - Configuration parameters for Multai load balancers.
+    type: list
+    elements: dict
+
+  elastic_beanstalk:
+    description:
+      - Placeholder parameter for future implementation of Elastic Beanstalk configurations.
+ type: dict + +''' +EXAMPLES = ''' +# Basic configuration YAML example + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - ansible.builtin.debug: var=result + +# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/sda1' + ebs: + volume_size: 100 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 + register: result + + - name: Store private ips to file + ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips + with_items: "{{ result.instances }}" + - ansible.builtin.debug: var=result + +# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id +# In organizations with more than one account, it is required to specify an account_id + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/xvda' + ebs: + volume_size: 60 + volume_type: gp2 + - device_name: '/dev/xvdb' + ebs: + volume_size: 120 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 + register: result + + - name: Store private ips to file + ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips + with_items: "{{ result.instances }}" + - ansible.builtin.debug: var=result + +# In this example we have set up block device mapping with ephemeral devices + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + 
unit: instance + block_device_mappings: + - device_name: '/dev/xvda' + virtual_name: ephemeral0 + - device_name: '/dev/xvdb/' + virtual_name: ephemeral1 + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - ansible.builtin.debug: var=result + +# In this example we create a basic group configuration with a network interface defined. +# Each network interface must have a device index + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + state: present + risk: 100 + availability_vs_cost: balanced + network_interfaces: + - associate_public_ip_address: true + device_index: 0 + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target + register: result + - ansible.builtin.debug: var=result + + +# In this example we create a basic group configuration with a target tracking scaling policy defined + +- hosts: localhost + tasks: + - name: Create elastigroup + community.general.spotinst_aws_elastigroup: + account_id: act-92d45673 + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-79da021e + image_id: ami-f173cc91 + fallback_to_od: true + tags: + - Creator: ValueOfCreatorTag + - Environment: ValueOfEnvironmentTag + key_pair: spotinst-labs-oregon + max_size: 10 + min_size: 0 + target: 2 + unit: instance + monitoring: true + name: ansible-group-1 + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-46cdc13d + spot_instance_types: + - c3.large + target_tracking_policies: + - policy_name: target-tracking-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + target: 50 + cooldown: 120 + do_not_update: + - image_id + register: result + - ansible.builtin.debug: var=result +''' + +RETURN = ''' +--- +instances: + description: List of active elastigroup instances and their details. + returned: success + type: dict + sample: [ + { + "spotInstanceRequestId": "sir-regs25zp", + "instanceId": "i-09640ad8678234c", + "instanceType": "m4.large", + "product": "Linux/UNIX", + "availabilityZone": "us-west-2b", + "privateIp": "180.0.2.244", + "createdAt": "2017-07-17T12:46:18.000Z", + "status": "fulfilled" + } + ] +group_id: + description: Created / Updated group's ID. 
+ returned: success + type: str + sample: "sig-12345" + +''' + +HAS_SPOTINST_SDK = False +__metaclass__ = type + +import os +import time +from ansible.module_utils.basic import AnsibleModule + +try: + import spotinst_sdk as spotinst + from spotinst_sdk import SpotinstClientException + + HAS_SPOTINST_SDK = True + +except ImportError: + pass + +eni_fields = ('description', + 'device_index', + 'secondary_private_ip_address_count', + 'associate_public_ip_address', + 'delete_on_termination', + 'groups', + 'network_interface_id', + 'private_ip_address', + 'subnet_id', + 'associate_ipv6_address') + +private_ip_fields = ('private_ip_address', + 'primary') + +capacity_fields = (dict(ansible_field_name='min_size', + spotinst_field_name='minimum'), + dict(ansible_field_name='max_size', + spotinst_field_name='maximum'), + 'target', + 'unit') + +lspec_fields = ('user_data', + 'key_pair', + 'tenancy', + 'shutdown_script', + 'monitoring', + 'ebs_optimized', + 'image_id', + 'health_check_type', + 'health_check_grace_period', + 'health_check_unhealthy_duration_before_replacement', + 'security_group_ids') + +iam_fields = (dict(ansible_field_name='iam_role_name', + spotinst_field_name='name'), + dict(ansible_field_name='iam_role_arn', + spotinst_field_name='arn')) + +scheduled_task_fields = ('adjustment', + 'adjustment_percentage', + 'batch_size_percentage', + 'cron_expression', + 'frequency', + 'grace_period', + 'task_type', + 'is_enabled', + 'scale_target_capacity', + 'scale_min_capacity', + 'scale_max_capacity') + +scaling_policy_fields = ('policy_name', + 'namespace', + 'metric_name', + 'dimensions', + 'statistic', + 'evaluation_periods', + 'period', + 'threshold', + 'cooldown', + 'unit', + 'operator') + +tracking_policy_fields = ('policy_name', + 'namespace', + 'source', + 'metric_name', + 'statistic', + 'unit', + 'cooldown', + 'target', + 'threshold') + +action_fields = (dict(ansible_field_name='action_type', + spotinst_field_name='type'), + 'adjustment', + 'min_target_capacity', + 'max_target_capacity', + 'target', + 'minimum', + 'maximum') + +signal_fields = ('name', + 'timeout') + +multai_lb_fields = ('balancer_id', + 'project_id', + 'target_set_id', + 'az_awareness', + 'auto_weight') + +persistence_fields = ('should_persist_root_device', + 'should_persist_block_devices', + 'should_persist_private_ip') + +strategy_fields = ('risk', + 'utilize_reserved_instances', + 'fallback_to_od', + 'on_demand_count', + 'availability_vs_cost', + 'draining_timeout', + 'spin_up_time', + 'lifetime_period') + +ebs_fields = ('delete_on_termination', + 'encrypted', + 'iops', + 'snapshot_id', + 'volume_type', + 'volume_size') + +bdm_fields = ('device_name', + 'virtual_name', + 'no_device') + +kubernetes_fields = ('api_server', + 'token') + +right_scale_fields = ('account_id', + 'refresh_token') + +rancher_fields = ('access_key', + 'secret_key', + 'master_host', + 'version') + +chef_fields = ('chef_server', + 'organization', + 'user', + 'pem_key', + 'chef_version') + +az_fields = ('name', + 'subnet_id', + 'placement_group_name') + +opsworks_fields = ('layer_id',) + +scaling_strategy_fields = ('terminate_at_end_of_billing_hour',) + +mesosphere_fields = ('api_server',) + +ecs_fields = ('cluster_name',) + +multai_fields = ('multai_token',) + + +def handle_elastigroup(client, module): + has_changed = False + group_id = None + message = 'None' + + name = module.params.get('name') + state = module.params.get('state') + uniqueness_by = module.params.get('uniqueness_by') + external_group_id = module.params.get('id') + + if 
uniqueness_by == 'id':
+        if external_group_id is None:
+            should_create = True
+        else:
+            should_create = False
+            group_id = external_group_id
+    else:
+        groups = client.get_elastigroups()
+        should_create, group_id = find_group_with_same_name(groups, name)
+
+    if should_create is True:
+        if state == 'present':
+            eg = expand_elastigroup(module, is_update=False)
+            module.debug(str(" [INFO] " + message + "\n"))
+            group = client.create_elastigroup(group=eg)
+            group_id = group['id']
+            message = 'Created group successfully.'
+            has_changed = True
+
+        elif state == 'absent':
+            message = 'Cannot delete non-existent group.'
+            has_changed = False
+    else:
+        eg = expand_elastigroup(module, is_update=True)
+
+        if state == 'present':
+            group = client.update_elastigroup(group_update=eg, group_id=group_id)
+            message = 'Updated group successfully.'
+
+            try:
+                roll_config = module.params.get('roll_config')
+                if roll_config:
+                    eg_roll = spotinst.aws_elastigroup.Roll(
+                        batch_size_percentage=roll_config.get('batch_size_percentage'),
+                        grace_period=roll_config.get('grace_period'),
+                        health_check_type=roll_config.get('health_check_type')
+                    )
+                    client.roll_group(group_roll=eg_roll, group_id=group_id)
+                    message = 'Updated and started rolling the group successfully.'
+
+            except SpotinstClientException as exc:
+                message = 'Updated group successfully, but failed to perform roll. Error: ' + str(exc)
+            has_changed = True
+
+        elif state == 'absent':
+            try:
+                client.delete_elastigroup(group_id=group_id)
+            except SpotinstClientException as exc:
+                if "GROUP_DOESNT_EXIST" in exc.message:
+                    pass
+                else:
+                    module.fail_json(msg="Error while attempting to delete group : " + exc.message)
+
+            message = 'Deleted group successfully.'
+            has_changed = True
+
+    return group_id, message, has_changed
+
+
+def retrieve_group_instances(client, module, group_id):
+    wait_timeout = module.params.get('wait_timeout')
+    wait_for_instances = module.params.get('wait_for_instances')
+
+    health_check_type = module.params.get('health_check_type')
+
+    if wait_timeout is None:
+        wait_timeout = 300
+
+    wait_timeout = time.time() + wait_timeout
+    target = module.params.get('target')
+    state = module.params.get('state')
+    instances = list()
+
+    if state == 'present' and group_id is not None and wait_for_instances is True:
+
+        is_amount_fulfilled = False
+        while is_amount_fulfilled is False and wait_timeout > time.time():
+            instances = list()
+            amount_of_fulfilled_instances = 0
+
+            if health_check_type is not None:
+                healthy_instances = client.get_instance_healthiness(group_id=group_id)
+
+                for healthy_instance in healthy_instances:
+                    if healthy_instance.get('healthStatus') == 'HEALTHY':
+                        amount_of_fulfilled_instances += 1
+                        instances.append(healthy_instance)
+
+            else:
+                active_instances = client.get_elastigroup_active_instances(group_id=group_id)
+
+                for active_instance in active_instances:
+                    if active_instance.get('private_ip') is not None:
+                        amount_of_fulfilled_instances += 1
+                        instances.append(active_instance)
+
+            if amount_of_fulfilled_instances >= target:
+                is_amount_fulfilled = True
+
+            time.sleep(10)
+
+    return instances
+
+
+def find_group_with_same_name(groups, name):
+    for group in groups:
+        if group['name'] == name:
+            return False, group.get('id')
+
+    return True, None
+
+
+def expand_elastigroup(module, is_update):
+    do_not_update = module.params['do_not_update']
+    name = module.params.get('name')
+
+    eg = spotinst.aws_elastigroup.Elastigroup()
+    description = module.params.get('description')
+
+    if name is not None:
+        eg.name = name
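+    # Note: 'description' is not declared in this module's argument spec, so
+    # module.params.get('description') appears to always return None here; the
+    # attribute is kept for compatibility with the Spotinst SDK object.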
+    if description is not None:
+        eg.description = description
+
+    # Capacity
+    expand_capacity(eg, module, is_update, do_not_update)
+    # Strategy
+    expand_strategy(eg, module)
+    # Scaling
+    expand_scaling(eg, module)
+    # Third party integrations
+    expand_integrations(eg, module)
+    # Compute
+    expand_compute(eg, module, is_update, do_not_update)
+    # Multai
+    expand_multai(eg, module)
+    # Scheduling
+    expand_scheduled_tasks(eg, module)
+
+    return eg
+
+
+def expand_compute(eg, module, is_update, do_not_update):
+    elastic_ips = module.params['elastic_ips']
+    on_demand_instance_type = module.params.get('on_demand_instance_type')
+    spot_instance_types = module.params['spot_instance_types']
+    ebs_volume_pool = module.params['ebs_volume_pool']
+    availability_zones_list = module.params['availability_zones']
+    product = module.params.get('product')
+
+    eg_compute = spotinst.aws_elastigroup.Compute()
+
+    if product is not None:
+        # Only put product on group creation
+        if is_update is not True:
+            eg_compute.product = product
+
+    if elastic_ips is not None:
+        eg_compute.elastic_ips = elastic_ips
+
+    if on_demand_instance_type is not None or spot_instance_types is not None:
+        eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
+
+        if on_demand_instance_type is not None:
+            eg_instance_types.ondemand = on_demand_instance_type
+        if spot_instance_types is not None:
+            eg_instance_types.spot = spot_instance_types
+
+        if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
+            eg_compute.instance_types = eg_instance_types
+
+    expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
+
+    eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
+
+    expand_launch_spec(eg_compute, module, is_update, do_not_update)
+
+    eg.compute = eg_compute
+
+
+def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
+    if ebs_volumes_list is not None:
+        eg_volumes = []
+
+        for volume in ebs_volumes_list:
+            eg_volume = spotinst.aws_elastigroup.EbsVolume()
+
+            if volume.get('device_name') is not None:
+                eg_volume.device_name = volume.get('device_name')
+            if volume.get('volume_ids') is not None:
+                eg_volume.volume_ids = volume.get('volume_ids')
+
+            if eg_volume.device_name is not None:
+                eg_volumes.append(eg_volume)
+
+        if len(eg_volumes) > 0:
+            eg_compute.ebs_volume_pool = eg_volumes
+
+
+def expand_launch_spec(eg_compute, module, is_update, do_not_update):
+    eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
+
+    if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
+        eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
+
+    tags = module.params['tags']
+    load_balancers = module.params['load_balancers']
+    target_group_arns = module.params['target_group_arns']
+    block_device_mappings = module.params['block_device_mappings']
+    network_interfaces = module.params['network_interfaces']
+
+    if is_update is True:
+        if 'image_id' in do_not_update:
+            delattr(eg_launch_spec, 'image_id')
+
+    expand_tags(eg_launch_spec, tags)
+
+    expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
+
+    expand_block_device_mappings(eg_launch_spec, block_device_mappings)
+
+    expand_network_interfaces(eg_launch_spec, network_interfaces)
+
+    eg_compute.launch_specification = eg_launch_spec
+
+
+def expand_integrations(eg, module):
+    rancher = module.params.get('rancher')
+    mesosphere = module.params.get('mesosphere')
+    ecs = module.params.get('ecs')
+    kubernetes =
module.params.get('kubernetes') + right_scale = module.params.get('right_scale') + opsworks = module.params.get('opsworks') + chef = module.params.get('chef') + + integration_exists = False + + eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations() + + if mesosphere is not None: + eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere') + integration_exists = True + + if ecs is not None: + eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration') + integration_exists = True + + if kubernetes is not None: + eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration') + integration_exists = True + + if right_scale is not None: + eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration') + integration_exists = True + + if opsworks is not None: + eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration') + integration_exists = True + + if rancher is not None: + eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher') + integration_exists = True + + if chef is not None: + eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration') + integration_exists = True + + if integration_exists: + eg.third_parties_integration = eg_integrations + + +def expand_capacity(eg, module, is_update, do_not_update): + eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity') + + if is_update is True: + delattr(eg_capacity, 'unit') + + if 'target' in do_not_update: + delattr(eg_capacity, 'target') + + eg.capacity = eg_capacity + + +def expand_strategy(eg, module): + persistence = module.params.get('persistence') + signals = module.params.get('signals') + + eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy') + + terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour') + + if terminate_at_end_of_billing_hour is not None: + eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields, + module.params, 'ScalingStrategy') + + if persistence is not None: + eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence') + + if signals is not None: + eg_signals = expand_list(signals, signal_fields, 'Signal') + + if len(eg_signals) > 0: + eg_strategy.signals = eg_signals + + eg.strategy = eg_strategy + + +def expand_multai(eg, module): + multai_load_balancers = module.params.get('multai_load_balancers') + + eg_multai = expand_fields(multai_fields, module.params, 'Multai') + + if multai_load_balancers is not None: + eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer') + + if len(eg_multai_load_balancers) > 0: + eg_multai.balancers = eg_multai_load_balancers + eg.multai = eg_multai + + +def expand_scheduled_tasks(eg, module): + scheduled_tasks = module.params.get('scheduled_tasks') + + if scheduled_tasks is not None: + eg_scheduling = spotinst.aws_elastigroup.Scheduling() + + eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask') + + if len(eg_tasks) > 0: + eg_scheduling.tasks = eg_tasks + eg.scheduling = eg_scheduling + + +def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns): + if load_balancers is not None or target_group_arns is not None: + eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig() + eg_total_lbs = [] + + if load_balancers is not None: + for elb_name in load_balancers: + eg_elb = 
spotinst.aws_elastigroup.LoadBalancer() + if elb_name is not None: + eg_elb.name = elb_name + eg_elb.type = 'CLASSIC' + eg_total_lbs.append(eg_elb) + + if target_group_arns is not None: + for target_arn in target_group_arns: + eg_elb = spotinst.aws_elastigroup.LoadBalancer() + if target_arn is not None: + eg_elb.arn = target_arn + eg_elb.type = 'TARGET_GROUP' + eg_total_lbs.append(eg_elb) + + if len(eg_total_lbs) > 0: + eg_load_balancers_config.load_balancers = eg_total_lbs + eg_launchspec.load_balancers_config = eg_load_balancers_config + + +def expand_tags(eg_launchspec, tags): + if tags is not None: + eg_tags = [] + + for tag in tags: + eg_tag = spotinst.aws_elastigroup.Tag() + if tag: + eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0] + + eg_tags.append(eg_tag) + + if len(eg_tags) > 0: + eg_launchspec.tags = eg_tags + + +def expand_block_device_mappings(eg_launchspec, bdms): + if bdms is not None: + eg_bdms = [] + + for bdm in bdms: + eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping') + + if bdm.get('ebs') is not None: + eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS') + + eg_bdms.append(eg_bdm) + + if len(eg_bdms) > 0: + eg_launchspec.block_device_mappings = eg_bdms + + +def expand_network_interfaces(eg_launchspec, enis): + if enis is not None: + eg_enis = [] + + for eni in enis: + eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface') + + eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress') + + if eg_pias is not None: + eg_eni.private_ip_addresses = eg_pias + + eg_enis.append(eg_eni) + + if len(eg_enis) > 0: + eg_launchspec.network_interfaces = eg_enis + + +def expand_scaling(eg, module): + up_scaling_policies = module.params['up_scaling_policies'] + down_scaling_policies = module.params['down_scaling_policies'] + target_tracking_policies = module.params['target_tracking_policies'] + + eg_scaling = spotinst.aws_elastigroup.Scaling() + + if up_scaling_policies is not None: + eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies) + if len(eg_up_scaling_policies) > 0: + eg_scaling.up = eg_up_scaling_policies + + if down_scaling_policies is not None: + eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies) + if len(eg_down_scaling_policies) > 0: + eg_scaling.down = eg_down_scaling_policies + + if target_tracking_policies is not None: + eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies) + if len(eg_target_tracking_policies) > 0: + eg_scaling.target = eg_target_tracking_policies + + if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None: + eg.scaling = eg_scaling + + +def expand_list(items, fields, class_name): + if items is not None: + new_objects_list = [] + for item in items: + new_obj = expand_fields(fields, item, class_name) + new_objects_list.append(new_obj) + + return new_objects_list + + +def expand_fields(fields, item, class_name): + class_ = getattr(spotinst.aws_elastigroup, class_name) + new_obj = class_() + + # Handle primitive fields + if item is not None: + for field in fields: + if isinstance(field, dict): + ansible_field_name = field['ansible_field_name'] + spotinst_field_name = field['spotinst_field_name'] + else: + ansible_field_name = field + spotinst_field_name = field + if item.get(ansible_field_name) is not None: + setattr(new_obj, spotinst_field_name, item.get(ansible_field_name)) + + return new_obj + + +def expand_scaling_policies(scaling_policies): + eg_scaling_policies = [] + + for 
policy in scaling_policies: + eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy') + eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction') + eg_scaling_policies.append(eg_policy) + + return eg_scaling_policies + + +def expand_target_tracking_policies(tracking_policies): + eg_tracking_policies = [] + + for policy in tracking_policies: + eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy') + eg_tracking_policies.append(eg_policy) + + return eg_tracking_policies + + +def main(): + fields = dict( + account_id=dict(type='str'), + availability_vs_cost=dict(type='str', required=True), + availability_zones=dict(type='list', elements='dict', required=True), + block_device_mappings=dict(type='list', elements='dict'), + chef=dict(type='dict'), + credentials_path=dict(type='path', default="~/.spotinst/credentials"), + do_not_update=dict(default=[], type='list', elements='str'), + down_scaling_policies=dict(type='list', elements='dict'), + draining_timeout=dict(type='int'), + ebs_optimized=dict(type='bool'), + ebs_volume_pool=dict(type='list', elements='dict'), + ecs=dict(type='dict'), + elastic_beanstalk=dict(type='dict'), + elastic_ips=dict(type='list', elements='str'), + fallback_to_od=dict(type='bool'), + id=dict(type='str'), + health_check_grace_period=dict(type='int'), + health_check_type=dict(type='str'), + health_check_unhealthy_duration_before_replacement=dict(type='int'), + iam_role_arn=dict(type='str'), + iam_role_name=dict(type='str'), + image_id=dict(type='str', required=True), + key_pair=dict(type='str', no_log=False), + kubernetes=dict(type='dict'), + lifetime_period=dict(type='int'), + load_balancers=dict(type='list', elements='str'), + max_size=dict(type='int', required=True), + mesosphere=dict(type='dict'), + min_size=dict(type='int', required=True), + monitoring=dict(type='str'), + multai_load_balancers=dict(type='list', elements='dict'), + multai_token=dict(type='str', no_log=True), + name=dict(type='str', required=True), + network_interfaces=dict(type='list', elements='dict'), + on_demand_count=dict(type='int'), + on_demand_instance_type=dict(type='str'), + opsworks=dict(type='dict'), + persistence=dict(type='dict'), + product=dict(type='str', required=True), + rancher=dict(type='dict'), + right_scale=dict(type='dict'), + risk=dict(type='int'), + roll_config=dict(type='dict'), + scheduled_tasks=dict(type='list', elements='dict'), + security_group_ids=dict(type='list', elements='str', required=True), + shutdown_script=dict(type='str'), + signals=dict(type='list', elements='dict'), + spin_up_time=dict(type='int'), + spot_instance_types=dict(type='list', elements='str', required=True), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(type='list', elements='dict'), + target=dict(type='int', required=True), + target_group_arns=dict(type='list', elements='str'), + tenancy=dict(type='str'), + terminate_at_end_of_billing_hour=dict(type='bool'), + token=dict(type='str', no_log=True), + unit=dict(type='str'), + user_data=dict(type='str'), + utilize_reserved_instances=dict(type='bool'), + uniqueness_by=dict(default='name', choices=['name', 'id']), + up_scaling_policies=dict(type='list', elements='dict'), + target_tracking_policies=dict(type='list', elements='dict'), + wait_for_instances=dict(type='bool', default=False), + wait_timeout=dict(type='int') + ) + + module = AnsibleModule(argument_spec=fields) + + if not HAS_SPOTINST_SDK: + module.fail_json(msg="the Spotinst SDK library is required. 
(pip install spotinst_sdk)")
+
+    # Retrieve creds file variables
+    creds_file_loaded_vars = dict()
+
+    credentials_path = module.params.get('credentials_path')
+
+    # The credentials file is expected to contain simple "key = value" rows,
+    # for example (values are placeholders):
+    #   token = <YOUR TOKEN>
+    #   account = act-1a9dd2b
+    try:
+        with open(credentials_path, "r") as creds:
+            for line in creds:
+                eq_index = line.find('=')
+                var_name = line[:eq_index].strip()
+                string_value = line[eq_index + 1:].strip()
+                creds_file_loaded_vars[var_name] = string_value
+    except IOError:
+        pass
+    # End of creds file retrieval
+
+    token = module.params.get('token')
+    if not token:
+        token = os.environ.get('SPOTINST_TOKEN')
+    if not token:
+        token = creds_file_loaded_vars.get("token")
+
+    account = module.params.get('account_id')
+    if not account:
+        account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
+    if not account:
+        account = creds_file_loaded_vars.get("account")
+
+    client = spotinst.SpotinstClient(auth_token=token, print_output=False)
+
+    if account is not None:
+        client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
+
+    group_id, message, has_changed = handle_elastigroup(client=client, module=module)
+
+    instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
+
+    module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py b/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py
new file mode 100644
index 000000000..32c1cd443
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+short_description: Manage HPE StoreServ 3PAR CPG
+author:
+  - Farhan Nomani (@farhan7500)
+  - Gautham P Hegde (@gautamphegde)
+description:
+  - Create and delete CPG on HPE 3PAR.
+module: ss_3par_cpg
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  cpg_name:
+    description:
+      - Name of the CPG.
+    type: str
+    required: true
+  disk_type:
+    choices:
+      - FC
+      - NL
+      - SSD
+    description:
+      - Specifies that physical disks must have the specified device type.
+    type: str
+  domain:
+    description:
+      - Specifies the name of the domain in which the object will reside.
+    type: str
+  growth_increment:
+    description:
+      - Specifies the growth increment (in MiB, GiB or TiB), the amount of logical disk storage
+        created on each auto-grow operation.
+    type: str
+  growth_limit:
+    description:
+      - Specifies that the autogrow operation is limited to the specified
+        storage amount that sets the growth limit (in MiB, GiB or TiB).
+    type: str
+  growth_warning:
+    description:
+      - Specifies that the threshold (in MiB, GiB or TiB) of used logical disk space, when exceeded,
+        results in a warning alert.
+    type: str
+  high_availability:
+    choices:
+      - PORT
+      - CAGE
+      - MAG
+    description:
+      - Specifies that the layout must support the failure of one port pair,
+        one cage, or one magazine.
+    type: str
+  raid_type:
+    choices:
+      - R0
+      - R1
+      - R5
+      - R6
+    description:
+      - Specifies the RAID type for the logical disk.
+ type: str + set_size: + description: + - Specifies the set size in the number of chunklets. + type: int + state: + choices: + - present + - absent + description: + - Whether the specified CPG should exist or not. + required: true + type: str + secure: + description: + - Specifies whether the certificate needs to be validated while communicating. + type: bool + default: false +extends_documentation_fragment: +- community.general.hpe3par +- community.general.attributes + +''' + + +EXAMPLES = r''' +- name: Create CPG sample_cpg + community.general.ss_3par_cpg: + storage_system_ip: 10.10.10.1 + storage_system_username: username + storage_system_password: password + state: present + cpg_name: sample_cpg + domain: sample_domain + growth_increment: 32000 MiB + growth_limit: 64000 MiB + growth_warning: 48000 MiB + raid_type: R6 + set_size: 8 + high_availability: MAG + disk_type: FC + secure: false + +- name: Delete CPG sample_cpg + community.general.ss_3par_cpg: + storage_system_ip: 10.10.10.1 + storage_system_username: username + storage_system_password: password + state: absent + cpg_name: sample_cpg + secure: false +''' + +RETURN = r''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par +try: + from hpe3par_sdk import client + from hpe3parclient import exceptions + HAS_3PARCLIENT = True +except ImportError: + HAS_3PARCLIENT = False + + +def validate_set_size(raid_type, set_size): + if raid_type: + set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes'] + if set_size in set_size_array: + return True + return False + + +def cpg_ldlayout_map(ldlayout_dict): + if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']: + ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[ + ldlayout_dict['RAIDType']]['raid_value'] + if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']: + ldlayout_dict['HA'] = getattr( + client.HPE3ParClient, ldlayout_dict['HA']) + return ldlayout_dict + + +def create_cpg( + client_obj, + cpg_name, + domain, + growth_increment, + growth_limit, + growth_warning, + raid_type, + set_size, + high_availability, + disk_type): + try: + if not validate_set_size(raid_type, set_size): + return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type)) + if not client_obj.cpgExists(cpg_name): + + disk_patterns = [] + if disk_type: + disk_type = getattr(client.HPE3ParClient, disk_type) + disk_patterns = [{'diskType': disk_type}] + ld_layout = { + 'RAIDType': raid_type, + 'setSize': set_size, + 'HA': high_availability, + 'diskPatterns': disk_patterns} + ld_layout = cpg_ldlayout_map(ld_layout) + if growth_increment is not None: + growth_increment = hpe3par.convert_to_binary_multiple( + growth_increment) + if growth_limit is not None: + growth_limit = hpe3par.convert_to_binary_multiple( + growth_limit) + if growth_warning is not None: + growth_warning = hpe3par.convert_to_binary_multiple( + growth_warning) + optional = { + 'domain': domain, + 'growthIncrementMiB': growth_increment, + 'growthLimitMiB': growth_limit, + 'usedLDWarningAlertMiB': growth_warning, + 'LDLayout': ld_layout} + client_obj.createCPG(cpg_name, optional) + else: + return (True, False, "CPG already present") + except exceptions.ClientException as e: + return (False, False, "CPG creation failed | %s" % (e)) + return (True, True, "Created CPG %s successfully." 
% cpg_name) + + +def delete_cpg( + client_obj, + cpg_name): + try: + if client_obj.cpgExists(cpg_name): + client_obj.deleteCPG(cpg_name) + else: + return (True, False, "CPG does not exist") + except exceptions.ClientException as e: + return (False, False, "CPG delete failed | %s" % e) + return (True, True, "Deleted CPG %s successfully." % cpg_name) + + +def main(): + module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(), + required_together=[['raid_type', 'set_size']]) + if not HAS_3PARCLIENT: + module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)') + + if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31: + module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters") + + storage_system_ip = module.params["storage_system_ip"] + storage_system_username = module.params["storage_system_username"] + storage_system_password = module.params["storage_system_password"] + cpg_name = module.params["cpg_name"] + domain = module.params["domain"] + growth_increment = module.params["growth_increment"] + growth_limit = module.params["growth_limit"] + growth_warning = module.params["growth_warning"] + raid_type = module.params["raid_type"] + set_size = module.params["set_size"] + high_availability = module.params["high_availability"] + disk_type = module.params["disk_type"] + secure = module.params["secure"] + + wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip + try: + client_obj = client.HPE3ParClient(wsapi_url, secure) + except exceptions.SSLCertFailed: + module.fail_json(msg="SSL Certificate Failed") + except exceptions.ConnectionError: + module.fail_json(msg="Connection Error") + except exceptions.UnsupportedVersion: + module.fail_json(msg="Unsupported WSAPI version") + except Exception as e: + module.fail_json(msg="Initializing client failed. 
%s" % e) + + if storage_system_username is None or storage_system_password is None: + module.fail_json(msg="Storage system username or password is None") + if cpg_name is None: + module.fail_json(msg="CPG Name is None") + + # States + if module.params["state"] == "present": + try: + client_obj.login(storage_system_username, storage_system_password) + return_status, changed, msg = create_cpg( + client_obj, + cpg_name, + domain, + growth_increment, + growth_limit, + growth_warning, + raid_type, + set_size, + high_availability, + disk_type + ) + except Exception as e: + module.fail_json(msg="CPG create failed | %s" % e) + finally: + client_obj.logout() + + elif module.params["state"] == "absent": + try: + client_obj.login(storage_system_username, storage_system_password) + return_status, changed, msg = delete_cpg( + client_obj, + cpg_name + ) + except Exception as e: + module.fail_json(msg="CPG create failed | %s" % e) + finally: + client_obj.logout() + + if return_status: + module.exit_json(changed=changed, msg=msg) + else: + module.fail_json(msg=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ssh_config.py b/ansible_collections/community/general/plugins/modules/ssh_config.py new file mode 100644 index 000000000..672ac8c47 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ssh_config.py @@ -0,0 +1,341 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Björn Andersson +# Copyright (c) 2021, Ansible Project +# Copyright (c) 2021, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ssh_config +short_description: Manage SSH config for user +version_added: '2.0.0' +description: + - Configures SSH hosts with special C(IdentityFile)s and hostnames. +author: + - Björn Andersson (@gaqzi) + - Abhijeet Kasurde (@Akasurde) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Whether a host entry should exist or not. + default: present + choices: [ 'present', 'absent' ] + type: str + user: + description: + - Which user account this configuration file belongs to. + - If none given and I(ssh_config_file) is not specified, C(/etc/ssh/ssh_config) is used. + - If a user is given, C(~/.ssh/config) is used. + - Mutually exclusive with I(ssh_config_file). + type: str + group: + description: + - Which group this configuration file belongs to. + - If none given, I(user) is used. + type: str + host: + description: + - The endpoint this configuration is valid for. + - Can be an actual address on the internet or an alias that will + connect to the value of I(hostname). + required: true + type: str + hostname: + description: + - The actual host to connect to when connecting to the host defined. + type: str + port: + description: + - The actual port to connect to when connecting to the host defined. + type: str + remote_user: + description: + - Specifies the user to log in as. + type: str + identity_file: + description: + - The path to an identity file (SSH private key) that will be used + when connecting to this host. + - File need to exist and have mode C(0600) to be valid. 
+    type: path
+  user_known_hosts_file:
+    description:
+      - Sets the user known hosts file option.
+    type: str
+  strict_host_key_checking:
+    description:
+      - Whether to strictly check the host key when doing connections to the remote host.
+    choices: [ 'yes', 'no', 'ask' ]
+    type: str
+  proxycommand:
+    description:
+      - Sets the C(ProxyCommand) option.
+      - Mutually exclusive with I(proxyjump).
+    type: str
+  proxyjump:
+    description:
+      - Sets the C(ProxyJump) option.
+      - Mutually exclusive with I(proxycommand).
+    type: str
+    version_added: 6.5.0
+  forward_agent:
+    description:
+      - Sets the C(ForwardAgent) option.
+    type: bool
+    version_added: 4.0.0
+  ssh_config_file:
+    description:
+      - SSH config file.
+      - If I(user) and this option are not specified, C(/etc/ssh/ssh_config) is used.
+      - Mutually exclusive with I(user).
+    type: path
+  host_key_algorithms:
+    description:
+      - Sets the C(HostKeyAlgorithms) option.
+    type: str
+    version_added: 6.1.0
+requirements:
+- paramiko
+'''
+
+EXAMPLES = r'''
+- name: Add a host in the configuration
+  community.general.ssh_config:
+    user: akasurde
+    host: "example.com"
+    hostname: "github.com"
+    identity_file: "/home/akasurde/.ssh/id_rsa"
+    port: '2223'
+    state: present
+
+- name: Delete a host from the configuration
+  community.general.ssh_config:
+    ssh_config_file: "{{ ssh_config_test }}"
+    host: "example.com"
+    state: absent
+'''
+
+RETURN = r'''
+hosts_added:
+  description: A list of hosts added.
+  returned: success
+  type: list
+  sample: ["example.com"]
+hosts_removed:
+  description: A list of hosts removed.
+  returned: success
+  type: list
+  sample: ["example.com"]
+hosts_changed:
+  description: A list of hosts changed.
+  returned: success
+  type: list
+  sample: ["example.com"]
+hosts_change_diff:
+  description: A list of host diff changes.
+ returned: on change + type: list + sample: [ + { + "example.com": { + "new": { + "hostname": "github.com", + "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"], + "port": "2224" + }, + "old": { + "hostname": "github.com", + "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"], + "port": "2224" + } + } + } + ] +''' + +import os + +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils._stormssh import ConfigParser, HAS_PARAMIKO, PARAMIKO_IMPORT_ERROR +from ansible_collections.community.general.plugins.module_utils.ssh import determine_config_file + + +class SSHConfig(object): + def __init__(self, module): + self.module = module + if not HAS_PARAMIKO: + module.fail_json(msg=missing_required_lib('PARAMIKO'), exception=PARAMIKO_IMPORT_ERROR) + self.params = module.params + self.user = self.params.get('user') + self.group = self.params.get('group') or self.user + self.host = self.params.get('host') + self.config_file = self.params.get('ssh_config_file') + self.identity_file = self.params['identity_file'] + self.check_ssh_config_path() + try: + self.config = ConfigParser(self.config_file) + except FileNotFoundError: + self.module.fail_json(msg="Failed to find %s" % self.config_file) + self.config.load() + + def check_ssh_config_path(self): + self.config_file = determine_config_file(self.user, self.config_file) + + # See if the identity file exists or not, relative to the config file + if os.path.exists(self.config_file) and self.identity_file is not None: + dirname = os.path.dirname(self.config_file) + self.identity_file = os.path.join(dirname, self.identity_file) + + if not os.path.exists(self.identity_file): + self.module.fail_json(msg='IdentityFile %s does not exist' % self.params['identity_file']) + + def ensure_state(self): + hosts_result = self.config.search_host(self.host) + state = self.params['state'] + args = dict( + hostname=self.params.get('hostname'), + port=self.params.get('port'), + identity_file=self.params.get('identity_file'), + user=self.params.get('remote_user'), + strict_host_key_checking=self.params.get('strict_host_key_checking'), + user_known_hosts_file=self.params.get('user_known_hosts_file'), + proxycommand=self.params.get('proxycommand'), + proxyjump=self.params.get('proxyjump'), + host_key_algorithms=self.params.get('host_key_algorithms'), + ) + + # Convert True / False to 'yes' / 'no' for usage in ssh_config + if self.params['forward_agent'] is True: + args['forward_agent'] = 'yes' + if self.params['forward_agent'] is False: + args['forward_agent'] = 'no' + + config_changed = False + hosts_changed = [] + hosts_change_diff = [] + hosts_removed = [] + hosts_added = [] + + hosts_result = [host for host in hosts_result if host['host'] == self.host] + + if hosts_result: + for host in hosts_result: + if state == 'absent': + # Delete host from the configuration + config_changed = True + hosts_removed.append(host['host']) + self.config.delete_host(host['host']) + else: + # Update host in the configuration + changed, options = self.change_host(host['options'], **args) + + if changed: + config_changed = True + self.config.update_host(host['host'], options) + hosts_changed.append(host['host']) + hosts_change_diff.append({ + host['host']: { + 'old': host['options'], + 'new': options, + } + }) + elif state == 'present': + changed, options = self.change_host(dict(), **args) + + if changed: + 
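+                    # (Editor's note) change_host() diffs the requested options
+                    # against an empty dict here, so any supplied option makes
+                    # this a brand-new Host block to add.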
config_changed = True + hosts_added.append(self.host) + self.config.add_host(self.host, options) + + if config_changed and not self.module.check_mode: + try: + self.config.write_to_ssh_config() + except PermissionError as perm_exec: + self.module.fail_json( + msg="Failed to write to %s due to permission issue: %s" % (self.config_file, to_native(perm_exec))) + # Make sure we set the permission + perm_mode = '0600' + if self.config_file == '/etc/ssh/ssh_config': + perm_mode = '0644' + self.module.set_mode_if_different(self.config_file, perm_mode, False) + # Make sure the file is owned by the right user and group + self.module.set_owner_if_different(self.config_file, self.user, False) + self.module.set_group_if_different(self.config_file, self.group, False) + + self.module.exit_json(changed=config_changed, + hosts_changed=hosts_changed, + hosts_removed=hosts_removed, + hosts_change_diff=hosts_change_diff, + hosts_added=hosts_added) + + @staticmethod + def change_host(options, **kwargs): + options = deepcopy(options) + changed = False + for k, v in kwargs.items(): + if '_' in k: + k = k.replace('_', '') + + if not v: + if options.get(k): + del options[k] + changed = True + elif options.get(k) != v and not (isinstance(options.get(k), list) and v in options.get(k)): + options[k] = v + changed = True + + return changed, options + + +def main(): + module = AnsibleModule( + argument_spec=dict( + group=dict(default=None, type='str'), + host=dict(type='str', required=True), + hostname=dict(type='str'), + host_key_algorithms=dict(type='str', no_log=False), + identity_file=dict(type='path'), + port=dict(type='str'), + proxycommand=dict(type='str', default=None), + proxyjump=dict(type='str', default=None), + forward_agent=dict(type='bool'), + remote_user=dict(type='str'), + ssh_config_file=dict(default=None, type='path'), + state=dict(type='str', default='present', choices=['present', 'absent']), + strict_host_key_checking=dict( + default=None, + choices=['yes', 'no', 'ask'] + ), + user=dict(default=None, type='str'), + user_known_hosts_file=dict(type='str', default=None), + ), + supports_check_mode=True, + mutually_exclusive=[ + ['user', 'ssh_config_file'], + ['proxycommand', 'proxyjump'], + ], + ) + + ssh_config_obj = SSHConfig(module) + ssh_config_obj.ensure_state() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/stackdriver.py b/ansible_collections/community/general/plugins/modules/stackdriver.py new file mode 100644 index 000000000..cf7cb2f47 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/stackdriver.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: stackdriver +short_description: Send code deploy and annotation events to stackdriver +description: + - Send code deploy and annotation events to Stackdriver +author: "Ben Whaley (@bwhaley)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + key: + type: str + description: + - API key. 
+ required: true + event: + type: str + description: + - The type of event to send, either annotation or deploy + choices: ['annotation', 'deploy'] + required: true + revision_id: + type: str + description: + - The revision of the code that was deployed. Required for deploy events + deployed_by: + type: str + description: + - The person or robot responsible for deploying the code + default: "Ansible" + deployed_to: + type: str + description: + - "The environment code was deployed to. (ie: development, staging, production)" + repository: + type: str + description: + - The repository (or project) deployed + msg: + type: str + description: + - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation. + annotated_by: + type: str + description: + - The person or robot who the annotation should be attributed to. + default: "Ansible" + level: + type: str + description: + - one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display. + choices: ['INFO', 'WARN', 'ERROR'] + default: 'INFO' + instance_id: + type: str + description: + - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown + event_epoch: + type: str + description: + - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this." +''' + +EXAMPLES = ''' +- name: Send a code deploy event to stackdriver + community.general.stackdriver: + key: AAAAAA + event: deploy + deployed_to: production + deployed_by: leeroyjenkins + repository: MyWebApp + revision_id: abcd123 + +- name: Send an annotation event to stackdriver + community.general.stackdriver: + key: AAAAAA + event: annotation + msg: Greetings from Ansible + annotated_by: leeroyjenkins + level: WARN + instance_id: i-abcd1234 +''' + +# =========================================== +# Stackdriver module specific support methods. 
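+# (Editor's sketch, not part of the upstream module) Both helpers below reduce
+# to a JSON POST with the API key sent in the x-stackdriver-apikey header,
+# e.g. for the deploy example above:
+#
+#   POST https://event-gateway.stackdriver.com/v1/deployevent
+#   {"revision_id": "abcd123", "deployed_by": "leeroyjenkins",
+#    "deployed_to": "production", "repository": "MyWebApp"}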
+# + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.urls import fetch_url + + +def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): + """Send a deploy event to Stackdriver""" + deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent" + + params = {} + params['revision_id'] = revision_id + params['deployed_by'] = deployed_by + if deployed_to: + params['deployed_to'] = deployed_to + if repository: + params['repository'] = repository + + return do_send_request(module, deploy_api, params, key) + + +def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None): + """Send an annotation event to Stackdriver""" + annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent" + + params = {} + params['message'] = msg + if annotated_by: + params['annotated_by'] = annotated_by + if level: + params['level'] = level + if instance_id: + params['instance_id'] = instance_id + if event_epoch: + params['event_epoch'] = event_epoch + + return do_send_request(module, annotation_api, params, key) + + +def do_send_request(module, url, params, key): + data = json.dumps(params) + headers = { + 'Content-Type': 'application/json', + 'x-stackdriver-apikey': key + } + response, info = fetch_url(module, url, headers=headers, data=data, method='POST') + if info['status'] != 200: + module.fail_json(msg="Unable to send msg: %s" % info['msg']) + + +# =========================================== +# Module execution. +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( # @TODO add types + key=dict(required=True, no_log=True), + event=dict(required=True, choices=['deploy', 'annotation']), + msg=dict(), + revision_id=dict(), + annotated_by=dict(default='Ansible'), + level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']), + instance_id=dict(), + event_epoch=dict(), # @TODO int? 
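+            # (Editor's note) event_epoch is accepted as a string and passed
+            # through to the annotation payload unchanged; per the option docs
+            # above it is a Unix timestamp.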
+        deployed_by=dict(default='Ansible'),
+        deployed_to=dict(),
+        repository=dict(),
+    ),
+    supports_check_mode=True
+)
+
+    key = module.params["key"]
+    event = module.params["event"]
+
+    # Annotation params
+    msg = module.params["msg"]
+    annotated_by = module.params["annotated_by"]
+    level = module.params["level"]
+    instance_id = module.params["instance_id"]
+    event_epoch = module.params["event_epoch"]
+
+    # Deploy params
+    revision_id = module.params["revision_id"]
+    deployed_by = module.params["deployed_by"]
+    deployed_to = module.params["deployed_to"]
+    repository = module.params["repository"]
+
+    ##################################################################
+    # deploy requires revision_id
+    # annotation requires msg
+    # We verify these manually
+    ##################################################################
+
+    if event == 'deploy':
+        if not revision_id:
+            module.fail_json(msg="revision_id required for deploy events")
+        try:
+            send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
+        except Exception as e:
+            module.fail_json(msg="unable to send deploy event: %s" % to_native(e),
+                             exception=traceback.format_exc())
+
+    if event == 'annotation':
+        if not msg:
+            module.fail_json(msg="msg required for annotation events")
+        try:
+            send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
+        except Exception as e:
+            module.fail_json(msg="unable to send annotation event: %s" % to_native(e),
+                             exception=traceback.format_exc())
+
+    changed = True
+    module.exit_json(changed=changed, deployed_by=deployed_by)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/stacki_host.py b/ansible_collections/community/general/plugins/modules/stacki_host.py
new file mode 100644
index 000000000..e286bc961
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/stacki_host.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Hugh Ma
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: stacki_host
+short_description: Add or remove a host on a Stacki front-end
+description:
+  - Use this module to add or remove hosts on a Stacki front-end via its API.
+  - Information on Stacki can be found at U(https://github.com/StackIQ/stacki).
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Name of the host to be added to Stacki.
+    required: true
+    type: str
+  stacki_user:
+    description:
+      - Username for authenticating with the Stacki API. If not specified, the environment variable C(stacki_user) is used instead.
+    required: true
+    type: str
+  stacki_password:
+    description:
+      - Password for authenticating with the Stacki API. If not specified, the environment variable C(stacki_password) is used instead.
+    required: true
+    type: str
+  stacki_endpoint:
+    description:
+      - URL for the Stacki API Endpoint.
+    required: true
+    type: str
+  prim_intf_mac:
+    description:
+      - MAC Address for the primary PXE boot network interface.
+      - Currently not used by the module.
+    type: str
+  prim_intf_ip:
+    description:
+      - IP Address for the primary network interface.
+      - Currently not used by the module.
+    type: str
+  prim_intf:
+    description:
+      - Name of the primary network interface.
+      - Currently not used by the module.
+    type: str
+  force_install:
+    description:
+      - Set value to C(true) to force node into install state if it already exists in stacki.
+    type: bool
+    default: false
+  state:
+    description:
+      - Set value to the desired state for the specified host.
+    type: str
+    choices: [ absent, present ]
+    default: present
+  appliance:
+    description:
+      - Appliance to be used in host creation.
+      - Required if I(state) is C(present) and host does not yet exist.
+    type: str
+    default: backend
+  rack:
+    description:
+      - Rack to be used in host creation.
+      - Required if I(state) is C(present) and host does not yet exist.
+    type: int
+    default: 0
+  rank:
+    description:
+      - Rank to be used in host creation.
+      - In Stacki terminology, the rank is the position of the machine in a rack.
+      - Required if I(state) is C(present) and host does not yet exist.
+    type: int
+    default: 0
+  network:
+    description:
+      - Network to be configured in the host.
+      - Currently not used by the module.
+    type: str
+    default: private
+author:
+- Hugh Ma (@bbyhuy)
+'''
+
+EXAMPLES = '''
+- name: Add a host named test-1
+  community.general.stacki_host:
+    name: test-1
+    stacki_user: usr
+    stacki_password: pwd
+    stacki_endpoint: url
+    prim_intf_mac: mac_addr
+    prim_intf_ip: x.x.x.x
+    prim_intf: eth0
+
+- name: Remove a host named test-1
+  community.general.stacki_host:
+    name: test-1
+    stacki_user: usr
+    stacki_password: pwd
+    stacki_endpoint: url
+    state: absent
+'''
+
+RETURN = '''
+changed:
+  description: Whether or not the API call completed successfully.
+  returned: always
+  type: bool
+  sample: true
+
+stdout:
+  description: The set of responses from the commands.
+  returned: always
+  type: list
+  sample: ['...', '...']
+
+stdout_lines:
+  description: The value of stdout split into a list.
+  returned: always
+  type: list
+  sample: [['...', '...'], ['...'], ['...']]
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class StackiHost(object):
+
+    def __init__(self, module):
+        self.module = module
+        self.hostname = module.params['name']
+        self.rack = module.params['rack']
+        self.rank = module.params['rank']
+        self.appliance = module.params['appliance']
+        self.prim_intf = module.params['prim_intf']
+        self.prim_intf_ip = module.params['prim_intf_ip']
+        self.network = module.params['network']
+        self.prim_intf_mac = module.params['prim_intf_mac']
+        self.endpoint = module.params['stacki_endpoint']
+
+        auth_creds = {'USERNAME': module.params['stacki_user'],
+                      'PASSWORD': module.params['stacki_password']}
+
+        # Get Initial CSRF
+        cred_a = self.do_request(self.endpoint, method="GET")
+        cookie_a = cred_a.headers.get('Set-Cookie').split(';')
+        init_csrftoken = None
+        for c in cookie_a:
+            if "csrftoken" in c:
+                init_csrftoken = c.replace("csrftoken=", "")
+                init_csrftoken = init_csrftoken.rstrip("\r\n")
+                break
+
+        # Make Header Dictionary with initial CSRF
+        header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
+                  'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}
+
+        # Endpoint to get final authentication header
+        login_endpoint = self.endpoint + "/login"
+
+        # Get Final CSRF and Session ID
+        login_req = self.do_request(login_endpoint, headers=header, payload=urlencode(auth_creds), method='POST')
+
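+        # (Editor's note) This follows a Django-style CSRF login flow: the
+        # initial GET hands out a csrftoken cookie, which is echoed back as
+        # X-CSRFToken when the credentials are POSTed to /login above; the
+        # response below supplies the session cookie reused for API calls.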
+        cookie_f = login_req.headers.get('Set-Cookie').split(';')
+        csrftoken = None
+        for f in cookie_f:
+            if "csrftoken" in f:
+                csrftoken = f.replace("csrftoken=", "")
+            if "sessionid" in f:
+                sessionid = f.split("sessionid=", 1)[-1]
+                sessionid = sessionid.rstrip("\r\n")
+
+        self.header = {'csrftoken': csrftoken,
+                       'X-CSRFToken': csrftoken,
+                       'sessionid': sessionid,
+                       'Content-type': 'application/json',
+                       'Cookie': login_req.headers.get('Set-Cookie')}
+
+    def do_request(self, url, payload=None, headers=None, method=None):
+        res, info = fetch_url(self.module, url, data=payload, headers=headers, method=method)
+
+        if info['status'] != 200:
+            self.module.fail_json(changed=False, msg=info['msg'])
+
+        return res
+
+    def stack_check_host(self):
+        res = self.do_request(self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
+        return self.hostname in res.read()
+
+    def stack_sync(self):
+        self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
+        self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")
+
+    def stack_force_install(self, result):
+        data = {'cmd': "set host boot {0} action=install".format(self.hostname)}
+        self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+        changed = True
+
+        self.stack_sync()
+
+        result['changed'] = changed
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_add(self, result):
+        data = dict()
+
+        data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\
+            .format(self.hostname, self.rack, self.rank, self.appliance)
+        self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_remove(self, result):
+        data = dict()
+
+        data['cmd'] = "remove host {0}"\
+            .format(self.hostname)
+        self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            state=dict(type='str', default='present', choices=['absent', 'present']),
+            name=dict(type='str', required=True),
+            rack=dict(type='int', default=0),
+            rank=dict(type='int', default=0),
+            appliance=dict(type='str', default='backend'),
+            prim_intf=dict(type='str'),
+            prim_intf_ip=dict(type='str'),
+            network=dict(type='str', default='private'),
+            prim_intf_mac=dict(type='str'),
+            stacki_user=dict(type='str', required=True, fallback=(env_fallback, ['stacki_user'])),
+            stacki_password=dict(type='str', required=True, fallback=(env_fallback, ['stacki_password']), no_log=True),
+            stacki_endpoint=dict(type='str', required=True, fallback=(env_fallback, ['stacki_endpoint'])),
+            force_install=dict(type='bool', default=False),
+        ),
+        supports_check_mode=False,
+    )
+
+    result = {'changed': False}
+    missing_params = list()
+
+    stacki = StackiHost(module)
+    host_exists = stacki.stack_check_host()
+
+    # If state is present, but host exists, need force_install flag to put host back into install state
+    if module.params['state'] == 'present' and host_exists and module.params['force_install']:
+        stacki.stack_force_install(result)
+    # If state is present, but host exists, and force_install is false, do nothing
+    elif module.params['state'] == 'present' and host_exists and not module.params['force_install']:
+        result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
+            .format(module.params['name'])
+    # Otherwise, state is present but the host doesn't exist, so more params are required to add it
+    elif module.params['state'] == 'present' and not host_exists:
+        for param in ['appliance', 'rack', 'rank', 'prim_intf', 'prim_intf_ip', 'network', 'prim_intf_mac']:
+            if not module.params[param]:
+                missing_params.append(param)
+        if len(missing_params) > 0:  # @FIXME replace with required_if
+            module.fail_json(msg="missing required arguments: {0}".format(missing_params))
+
+        stacki.stack_add(result)
+    # If state is absent and the host exists, remove it.
+    elif module.params['state'] == 'absent' and host_exists:
+        stacki.stack_remove(result)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/statsd.py b/ansible_collections/community/general/plugins/modules/statsd.py
new file mode 100644
index 000000000..65d33b709
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/statsd.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: statsd
+short_description: Send metrics to StatsD
+version_added: 2.1.0
+description:
+  - The C(statsd) module sends metrics to StatsD.
+  - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/).
+  - Supported metric types are C(counter) and C(gauge).
+    Currently unsupported metric types are C(timer), C(set), and C(gaugedelta).
+author: "Mark Mercado (@mamercad)"
+requirements:
+  - statsd
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  state:
+    type: str
+    description:
+      - State of the check, only C(present) makes sense.
+    choices: ["present"]
+    default: present
+  host:
+    type: str
+    default: localhost
+    description:
+      - StatsD host (hostname or IP) to send metrics to.
+  port:
+    type: int
+    default: 8125
+    description:
+      - The port on C(host) which StatsD is listening on.
+  protocol:
+    type: str
+    default: udp
+    choices: ["udp", "tcp"]
+    description:
+      - The transport protocol to send metrics over.
+  timeout:
+    type: float
+    default: 1.0
+    description:
+      - Sender timeout, only applicable if C(protocol) is C(tcp).
+  metric:
+    type: str
+    required: true
+    description:
+      - The name of the metric.
+  metric_type:
+    type: str
+    required: true
+    choices: ["counter", "gauge"]
+    description:
+      - The type of metric.
+  metric_prefix:
+    type: str
+    description:
+      - The prefix to add to the metric.
+    default: ''
+  value:
+    type: int
+    required: true
+    description:
+      - The value of the metric.
+  delta:
+    type: bool
+    default: false
+    description:
+      - If the metric is of type C(gauge), change the value by C(delta).
+''' + +EXAMPLES = ''' +- name: Increment the metric my_counter by 1 + community.general.statsd: + host: localhost + port: 9125 + protocol: tcp + metric: my_counter + metric_type: counter + value: 1 + +- name: Set the gauge my_gauge to 7 + community.general.statsd: + host: localhost + port: 9125 + protocol: tcp + metric: my_gauge + metric_type: gauge + value: 7 +''' + + +from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) + +try: + from statsd import StatsClient, TCPStatsClient + HAS_STATSD = True +except ImportError: + HAS_STATSD = False + + +def udp_statsd_client(**client_params): + return StatsClient(**client_params) + + +def tcp_statsd_client(**client_params): + return TCPStatsClient(**client_params) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present']), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=8125), + protocol=dict(type='str', default='udp', choices=['udp', 'tcp']), + timeout=dict(type='float', default=1.0), + metric=dict(type='str', required=True), + metric_type=dict(type='str', required=True, choices=['counter', 'gauge']), + metric_prefix=dict(type='str', default=''), + value=dict(type='int', required=True), + delta=dict(type='bool', default=False), + ), + supports_check_mode=False + ) + + if not HAS_STATSD: + module.fail_json(msg=missing_required_lib('statsd')) + + host = module.params.get('host') + port = module.params.get('port') + protocol = module.params.get('protocol') + timeout = module.params.get('timeout') + metric = module.params.get('metric') + metric_type = module.params.get('metric_type') + metric_prefix = module.params.get('metric_prefix') + value = module.params.get('value') + delta = module.params.get('delta') + + if protocol == 'udp': + client = udp_statsd_client(host=host, port=port, prefix=metric_prefix, maxudpsize=512, ipv6=False) + elif protocol == 'tcp': + client = tcp_statsd_client(host=host, port=port, timeout=timeout, prefix=metric_prefix, ipv6=False) + + metric_name = '%s/%s' % (metric_prefix, metric) if metric_prefix else metric + metric_display_value = '%s (delta=%s)' % (value, delta) if metric_type == 'gauge' else value + + try: + if metric_type == 'counter': + client.incr(metric, value) + elif metric_type == 'gauge': + client.gauge(metric, value, delta=delta) + + except Exception as exc: + module.fail_json(msg='Failed sending to StatsD %s' % str(exc)) + + finally: + if protocol == 'tcp': + client.close() + + module.exit_json(msg="Sent %s %s -> %s to StatsD" % (metric_type, metric_name, str(metric_display_value)), changed=True) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/statusio_maintenance.py b/ansible_collections/community/general/plugins/modules/statusio_maintenance.py new file mode 100644 index 000000000..31b422453 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/statusio_maintenance.py @@ -0,0 +1,475 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Benjamin Copeland (@bhcopeland) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: statusio_maintenance +short_description: Create maintenance windows for your status.io dashboard +description: + - Creates a maintenance 
window for status.io.
+  - Deletes a maintenance window for status.io.
+notes:
+  - You can use the apiary API URL (http://docs.statusio.apiary.io/) to
+    capture API traffic.
+  - Use start_date and start_time with minutes to set a future maintenance window.
+author: Benjamin Copeland (@bhcopeland)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  title:
+    type: str
+    description:
+      - A descriptive title for the maintenance window.
+    default: "A new maintenance window"
+  desc:
+    type: str
+    description:
+      - Message describing the maintenance window.
+    default: "Created by Ansible"
+  state:
+    type: str
+    description:
+      - Desired state of the maintenance window.
+    default: "present"
+    choices: ["present", "absent"]
+  api_id:
+    type: str
+    description:
+      - Your unique API ID from status.io.
+    required: true
+  api_key:
+    type: str
+    description:
+      - Your unique API Key from status.io.
+    required: true
+  statuspage:
+    type: str
+    description:
+      - Your unique StatusPage ID from status.io.
+    required: true
+  url:
+    type: str
+    description:
+      - Status.io API URL. A private apiary can be used instead.
+    default: "https://api.status.io"
+  components:
+    type: list
+    elements: str
+    description:
+      - The given name of your component (server name).
+    aliases: ['component']
+  containers:
+    type: list
+    elements: str
+    description:
+      - The given name of your container (data center).
+    aliases: ['container']
+  all_infrastructure_affected:
+    description:
+      - If it affects all components and containers.
+    type: bool
+    default: false
+  automation:
+    description:
+      - Automatically start and end the maintenance window.
+    type: bool
+    default: false
+  maintenance_notify_now:
+    description:
+      - Notify subscribers now.
+    type: bool
+    default: false
+  maintenance_notify_72_hr:
+    description:
+      - Notify subscribers 72 hours before maintenance start time.
+    type: bool
+    default: false
+  maintenance_notify_24_hr:
+    description:
+      - Notify subscribers 24 hours before maintenance start time.
+    type: bool
+    default: false
+  maintenance_notify_1_hr:
+    description:
+      - Notify subscribers 1 hour before maintenance start time.
+    type: bool
+    default: false
+  maintenance_id:
+    type: str
+    description:
+      - The maintenance ID number used when deleting a maintenance window.
+  minutes:
+    type: int
+    description:
+      - The length of time in UTC that the maintenance will run
+        (starting from playbook runtime).
+    default: 10
+  start_date:
+    type: str
+    description:
+      - Date maintenance is expected to start (Month/Day/Year) (UTC).
+      - End date is worked out from start_date + minutes.
+  start_time:
+    type: str
+    description:
+      - Time maintenance is expected to start (Hour:Minutes) (UTC).
+      - End time is worked out from start_time + minutes.
+'''
+
+EXAMPLES = '''
+- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
+  community.general.statusio_maintenance:
+    title: Router Upgrade from ansible
+    desc: Performing a Router Upgrade
+    components: server1.example.com
+    api_id: api_id
+    api_key: api_key
+    statuspage: statuspage_id
+    maintenance_notify_1_hr: true
+    automation: true
+
+- name: Create a maintenance window for 60 minutes on server1 and server2
+  community.general.statusio_maintenance:
+    title: Routine maintenance
+    desc: Some security updates
+    components:
+      - server1.example.com
+      - server2.example.com
+    minutes: 60
+    api_id: api_id
+    api_key: api_key
+    statuspage: statuspage_id
+    maintenance_notify_1_hr: true
+    automation: true
+  delegate_to: localhost
+
+- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center
+  community.general.statusio_maintenance:
+    title: Data center downtime
+    desc: Performing an upgrade to our data center
+    components: Primary Data Center
+    api_id: api_id
+    api_key: api_key
+    statuspage: statuspage_id
+    start_date: 01/01/2016
+    start_time: 12:00
+    minutes: 1440
+
+- name: Delete a maintenance window
+  community.general.statusio_maintenance:
+    title: Remove a maintenance window
+    maintenance_id: 561f90faf74bc94a4700087b
+    statuspage: statuspage_id
+    api_id: api_id
+    api_key: api_key
+    state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import open_url
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
+    headers = {
+        "x-api-id": api_id,
+        "x-api-key": api_key,
+        "Content-Type": "application/json"
+    }
+
+    try:
+        response = open_url(
+            url + "/v2/component/list/" + statuspage, headers=headers)
+        data = json.loads(response.read())
+        if data['status']['message'] == 'Authentication failed':
+            return 1, None, None, "Authentication failed: " \
+                                  "Check api_id/api_key and statuspage id."
+        else:
+            auth_headers = headers
+            auth_content = data
+    except Exception as e:
+        return 1, None, None, to_native(e)
+    return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
+    host_ids = []
+    lower_components = [x.lower() for x in components]
+    for result in auth_content["result"]:
+        if result['name'].lower() in lower_components:
+            data = {
+                "component_id": result["_id"],
+                "container_id": result["containers"][0]["_id"]
+            }
+            host_ids.append(data)
+            lower_components.remove(result['name'].lower())
+    if len(lower_components):
+        # items not found in the api
+        return 1, None, lower_components
+    return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+    host_ids = []
+    lower_containers = [x.lower() for x in containers]
+    for result in auth_content["result"]:
+        if result["containers"][0]["name"].lower() in lower_containers:
+            data = {
+                "component_id": result["_id"],
+                "container_id": result["containers"][0]["_id"]
+            }
+            host_ids.append(data)
+            lower_containers.remove(result["containers"][0]["name"].lower())
+
+    if len(lower_containers):
+        # items not found in the api
+        return 1, None, lower_containers
+    return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
+    returned_date = []
+    if start_date and start_time:
+        try:
+            datetime.datetime.strptime(start_date, '%m/%d/%Y')
+            returned_date.append(start_date)
+        except (NameError, ValueError):
+            return 1, None, "Not a valid start_date format."
+        try:
+            datetime.datetime.strptime(start_time, '%H:%M')
+            returned_date.append(start_time)
+        except (NameError, ValueError):
+            return 1, None, "Not a valid start_time format."
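+        # Worked example (editor's illustration, not upstream code):
+        # start_date="01/01/2016", start_time="12:00", minutes=1440 yields
+        # an end of "01/02/2016" "12:00" in the block below.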
+ try: + # Work out end date/time based on minutes + date_time_start = datetime.datetime.strptime( + start_time + start_date, '%H:%M%m/%d/%Y') + delta = date_time_start + datetime.timedelta(minutes=minutes) + returned_date.append(delta.strftime("%m/%d/%Y")) + returned_date.append(delta.strftime("%H:%M")) + except (NameError, ValueError): + return 1, None, "Couldn't work out a valid date" + else: + now = datetime.datetime.utcnow() + delta = now + datetime.timedelta(minutes=minutes) + # start_date + returned_date.append(now.strftime("%m/%d/%Y")) + returned_date.append(now.strftime("%H:%M")) + # end_date + returned_date.append(delta.strftime("%m/%d/%Y")) + returned_date.append(delta.strftime("%H:%M")) + return 0, returned_date, None + + +def create_maintenance(auth_headers, url, statuspage, host_ids, + all_infrastructure_affected, automation, title, desc, + returned_date, maintenance_notify_now, + maintenance_notify_72_hr, maintenance_notify_24_hr, + maintenance_notify_1_hr): + returned_dates = [[x] for x in returned_date] + component_id = [] + container_id = [] + for val in host_ids: + component_id.append(val['component_id']) + container_id.append(val['container_id']) + try: + values = json.dumps({ + "statuspage_id": statuspage, + "components": component_id, + "containers": container_id, + "all_infrastructure_affected": str(int(all_infrastructure_affected)), + "automation": str(int(automation)), + "maintenance_name": title, + "maintenance_details": desc, + "date_planned_start": returned_dates[0], + "time_planned_start": returned_dates[1], + "date_planned_end": returned_dates[2], + "time_planned_end": returned_dates[3], + "maintenance_notify_now": str(int(maintenance_notify_now)), + "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), + "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), + "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr)) + }) + response = open_url( + url + "/v2/maintenance/schedule", data=values, + headers=auth_headers) + data = json.loads(response.read()) + + if data["status"]["error"] == "yes": + return 1, None, data["status"]["message"] + except Exception as e: + return 1, None, to_native(e) + return 0, None, None + + +def delete_maintenance(auth_headers, url, statuspage, maintenance_id): + try: + values = json.dumps({ + "statuspage_id": statuspage, + "maintenance_id": maintenance_id, + }) + response = open_url( + url=url + "/v2/maintenance/delete", + data=values, + headers=auth_headers) + data = json.loads(response.read()) + if data["status"]["error"] == "yes": + return 1, None, "Invalid maintenance_id" + except Exception as e: + return 1, None, to_native(e) + return 0, None, None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_id=dict(required=True), + api_key=dict(required=True, no_log=True), + statuspage=dict(required=True), + state=dict(required=False, default='present', + choices=['present', 'absent']), + url=dict(default='https://api.status.io', required=False), + components=dict(type='list', elements='str', required=False, default=None, + aliases=['component']), + containers=dict(type='list', elements='str', required=False, default=None, + aliases=['container']), + all_infrastructure_affected=dict(type='bool', default=False, + required=False), + automation=dict(type='bool', default=False, required=False), + title=dict(required=False, default='A new maintenance window'), + desc=dict(required=False, default='Created by Ansible'), + minutes=dict(type='int', required=False, default=10), + 
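+            # (Editor's note) minutes always has a value (default 10), so a
+            # window with no start_date/start_time starts "now" in UTC
+            # (see get_date_time above).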
maintenance_notify_now=dict(type='bool', default=False, + required=False), + maintenance_notify_72_hr=dict(type='bool', default=False, + required=False), + maintenance_notify_24_hr=dict(type='bool', default=False, + required=False), + maintenance_notify_1_hr=dict(type='bool', default=False, + required=False), + maintenance_id=dict(required=False, default=None), + start_date=dict(default=None, required=False), + start_time=dict(default=None, required=False) + ), + supports_check_mode=True, + ) + + api_id = module.params['api_id'] + api_key = module.params['api_key'] + statuspage = module.params['statuspage'] + state = module.params['state'] + url = module.params['url'] + components = module.params['components'] + containers = module.params['containers'] + all_infrastructure_affected = module.params['all_infrastructure_affected'] + automation = module.params['automation'] + title = module.params['title'] + desc = module.params['desc'] + minutes = module.params['minutes'] + maintenance_notify_now = module.params['maintenance_notify_now'] + maintenance_notify_72_hr = module.params['maintenance_notify_72_hr'] + maintenance_notify_24_hr = module.params['maintenance_notify_24_hr'] + maintenance_notify_1_hr = module.params['maintenance_notify_1_hr'] + maintenance_id = module.params['maintenance_id'] + start_date = module.params['start_date'] + start_time = module.params['start_time'] + + if state == "present": + + if api_id and api_key: + (rc, auth_headers, auth_content, error) = \ + get_api_auth_headers(api_id, api_key, url, statuspage) + if rc != 0: + module.fail_json(msg="Failed to get auth keys: %s" % error) + else: + auth_headers = {} + auth_content = {} + + if minutes or start_time and start_date: + (rc, returned_date, error) = get_date_time( + start_date, start_time, minutes) + if rc != 0: + module.fail_json(msg="Failed to set date/time: %s" % error) + + if not components and not containers: + return module.fail_json(msg="A Component or Container must be " + "defined") + elif components and containers: + return module.fail_json(msg="Components and containers cannot " + "be used together") + else: + if components: + (rc, host_ids, error) = get_component_ids(auth_content, + components) + if rc != 0: + module.fail_json(msg="Failed to find component %s" % error) + + if containers: + (rc, host_ids, error) = get_container_ids(auth_content, + containers) + if rc != 0: + module.fail_json(msg="Failed to find container %s" % error) + + if module.check_mode: + module.exit_json(changed=True) + else: + (rc, dummy, error) = create_maintenance( + auth_headers, url, statuspage, host_ids, + all_infrastructure_affected, automation, + title, desc, returned_date, maintenance_notify_now, + maintenance_notify_72_hr, maintenance_notify_24_hr, + maintenance_notify_1_hr) + if rc == 0: + module.exit_json(changed=True, result="Successfully created " + "maintenance") + else: + module.fail_json(msg="Failed to create maintenance: %s" + % error) + + if state == "absent": + + if api_id and api_key: + (rc, auth_headers, auth_content, error) = \ + get_api_auth_headers(api_id, api_key, url, statuspage) + if rc != 0: + module.fail_json(msg="Failed to get auth keys: %s" % error) + else: + auth_headers = {} + + if module.check_mode: + module.exit_json(changed=True) + else: + (rc, dummy, error) = delete_maintenance( + auth_headers, url, statuspage, maintenance_id) + if rc == 0: + module.exit_json( + changed=True, + result="Successfully deleted maintenance" + ) + else: + module.fail_json( + msg="Failed to delete maintenance: %s" 
% error) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sudoers.py b/ansible_collections/community/general/plugins/modules/sudoers.py new file mode 100644 index 000000000..fd8289b1c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sudoers.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + + +# Copyright (c) 2019, Jon Ellis (@JonEllis) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: sudoers +short_description: Manage sudoers files +version_added: "4.3.0" +description: + - This module allows for the manipulation of sudoers files. +author: + - "Jon Ellis (@JonEllis) " +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + commands: + description: + - The commands allowed by the sudoers rule. + - Multiple can be added by passing a list of commands. + - Use C(ALL) for all commands. + type: list + elements: str + group: + description: + - The name of the group for the sudoers rule. + - This option cannot be used in conjunction with I(user). + type: str + name: + required: true + description: + - The name of the sudoers rule. + - This will be used for the filename for the sudoers file managed by this rule. + type: str + nopassword: + description: + - Whether a password will be required to run the sudo'd command. + default: true + type: bool + setenv: + description: + - Whether to allow keeping the environment when command is run with sudo. + default: false + type: bool + version_added: 6.3.0 + host: + description: + - Specify the host the rule is for. + default: ALL + type: str + version_added: 6.2.0 + runas: + description: + - Specify the target user the command(s) will run as. + type: str + version_added: 4.7.0 + sudoers_path: + description: + - The path which sudoers config files will be managed in. + default: /etc/sudoers.d + type: str + state: + default: "present" + choices: + - present + - absent + description: + - Whether the rule should exist or not. + type: str + user: + description: + - The name of the user for the sudoers rule. + - This option cannot be used in conjunction with I(group). + type: str + validation: + description: + - If C(absent), the sudoers rule will be added without validation. + - If C(detect) and visudo is available, then the sudoers rule will be validated by visudo. + - If C(required), visudo must be available to validate the sudoers rule. 
+ type: str + default: detect + choices: [ absent, detect, required ] + version_added: 5.2.0 +''' + +EXAMPLES = ''' +- name: Allow the backup user to sudo /usr/local/bin/backup + community.general.sudoers: + name: allow-backup + state: present + user: backup + commands: /usr/local/bin/backup + +- name: Allow the bob user to run any commands as alice with sudo -u alice + community.general.sudoers: + name: bob-do-as-alice + state: present + user: bob + runas: alice + commands: ALL + +- name: >- + Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics + without requiring a password on the host called webserver + community.general.sudoers: + name: monitor-app + group: monitoring + host: webserver + commands: /usr/local/bin/gather-app-metrics + +- name: >- + Allow the alice user to run sudo /bin/systemctl restart my-service or + sudo /bin/systemctl reload my-service, but a password is required + community.general.sudoers: + name: alice-service + user: alice + commands: + - /bin/systemctl restart my-service + - /bin/systemctl reload my-service + nopassword: false + +- name: Revoke the previous sudo grants given to the alice user + community.general.sudoers: + name: alice-service + state: absent + +- name: Allow alice to sudo /usr/local/bin/upload and keep env variables + community.general.sudoers: + name: allow-alice-upload + user: alice + commands: /usr/local/bin/upload + setenv: true +''' + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class Sudoers(object): + + FILE_MODE = 0o440 + + def __init__(self, module): + self.module = module + + self.check_mode = module.check_mode + self.name = module.params['name'] + self.user = module.params['user'] + self.group = module.params['group'] + self.state = module.params['state'] + self.nopassword = module.params['nopassword'] + self.setenv = module.params['setenv'] + self.host = module.params['host'] + self.runas = module.params['runas'] + self.sudoers_path = module.params['sudoers_path'] + self.file = os.path.join(self.sudoers_path, self.name) + self.commands = module.params['commands'] + self.validation = module.params['validation'] + + def write(self): + if self.check_mode: + return + + with open(self.file, 'w') as f: + f.write(self.content()) + + os.chmod(self.file, self.FILE_MODE) + + def delete(self): + if self.check_mode: + return + + os.remove(self.file) + + def exists(self): + return os.path.exists(self.file) + + def matches(self): + with open(self.file, 'r') as f: + content_matches = f.read() == self.content() + + current_mode = os.stat(self.file).st_mode & 0o777 + mode_matches = current_mode == self.FILE_MODE + + return content_matches and mode_matches + + def content(self): + if self.user: + owner = self.user + elif self.group: + owner = '%{group}'.format(group=self.group) + + commands_str = ', '.join(self.commands) + nopasswd_str = 'NOPASSWD:' if self.nopassword else '' + setenv_str = 'SETENV:' if self.setenv else '' + runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else '' + return "{owner} {host}={runas}{nopasswd}{setenv} {commands}\n".format( + owner=owner, + host=self.host, + runas=runas_str, + nopasswd=nopasswd_str, + setenv=setenv_str, + commands=commands_str + ) + + def validate(self): + if self.validation == 'absent': + return + + visudo_path = self.module.get_bin_path('visudo', required=self.validation == 'required') + if visudo_path is None: + return + + check_command = [visudo_path, '-c', '-f', 
'-']
+        rc, stdout, stderr = self.module.run_command(check_command, data=self.content())
+
+        if rc != 0:
+            raise Exception('Failed to validate sudoers rule:\n{stdout}'.format(stdout=stdout))
+
+    def run(self):
+        if self.state == 'absent':
+            if self.exists():
+                self.delete()
+                return True
+            else:
+                return False
+
+        self.validate()
+
+        if self.exists() and self.matches():
+            return False
+
+        self.write()
+        return True
+
+
+def main():
+    argument_spec = {
+        'commands': {
+            'type': 'list',
+            'elements': 'str',
+        },
+        'group': {},
+        'name': {
+            'required': True,
+        },
+        'nopassword': {
+            'type': 'bool',
+            'default': True,
+        },
+        'setenv': {
+            'type': 'bool',
+            'default': False,
+        },
+        'host': {
+            'type': 'str',
+            'default': 'ALL',
+        },
+        'runas': {
+            'type': 'str',
+            'default': None,
+        },
+        'sudoers_path': {
+            'type': 'str',
+            'default': '/etc/sudoers.d',
+        },
+        'state': {
+            'default': 'present',
+            'choices': ['present', 'absent'],
+        },
+        'user': {},
+        'validation': {
+            'default': 'detect',
+            'choices': ['absent', 'detect', 'required']
+        },
+    }
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[['user', 'group']],
+        supports_check_mode=True,
+        required_if=[('state', 'present', ['commands'])],
+    )
+
+    sudoers = Sudoers(module)
+
+    try:
+        changed = sudoers.run()
+        module.exit_json(changed=changed)
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/supervisorctl.py b/ansible_collections/community/general/plugins/modules/supervisorctl.py
new file mode 100644
index 000000000..e9df16108
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/supervisorctl.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Matt Wright
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: supervisorctl
+short_description: Manage the state of a program or group of programs running via supervisord
+description:
+  - Manage the state of a program or group of programs running via supervisord.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    type: str
+    description:
+      - The name of the supervisord program or group to manage.
+      - The name will be taken as group name when it ends with a colon I(:).
+      - Group support is only available in Ansible version 1.6 or later.
+      - If I(name=all), all programs and program groups will be managed.
+    required: true
+  config:
+    type: path
+    description:
+      - The supervisor configuration file path.
+  server_url:
+    type: str
+    description:
+      - URL on which supervisord server is listening.
+  username:
+    type: str
+    description:
+      - Username to use for authentication.
+  password:
+    type: str
+    description:
+      - Password to use for authentication.
+  state:
+    type: str
+    description:
+      - The desired state of program/group.
+    required: true
+    choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
+  signal:
+    type: str
+    description:
+      - The signal to send to the program/group, when combined with the 'signalled' state. Required when I(state=signalled).
+ supervisorctl_path: + type: path + description: + - path to supervisorctl executable +notes: + - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist. + - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart). + - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. +requirements: [ "supervisorctl" ] +author: + - "Matt Wright (@mattupstate)" + - "Aaron Wang (@inetfuture) " +''' + +EXAMPLES = ''' +- name: Manage the state of program to be in started state + community.general.supervisorctl: + name: my_app + state: started + +- name: Manage the state of program group to be in started state + community.general.supervisorctl: + name: 'my_apps:' + state: started + +- name: Restart my_app, reading supervisorctl configuration from a specified file + community.general.supervisorctl: + name: my_app + state: restarted + config: /var/opt/my_project/supervisord.conf + +- name: Restart my_app, connecting to supervisord with credentials and server URL + community.general.supervisorctl: + name: my_app + state: restarted + username: test + password: testpass + server_url: http://localhost:9001 + +- name: Send a signal to my_app via supervisorctl + community.general.supervisorctl: + name: my_app + state: signalled + signal: USR1 + +- name: Restart all programs and program groups + community.general.supervisorctl: + name: all + state: restarted +''' + +import os +from ansible.module_utils.basic import AnsibleModule, is_executable + + +def main(): + arg_spec = dict( + name=dict(type='str', required=True), + config=dict(type='path'), + server_url=dict(type='str'), + username=dict(type='str'), + password=dict(type='str', no_log=True), + supervisorctl_path=dict(type='path'), + state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']), + signal=dict(type='str'), + ) + + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[('state', 'signalled', ['signal'])], + ) + + name = module.params['name'] + is_group = False + if name.endswith(':'): + is_group = True + name = name.rstrip(':') + state = module.params['state'] + config = module.params.get('config') + server_url = module.params.get('server_url') + username = module.params.get('username') + password = module.params.get('password') + supervisorctl_path = module.params.get('supervisorctl_path') + signal = module.params.get('signal') + + # we check error message for a pattern, so we need to make sure that's in C locale + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + if supervisorctl_path: + if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path): + supervisorctl_args = [supervisorctl_path] + else: + module.fail_json( + msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path) + else: + supervisorctl_args = [module.get_bin_path('supervisorctl', True)] + + if config: + supervisorctl_args.extend(['-c', config]) + if server_url: + supervisorctl_args.extend(['-s', server_url]) + if username: + supervisorctl_args.extend(['-u', username]) + if password: + supervisorctl_args.extend(['-p', password]) + + def run_supervisorctl(cmd, name=None, **kwargs): + args = list(supervisorctl_args) # copy the master args + args.append(cmd) + if name: + 
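+            # Illustration (editor's note): with the config file from the
+            # EXAMPLES above, the final call built here looks like
+            # ['supervisorctl', '-c', '/var/opt/my_project/supervisord.conf', 'restart', 'my_app'].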
args.append(name) + return module.run_command(args, **kwargs) + + def get_matched_processes(): + matched = [] + rc, out, err = run_supervisorctl('status') + for line in out.splitlines(): + # One status line may look like one of these two: + # process not in group: + # echo_date_lonely RUNNING pid 7680, uptime 13:22:18 + # process in group: + # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18 + fields = [field for field in line.split(' ') if field != ''] + process_name = fields[0] + status = fields[1] + + if is_group: + # If there is ':', this process must be in a group. + if ':' in process_name: + group = process_name.split(':')[0] + if group != name: + continue + else: + continue + else: + if process_name != name and name != "all": + continue + + matched.append((process_name, status)) + return matched + + def take_action_on_processes(processes, status_filter, action, expected_result): + to_take_action_on = [] + for process_name, status in processes: + if status_filter(status): + to_take_action_on.append(process_name) + + if len(to_take_action_on) == 0: + module.exit_json(changed=False, name=name, state=state) + if module.check_mode: + module.exit_json(changed=True) + for process_name in to_take_action_on: + rc, out, err = run_supervisorctl(action, process_name, check_rc=True) + if '%s: %s' % (process_name, expected_result) not in out: + module.fail_json(msg=out) + + module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) + + if state == 'restarted': + rc, out, err = run_supervisorctl('update', check_rc=True) + processes = get_matched_processes() + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + + take_action_on_processes(processes, lambda s: True, 'restart', 'started') + + processes = get_matched_processes() + + if state == 'absent': + if len(processes) == 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('remove', name) + if '%s: removed process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + + if state == 'present': + if len(processes) > 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + dummy, out, dummy = run_supervisorctl('add', name) + if '%s: added process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + + # from this point onwards, if there are no matching processes, module cannot go on. 
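+    # (Editor's note) The status filters below are what make these states
+    # idempotent: e.g. 'started' only acts on processes that are not already
+    # RUNNING or STARTING.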
+ if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + + if state == 'started': + take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') + + if state == 'stopped': + take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') + + if state == 'signalled': + take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled') + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/svc.py b/ansible_collections/community/general/plugins/modules/svc.py new file mode 100644 index 000000000..bd2eaeb22 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/svc.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: svc +author: +- Brian Coca (@bcoca) +short_description: Manage daemontools services +description: + - Controls daemontools services on remote hosts using the svc utility. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - C(Started)/C(stopped) are idempotent actions that will not run + commands unless necessary. C(restarted) will always bounce the + svc (svc -t) and C(killed) will always bounce the svc (svc -k). + C(reloaded) will send a sigusr1 (svc -1). + C(once) will run a normally downed svc once (svc -o), not really + an idempotent operation. + type: str + choices: [ killed, once, reloaded, restarted, started, stopped ] + downed: + description: + - Should a 'down' file exist or not, if it exists it disables auto startup. + Defaults to no. Downed does not imply stopped. + type: bool + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies stopped. + Take note that a service can be enabled and downed (no auto restart). + type: bool + service_dir: + description: + - Directory svscan watches for services + type: str + default: /service + service_src: + description: + - Directory where services are defined, the source of symlinks to service_dir. 
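+      - Enabling a service creates a symlink from this directory into I(service_dir).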
+ type: str + default: /etc/service +''' + +EXAMPLES = ''' +- name: Start svc dnscache, if not running + community.general.svc: + name: dnscache + state: started + +- name: Stop svc dnscache, if running + community.general.svc: + name: dnscache + state: stopped + +- name: Kill svc dnscache, in all cases + community.general.svc: + name: dnscache + state: killed + +- name: Restart svc dnscache, in all cases + community.general.svc: + name: dnscache + state: restarted + +- name: Reload svc dnscache, in all cases + community.general.svc: + name: dnscache + state: reloaded + +- name: Using alternative svc directory location + community.general.svc: + name: dnscache + state: reloaded + service_dir: /var/service +''' + +import os +import re +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def _load_dist_subclass(cls, *args, **kwargs): + ''' + Used for derivative implementations + ''' + subclass = None + + distro = kwargs['module'].params['distro'] + + # get the most specific superclass for this platform + if distro is not None: + for sc in cls.__subclasses__(): + if sc.distro is not None and sc.distro == distro: + subclass = sc + if subclass is None: + subclass = cls + + return super(cls, subclass).__new__(subclass) + + +class Svc(object): + """ + Main class that handles daemontools, can be subclassed and overridden in case + we want to use a 'derivative' like encore, s6, etc + """ + + # def __new__(cls, *args, **kwargs): + # return _load_dist_subclass(cls, args, kwargs) + + def __init__(self, module): + self.extra_paths = ['/command', '/usr/local/bin'] + self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + + self.module = module + + self.name = module.params['name'] + self.service_dir = module.params['service_dir'] + self.service_src = module.params['service_src'] + self.enabled = None + self.downed = None + self.full_state = None + self.state = None + self.pid = None + self.duration = None + + self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths) + self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths) + self.svc_full = '/'.join([self.service_dir, self.name]) + self.src_full = '/'.join([self.service_src, self.name]) + + self.enabled = os.path.lexists(self.svc_full) + if self.enabled: + self.downed = os.path.lexists('%s/down' % self.svc_full) + self.get_status() + else: + self.downed = os.path.lexists('%s/down' % self.src_full) + self.state = 'stopped' + + def enable(self): + if os.path.exists(self.src_full): + try: + os.symlink(self.src_full, self.svc_full) + except OSError as e: + self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e)) + else: + self.module.fail_json(msg="Could not find source for service to enable (%s)." 
% self.src_full)
+
+    def disable(self):
+        try:
+            os.unlink(self.svc_full)
+        except OSError as e:
+            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+        self.execute_command([self.svc_cmd, '-dx', self.src_full])
+
+        src_log = '%s/log' % self.src_full
+        if os.path.exists(src_log):
+            self.execute_command([self.svc_cmd, '-dx', src_log])
+
+    def get_status(self):
+        rc, out, err = self.execute_command([self.svstat_cmd, self.svc_full])
+
+        if err is not None and err:
+            self.full_state = self.state = err
+        else:
+            self.full_state = out
+
+            m = re.search(r'\(pid (\d+)\)', out)
+            if m:
+                self.pid = m.group(1)
+
+            m = re.search(r'(\d+) seconds', out)
+            if m:
+                self.duration = m.group(1)
+
+            # the state is built from a base ('start'/'stopp') plus a suffix:
+            # 'ing' while a change is still wanted, 'ed' once settled, giving
+            # 'started', 'stopped', 'starting' or 'stopping'
+            if re.search(' up ', out):
+                self.state = 'start'
+            elif re.search(' down ', out):
+                self.state = 'stopp'
+            else:
+                self.state = 'unknown'
+                return
+
+            if re.search(' want ', out):
+                self.state += 'ing'
+            else:
+                self.state += 'ed'
+
+    def start(self):
+        return self.execute_command([self.svc_cmd, '-u', self.svc_full])
+
+    def stopp(self):
+        return self.stop()
+
+    def stop(self):
+        return self.execute_command([self.svc_cmd, '-d', self.svc_full])
+
+    def once(self):
+        return self.execute_command([self.svc_cmd, '-o', self.svc_full])
+
+    def reload(self):
+        return self.execute_command([self.svc_cmd, '-1', self.svc_full])
+
+    def restart(self):
+        return self.execute_command([self.svc_cmd, '-t', self.svc_full])
+
+    def kill(self):
+        return self.execute_command([self.svc_cmd, '-k', self.svc_full])
+
+    def execute_command(self, cmd):
+        try:
+            rc, out, err = self.module.run_command(cmd)
+        except Exception as e:
+            self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
+        return (rc, out, err)
+
+    def report(self):
+        self.get_status()
+        states = {}
+        for k in self.report_vars:
+            states[k] = self.__dict__[k]
+        return states
+
+
+# ===========================================
+# Main control flow
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+            enabled=dict(type='bool'),
+            downed=dict(type='bool'),
+            service_dir=dict(type='str', default='/service'),
+            service_src=dict(type='str', default='/etc/service'),
+        ),
+        supports_check_mode=True,
+    )
+
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+    state = module.params['state']
+    enabled = module.params['enabled']
+    downed = module.params['downed']
+
+    svc = Svc(module)
+    changed = False
+    orig_state = svc.report()
+
+    if enabled is not None and enabled != svc.enabled:
+        changed = True
+        if not module.check_mode:
+            try:
+                if enabled:
+                    svc.enable()
+                else:
+                    svc.disable()
+            except (OSError, IOError) as e:
+                module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+    if state is not None and state != svc.state:
+        changed = True
+        if not module.check_mode:
+            # map the requested state to its action method: started -> start,
+            # stopped -> stopp, killed -> kill, reloaded -> reload,
+            # restarted -> restart; 'once' is mapped explicitly, since
+            # stripping its last two characters would name a non-existent
+            # 'on' method
+            action = 'once' if state == 'once' else state[:-2]
+            getattr(svc, action)()
+
+    if downed is not None and downed != svc.downed:
+        changed = True
+        if not module.check_mode:
+            d_file = "%s/down" % svc.svc_full
+            try:
+                if downed:
+                    open(d_file, "a").close()
+                else:
+                    os.unlink(d_file)
+            except (OSError, IOError) as e:
+                module.fail_json(msg="Could not change downed file: %s" % to_native(e))
+
+    module.exit_json(changed=changed, svc=svc.report())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/svr4pkg.py
b/ansible_collections/community/general/plugins/modules/svr4pkg.py
new file mode 100644
index 000000000..e8c410482
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/svr4pkg.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Boyd Adamson
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: svr4pkg
+short_description: Manage Solaris SVR4 packages
+description:
+  - Manages SVR4 packages on Solaris 10 and 11.
+  - These were the native packages on Solaris <= 10 and are available
+    as a legacy feature in Solaris 11.
+  - Note that this is a very basic packaging system. It will not enforce
+    dependencies on install or remove.
+author: "Boyd Adamson (@brontitall)"
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Package name, for example C(SUNWcsr).
+    required: true
+    type: str
+
+  state:
+    description:
+      - Whether to install (C(present)) or remove (C(absent)) a package.
+      - If the package is to be installed, then I(src) is required.
+      - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old package, then install the new one.
+    required: true
+    choices: ["present", "absent"]
+    type: str
+
+  src:
+    description:
+      - Specifies the location to install the package from. Required when I(state=present).
+      - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: C(somefile.pkg), C(/dir/with/pkgs), C(http://server/mypkgs.pkg)."
+      - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.
+    type: str
+  proxy:
+    description:
+      - HTTP[s] proxy to be used if I(src) is a URL.
+    type: str
+  response_file:
+    description:
+      - Specifies the location of a response file to be used if the package expects input on install. (added in Ansible 1.4)
+    required: false
+    type: str
+  zone:
+    description:
+      - Whether to install the package only in the current zone, or install it into all zones.
+      - The installation into all zones works only if you are working with the global zone.
+    required: false
+    default: "all"
+    choices: ["current", "all"]
+    type: str
+  category:
+    description:
+      - Install/Remove category instead of a single package.
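+      - When C(true), I(name) is treated as a category, passing C(-Y) to C(pkgadd)/C(pkgrm) and C(-c) to C(pkginfo).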
+ required: false + type: bool + default: false +''' + +EXAMPLES = ''' +- name: Install a package from an already copied file + community.general.svr4pkg: + name: CSWcommon + src: /tmp/cswpkgs.pkg + state: present + +- name: Install a package directly from an http site + community.general.svr4pkg: + name: CSWpkgutil + src: 'http://get.opencsw.org/now' + state: present + zone: current + +- name: Install a package with a response file + community.general.svr4pkg: + name: CSWggrep + src: /tmp/third-party.pkg + response_file: /tmp/ggrep.response + state: present + +- name: Ensure that a package is not installed + community.general.svr4pkg: + name: SUNWgnome-sound-recorder + state: absent + +- name: Ensure that a category is not installed + community.general.svr4pkg: + name: FIREFOX + state: absent + category: true +''' + + +import os +import tempfile + +from ansible.module_utils.basic import AnsibleModule + + +def package_installed(module, name, category): + cmd = [module.get_bin_path('pkginfo', True), '-q'] + if category: + cmd.append('-c') + cmd.append(name) + rc, out, err = module.run_command(' '.join(cmd)) + if rc == 0: + return True + else: + return False + + +def create_admin_file(): + (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) + fullauto = b''' +mail= +instance=unique +partial=nocheck +runlevel=quit +idepend=nocheck +rdepend=nocheck +space=quit +setuid=nocheck +conflict=nocheck +action=nocheck +networktimeout=60 +networkretries=3 +authentication=quit +keystore=/var/sadm/security +proxy= +basedir=default +''' + os.write(desc, fullauto) + os.close(desc) + return filename + + +def run_command(module, cmd): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True) + return module.run_command(cmd) + + +def package_install(module, name, src, proxy, response_file, zone, category): + adminfile = create_admin_file() + cmd = ['pkgadd', '-n'] + if zone == 'current': + cmd += ['-G'] + cmd += ['-a', adminfile, '-d', src] + if proxy is not None: + cmd += ['-x', proxy] + if response_file is not None: + cmd += ['-r', response_file] + if category: + cmd += ['-Y'] + cmd.append(name) + (rc, out, err) = run_command(module, cmd) + os.unlink(adminfile) + return (rc, out, err) + + +def package_uninstall(module, name, src, category): + adminfile = create_admin_file() + if category: + cmd = ['pkgrm', '-na', adminfile, '-Y', name] + else: + cmd = ['pkgrm', '-na', adminfile, name] + (rc, out, err) = run_command(module, cmd) + os.unlink(adminfile) + return (rc, out, err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=True, choices=['present', 'absent']), + src=dict(default=None), + proxy=dict(default=None), + response_file=dict(default=None), + zone=dict(required=False, default='all', choices=['current', 'all']), + category=dict(default=False, type='bool') + ), + supports_check_mode=True + ) + state = module.params['state'] + name = module.params['name'] + src = module.params['src'] + proxy = module.params['proxy'] + response_file = module.params['response_file'] + zone = module.params['zone'] + category = module.params['category'] + rc = None + out = '' + err = '' + result = {} + result['name'] = name + result['state'] = state + + if state == 'present': + if src is None: + module.fail_json(name=name, + msg="src is required when state=present") + if not package_installed(module, name, category): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_install(module, name, src, 
proxy, response_file, zone, category) + # Stdout is normally empty but for some packages can be + # very long and is not often useful + if len(out) > 75: + out = out[:75] + '...' + + elif state == 'absent': + if package_installed(module, name, category): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_uninstall(module, name, src, category) + out = out[:75] + + # Returncodes as per pkgadd(1m) + # 0 Successful completion + # 1 Fatal error. + # 2 Warning. + # 3 Interruption. + # 4 Administration. + # 5 Administration. Interaction is required. Do not use pkgadd -n. + # 10 Reboot after installation of all packages. + # 20 Reboot after installation of this package. + # 99 (observed) pkgadd: ERROR: could not process datastream from + if rc in (0, 2, 3, 10, 20): + result['changed'] = True + # no install nor uninstall, or failed + else: + result['changed'] = False + + # rc will be none when the package already was installed and no action took place + # Only return failed=False when the returncode is known to be good as there may be more + # undocumented failure return codes + if rc not in (None, 0, 2, 10, 20): + result['failed'] = True + else: + result['failed'] = False + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/swdepot.py b/ansible_collections/community/general/plugins/modules/swdepot.py new file mode 100644 index 000000000..c4660c70d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/swdepot.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Raul Melo +# Written by Raul Melo +# Based on yum module written by Seth Vidal +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: swdepot +short_description: Manage packages with swdepot package manager (HP-UX) +description: + - Will install, upgrade and remove packages with swdepot package manager (HP-UX) +notes: [] +author: "Raul Melo (@melodous)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - package name. + aliases: [pkg] + required: true + type: str + state: + description: + - whether to install (C(present), C(latest)), or remove (C(absent)) a package. + required: true + choices: [ 'present', 'latest', 'absent'] + type: str + depot: + description: + - The source repository from which install or upgrade a package. + type: str +''' + +EXAMPLES = ''' +- name: Install a package + community.general.swdepot: + name: unzip-6.0 + state: present + depot: 'repository:/path' + +- name: Install the latest version of a package + community.general.swdepot: + name: unzip + state: latest + depot: 'repository:/path' + +- name: Remove a package + community.general.swdepot: + name: unzip + state: absent +''' + +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote + + +def compare_package(version1, version2): + """ Compare version packages. 
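+    Comparison is numeric and segment-wise after trailing '.0' segments are
+    stripped: for example compare_package('1.2', '1.2.0') returns 0 and
+    compare_package('1.9', '1.10') returns -1 (illustrative values, assuming
+    dotted-integer revision strings such as swlist reports).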
+    Return values:
+        -1 first is lower
+         0 equal
+         1 first is greater
+    """

+    def normalize(v):
+        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+    normalized_version1 = normalize(version1)
+    normalized_version2 = normalize(version2)
+    if normalized_version1 == normalized_version2:
+        rc = 0
+    elif normalized_version1 < normalized_version2:
+        rc = -1
+    else:
+        rc = 1
+    return rc
+
+
+def query_package(module, name, depot=None):
+    """ Return whether a package is installed and, if so, its version. """
+
+    cmd_list = '/usr/sbin/swlist -a revision -l product'
+    if depot:
+        rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)),
+                                                use_unsafe_shell=True)
+    else:
+        rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True)
+    if rc == 0:
+        version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
+    else:
+        version = None
+
+    return rc, version
+
+
+def remove_package(module, name):
+    """ Uninstall the package if installed. """
+
+    cmd_remove = '/usr/sbin/swremove'
+    rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
+
+    if rc == 0:
+        return rc, stdout
+    else:
+        return rc, stderr
+
+
+def install_package(module, depot, name):
+    """ Install the package if not already installed. """
+
+    cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
+    rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
+    if rc == 0:
+        return rc, stdout
+    else:
+        return rc, stderr
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(aliases=['pkg'], required=True),
+            state=dict(choices=['present', 'absent', 'latest'], required=True),
+            depot=dict(default=None, required=False)
+        ),
+        supports_check_mode=True
+    )
+    name = module.params['name']
+    state = module.params['state']
+    depot = module.params['depot']

+    changed = False
+    msg = "No change"
+    rc = 0
+    if (state == 'present' or state == 'latest') and depot is None:
+        output = "depot parameter is required when state is present or latest"
+        module.fail_json(name=name, msg=output, rc=rc)
+
+    # Check local version
+    rc, version_installed = query_package(module, name)
+    if not rc:
+        installed = True
+        msg = "Already installed"
+    else:
+        installed = False
+
+    if (state == 'present' or state == 'latest') and installed is False:
+        if module.check_mode:
+            module.exit_json(changed=True)
+        rc, output = install_package(module, depot, name)
+
+        if not rc:
+            changed = True
+            msg = "Package installed"
+        else:
+            module.fail_json(name=name, msg=output, rc=rc)
+
+    elif state == 'latest' and installed is True:
+        # Check depot version
+        rc, version_depot = query_package(module, name, depot)
+
+        if not rc:
+            if compare_package(version_installed, version_depot) == -1:
+                if module.check_mode:
+                    module.exit_json(changed=True)
+                # Install new version
+                rc, output = install_package(module, depot, name)
+
+                if not rc:
+                    msg = "Package upgraded from " + version_installed + " to " + version_depot
+                    changed = True
+                else:
+                    module.fail_json(name=name, msg=output, rc=rc)
+        else:
+            output = "Software package not in repository " + depot
+            module.fail_json(name=name, msg=output, rc=rc)
+
+    elif state == 'absent' and installed is True:
+        if module.check_mode:
+            module.exit_json(changed=True)
+        rc, output = remove_package(module, name)
+        if not rc:
+            changed = True
+            msg = "Package removed"
+        else:
+            module.fail_json(name=name, msg=output, rc=rc)
+
+    if module.check_mode:
+        module.exit_json(changed=False)
+
+    module.exit_json(changed=changed, name=name, state=state, msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/swupd.py b/ansible_collections/community/general/plugins/modules/swupd.py
new file mode 100644
index 000000000..efd7ca7c1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/swupd.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Alberto Murillo
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swupd
+short_description: Manages updates and bundles in Clear Linux systems
+description:
+  - Manages updates and bundles with the swupd bundle manager, which is used by the
+    Clear Linux Project for Intel Architecture.
+author: Alberto Murillo (@albertomurillo)
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  contenturl:
+    description:
+      - URL pointing to the contents of available bundles.
+        If not specified, the contents are retrieved from clearlinux.org.
+    type: str
+  format:
+    description:
+      - The format suffix for version file downloads, for example C(1), C(2), C(3) or C(staging).
+        If not specified, the default format is used.
+    type: str
+  manifest:
+    description:
+      - The manifest contains information about the bundles at a certain version of the OS.
+        Specify a manifest version to verify against that version, or leave unspecified to
+        verify against the current version.
+    aliases: [release, version]
+    type: int
+  name:
+    description:
+      - Name of the bundle to install or remove.
+    aliases: [bundle]
+    type: str
+  state:
+    description:
+      - Indicates the desired bundle state. C(present) ensures the bundle
+        is installed while C(absent) ensures the bundle is not installed.
+    default: present
+    choices: [present, absent]
+    type: str
+  update:
+    description:
+      - Updates the OS to the latest version.
+    type: bool
+    default: false
+  url:
+    description:
+      - Overrides both I(contenturl) and I(versionurl).
+    type: str
+  verify:
+    description:
+      - Verify content for OS version.
+    type: bool
+    default: false
+  versionurl:
+    description:
+      - URL for version string download.
+ type: str +''' + +EXAMPLES = ''' +- name: Update the OS to the latest version + community.general.swupd: + update: true + +- name: Installs the "foo" bundle + community.general.swupd: + name: foo + state: present + +- name: Removes the "foo" bundle + community.general.swupd: + name: foo + state: absent + +- name: Check integrity of filesystem + community.general.swupd: + verify: true + +- name: Downgrade OS to release 12920 + community.general.swupd: + verify: true + manifest: 12920 +''' + +RETURN = ''' +stdout: + description: stdout of swupd + returned: always + type: str +stderr: + description: stderr of swupd + returned: always + type: str +''' + +import os +from ansible.module_utils.basic import AnsibleModule + + +class Swupd(object): + FILES_NOT_MATCH = "files did not match" + FILES_REPLACED = "missing files were replaced" + FILES_FIXED = "files were fixed" + FILES_DELETED = "files were deleted" + + def __init__(self, module): + # Fail if swupd is not found + self.module = module + self.swupd_cmd = module.get_bin_path("swupd", False) + if not self.swupd_cmd: + module.fail_json(msg="Could not find swupd.") + + # Initialize parameters + for key in module.params.keys(): + setattr(self, key, module.params[key]) + + # Initialize return values + self.changed = False + self.failed = False + self.msg = None + self.rc = None + self.stderr = "" + self.stdout = "" + + def _run_cmd(self, cmd): + self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False) + + def _get_cmd(self, command): + cmd = "%s %s" % (self.swupd_cmd, command) + + if self.format: + cmd += " --format=%s" % self.format + if self.manifest: + cmd += " --manifest=%s" % self.manifest + if self.url: + cmd += " --url=%s" % self.url + else: + if self.contenturl and command != "check-update": + cmd += " --contenturl=%s" % self.contenturl + if self.versionurl: + cmd += " --versionurl=%s" % self.versionurl + + return cmd + + def _is_bundle_installed(self, bundle): + try: + os.stat("/usr/share/clear/bundles/%s" % bundle) + except OSError: + return False + + return True + + def _needs_update(self): + cmd = self._get_cmd("check-update") + self._run_cmd(cmd) + + if self.rc == 0: + return True + + if self.rc == 1: + return False + + self.failed = True + self.msg = "Failed to check for updates" + + def _needs_verify(self): + cmd = self._get_cmd("verify") + self._run_cmd(cmd) + + if self.rc != 0: + self.failed = True + self.msg = "Failed to check for filesystem inconsistencies." 
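+
+        # even when the verify command fails, fall through and scan stdout:
+        # the "files did not match" marker signals that 'verify --fix' has work to do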
+        if self.FILES_NOT_MATCH in self.stdout:
+            return True
+
+        return False
+
+    def install_bundle(self, bundle):
+        """Installs a bundle with `swupd bundle-add bundle`"""
+        if self.module.check_mode:
+            self.module.exit_json(changed=not self._is_bundle_installed(bundle))
+
+        if self._is_bundle_installed(bundle):
+            self.msg = "Bundle %s is already installed" % bundle
+            return
+
+        cmd = self._get_cmd("bundle-add %s" % bundle)
+        self._run_cmd(cmd)
+
+        if self.rc == 0:
+            self.changed = True
+            self.msg = "Bundle %s installed" % bundle
+            return
+
+        self.failed = True
+        self.msg = "Failed to install bundle %s" % bundle
+
+    def remove_bundle(self, bundle):
+        """Removes a bundle with `swupd bundle-remove bundle`"""
+        if self.module.check_mode:
+            self.module.exit_json(changed=self._is_bundle_installed(bundle))
+
+        if not self._is_bundle_installed(bundle):
+            self.msg = "Bundle %s not installed" % bundle
+            return
+
+        cmd = self._get_cmd("bundle-remove %s" % bundle)
+        self._run_cmd(cmd)
+
+        if self.rc == 0:
+            self.changed = True
+            self.msg = "Bundle %s removed" % bundle
+            return
+
+        self.failed = True
+        self.msg = "Failed to remove bundle %s" % bundle
+
+    def update_os(self):
+        """Updates the OS with `swupd update`"""
+        if self.module.check_mode:
+            self.module.exit_json(changed=self._needs_update())
+
+        if not self._needs_update():
+            self.msg = "There are no updates available"
+            return
+
+        cmd = self._get_cmd("update")
+        self._run_cmd(cmd)
+
+        if self.rc == 0:
+            self.changed = True
+            self.msg = "Update successful"
+            return
+
+        self.failed = True
+        self.msg = "Failed to update the OS"
+
+    def verify_os(self):
+        """Verifies filesystem against specified or current version"""
+        if self.module.check_mode:
+            self.module.exit_json(changed=self._needs_verify())
+
+        if not self._needs_verify():
+            self.msg = "No files were changed"
+            return
+
+        cmd = self._get_cmd("verify --fix")
+        self._run_cmd(cmd)
+
+        if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
+            self.changed = True
+            self.msg = "Fix successful"
+            return
+
+        self.failed = True
+        self.msg = "Failed to verify the OS"
+
+
+def main():
+    """The main function."""
+    module = AnsibleModule(
+        argument_spec=dict(
+            contenturl=dict(type="str"),
+            format=dict(type="str"),
+            manifest=dict(aliases=["release", "version"], type="int"),
+            name=dict(aliases=["bundle"], type="str"),
+            state=dict(default="present", choices=["present", "absent"], type="str"),
+            update=dict(default=False, type="bool"),
+            url=dict(type="str"),
+            verify=dict(default=False, type="bool"),
+            versionurl=dict(type="str"),
+        ),
+        required_one_of=[["name", "update", "verify"]],
+        mutually_exclusive=[["name", "update", "verify"]],
+        supports_check_mode=True
+    )
+
+    swupd = Swupd(module)
+
+    name = module.params["name"]
+    state = module.params["state"]
+    update = module.params["update"]
+    verify = module.params["verify"]
+
+    if update:
+        swupd.update_os()
+    elif verify:
+        swupd.verify_os()
+    elif state == "present":
+        swupd.install_bundle(name)
+    elif state == "absent":
+        swupd.remove_bundle(name)
+    else:
+        swupd.failed = True
+
+    if swupd.failed:
+        module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+    else:
+        module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/syslogger.py b/ansible_collections/community/general/plugins/modules/syslogger.py
new file mode
100644 index 000000000..3a7abf4fb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/syslogger.py @@ -0,0 +1,198 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Tim Rightnour +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: syslogger +short_description: Log messages in the syslog +description: + - Uses syslog to add log entries to the host. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + msg: + type: str + description: + - This is the message to place in syslog. + required: true + priority: + type: str + description: + - Set the log priority. + choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ] + default: "info" + facility: + type: str + description: + - Set the log facility. + choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news", + "uucp", "cron", "syslog", "local0", "local1", "local2", + "local3", "local4", "local5", "local6", "local7" ] + default: "daemon" + log_pid: + description: + - Log the PID in brackets. + type: bool + default: false + ident: + description: + - Specify the name of application name which is sending the log to syslog. + type: str + default: 'ansible_syslogger' + version_added: '0.2.0' +author: + - Tim Rightnour (@garbled1) +''' + +EXAMPLES = r''' +- name: Simple Usage + community.general.syslogger: + msg: "I will end up as daemon.info" + +- name: Send a log message with err priority and user facility with log_pid + community.general.syslogger: + msg: "Hello from Ansible" + priority: "err" + facility: "user" + log_pid: true + +- name: Specify the name of application which is sending log message + community.general.syslogger: + ident: "MyApp" + msg: "I want to believe" + priority: "alert" +''' + +RETURN = r''' +ident: + description: Name of application sending the message to log + returned: always + type: str + sample: "ansible_syslogger" + version_added: '0.2.0' +priority: + description: Priority level + returned: always + type: str + sample: "daemon" +facility: + description: Syslog facility + returned: always + type: str + sample: "info" +log_pid: + description: Log PID status + returned: always + type: bool + sample: true +msg: + description: Message sent to syslog + returned: always + type: str + sample: "Hello from Ansible" +''' + +import syslog +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def get_facility(facility): + return { + 'kern': syslog.LOG_KERN, + 'user': syslog.LOG_USER, + 'mail': syslog.LOG_MAIL, + 'daemon': syslog.LOG_DAEMON, + 'auth': syslog.LOG_AUTH, + 'lpr': syslog.LOG_LPR, + 'news': syslog.LOG_NEWS, + 'uucp': syslog.LOG_UUCP, + 'cron': syslog.LOG_CRON, + 'syslog': syslog.LOG_SYSLOG, + 'local0': syslog.LOG_LOCAL0, + 'local1': syslog.LOG_LOCAL1, + 'local2': syslog.LOG_LOCAL2, + 'local3': syslog.LOG_LOCAL3, + 'local4': syslog.LOG_LOCAL4, + 'local5': syslog.LOG_LOCAL5, + 'local6': syslog.LOG_LOCAL6, + 'local7': syslog.LOG_LOCAL7 + }.get(facility, syslog.LOG_DAEMON) + + +def get_priority(priority): + return { + 'emerg': syslog.LOG_EMERG, + 'alert': syslog.LOG_ALERT, + 'crit': syslog.LOG_CRIT, + 'err': syslog.LOG_ERR, + 
'warning': syslog.LOG_WARNING, + 'notice': syslog.LOG_NOTICE, + 'info': syslog.LOG_INFO, + 'debug': syslog.LOG_DEBUG + }.get(priority, syslog.LOG_INFO) + + +def main(): + # define the available arguments/parameters that a user can pass to + # the module + module_args = dict( + ident=dict(type='str', default='ansible_syslogger'), + msg=dict(type='str', required=True), + priority=dict(type='str', required=False, + choices=["emerg", "alert", "crit", "err", "warning", + "notice", "info", "debug"], + default='info'), + facility=dict(type='str', required=False, + choices=["kern", "user", "mail", "daemon", "auth", + "lpr", "news", "uucp", "cron", "syslog", + "local0", "local1", "local2", "local3", + "local4", "local5", "local6", "local7"], + default='daemon'), + log_pid=dict(type='bool', required=False, default=False) + ) + + module = AnsibleModule( + argument_spec=module_args, + ) + + result = dict( + changed=False, + ident=module.params['ident'], + priority=module.params['priority'], + facility=module.params['facility'], + log_pid=module.params['log_pid'], + msg=module.params['msg'] + ) + + # do the logging + try: + syslog.openlog(module.params['ident'], + syslog.LOG_PID if module.params['log_pid'] else 0, + get_facility(module.params['facility'])) + syslog.syslog(get_priority(module.params['priority']), + module.params['msg']) + syslog.closelog() + result['changed'] = True + + except Exception as exc: + module.fail_json(error='Failed to write to syslog %s' % to_native(exc), exception=traceback.format_exc(), **result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/syspatch.py b/ansible_collections/community/general/plugins/modules/syspatch.py new file mode 100644 index 000000000..c90ef0d22 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/syspatch.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019-2020, Andrew Klaus +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: syspatch + +short_description: Manage OpenBSD system patches + + +description: + - "Manage OpenBSD system patches using syspatch." + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + revert: + description: + - Revert system patches. + type: str + choices: [ all, one ] + +author: + - Andrew Klaus (@precurse) +''' + +EXAMPLES = ''' +- name: Apply all available system patches + community.general.syspatch: + +- name: Revert last patch + community.general.syspatch: + revert: one + +- name: Revert all patches + community.general.syspatch: + revert: all + +# NOTE: You can reboot automatically if a patch requires it: +- name: Apply all patches and store result + community.general.syspatch: + register: syspatch + +- name: Reboot if patch requires it + ansible.builtin.reboot: + when: syspatch.reboot_needed +''' + +RETURN = r''' +rc: + description: The command return code (0 means success) + returned: always + type: int +stdout: + description: syspatch standard output. + returned: always + type: str + sample: "001_rip6cksum" +stderr: + description: syspatch standard error. 
+ returned: always + type: str + sample: "syspatch: need root privileges" +reboot_needed: + description: Whether or not a reboot is required after an update. + returned: always + type: bool + sample: true +''' + +from ansible.module_utils.basic import AnsibleModule + + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + revert=dict(type='str', choices=['all', 'one']) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + result = syspatch_run(module) + + module.exit_json(**result) + + +def syspatch_run(module): + cmd = module.get_bin_path('syspatch', True) + changed = False + reboot_needed = False + warnings = [] + + # Set safe defaults for run_flag and check_flag + run_flag = ['-c'] + check_flag = ['-c'] + if module.params['revert']: + check_flag = ['-l'] + + if module.params['revert'] == 'all': + run_flag = ['-R'] + else: + run_flag = ['-r'] + else: + check_flag = ['-c'] + run_flag = [] + + # Run check command + rc, out, err = module.run_command([cmd] + check_flag) + + if rc != 0: + module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err)) + + if len(out) > 0: + # Changes pending + change_pending = True + else: + # No changes pending + change_pending = False + + if module.check_mode: + changed = change_pending + elif change_pending: + rc, out, err = module.run_command([cmd] + run_flag) + + # Workaround syspatch ln bug: + # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html + if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n': + module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err)) + elif out.lower().find('create unique kernel') >= 0: + # Kernel update applied + reboot_needed = True + elif out.lower().find('syspatch updated itself') >= 0: + warnings.append('Syspatch was updated. Please run syspatch again.') + + # If no stdout, then warn user + if len(out) == 0: + warnings.append('syspatch had suggested changes, but stdout was empty.') + + changed = True + else: + changed = False + + return dict( + changed=changed, + reboot_needed=reboot_needed, + rc=rc, + stderr=err, + stdout=out, + warnings=warnings + ) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sysrc.py b/ansible_collections/community/general/plugins/modules/sysrc.py new file mode 100644 index 000000000..9652b629a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sysrc.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019 David Lundgren +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +author: + - David Lundgren (@dlundgren) +module: sysrc +short_description: Manage FreeBSD using sysrc +version_added: '2.0.0' +description: + - Manages C(/etc/rc.conf) for FreeBSD. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of variable in C(/etc/rc.conf) to manage. + type: str + required: true + value: + description: + - The value to set when I(state=present). + - The value to add when I(state=value_present). 
+ - The value to remove when I(state=value_absent). + type: str + state: + description: + - Use I(present) to add the variable. + - Use I(absent) to remove the variable. + - Use I(value_present) to add the value to the existing variable. + - Use I(value_absent) to remove the value from the existing variable. + type: str + default: "present" + choices: [ absent, present, value_present, value_absent ] + path: + description: + - Path to file to use instead of C(/etc/rc.conf). + type: str + default: "/etc/rc.conf" + delim: + description: + - Delimiter to be used instead of C( ). + - Only used when I(state=value_present) or I(state=value_absent). + default: " " + type: str + jail: + description: + - Name or ID of the jail to operate on. + type: str +notes: + - The C(name) cannot contain periods as sysrc does not support OID style names. +''' + +EXAMPLES = r''' +--- +# enable mysql in the /etc/rc.conf +- name: Configure mysql pid file + community.general.sysrc: + name: mysql_pidfile + value: "/var/run/mysqld/mysqld.pid" + +# enable accf_http kld in the boot loader +- name: Enable accf_http kld + community.general.sysrc: + name: accf_http_load + state: present + value: "YES" + path: /boot/loader.conf + +# add gif0 to cloned_interfaces +- name: Add gif0 interface + community.general.sysrc: + name: cloned_interfaces + state: value_present + value: "gif0" + +# enable nginx on a jail +- name: Enable nginx in test jail + community.general.sysrc: + name: nginx_enable + value: "YES" + jail: testjail +''' + +RETURN = r''' +changed: + description: Return changed for sysrc actions. + returned: always + type: bool + sample: true +''' + +from ansible.module_utils.basic import AnsibleModule +import re + + +class Sysrc(object): + def __init__(self, module, name, value, path, delim, jail): + self.module = module + self.name = name + self.changed = False + self.value = value + self.path = path + self.delim = delim + self.jail = jail + self.sysrc = module.get_bin_path('sysrc', True) + + def has_unknown_variable(self, out, err): + # newer versions of sysrc use stderr instead of stdout + return err.find("unknown variable") > 0 or out.find("unknown variable") > 0 + + def exists(self): + # sysrc doesn't really use exit codes + (rc, out, err) = self.run_sysrc(self.name) + if self.value is None: + regex = "%s: " % re.escape(self.name) + else: + regex = "%s: %s$" % (re.escape(self.name), re.escape(self.value)) + + return not self.has_unknown_variable(out, err) and re.match(regex, out) is not None + + def contains(self): + (rc, out, err) = self.run_sysrc('-n', self.name) + if self.has_unknown_variable(out, err): + return False + + return self.value in out.strip().split(self.delim) + + def present(self): + if self.exists(): + return + + if self.module.check_mode: + self.changed = True + return + + (rc, out, err) = self.run_sysrc("%s=%s" % (self.name, self.value)) + if out.find("%s:" % self.name) == 0 and re.search("-> %s$" % re.escape(self.value), out) is not None: + self.changed = True + + def absent(self): + if not self.exists(): + return + + # inversed since we still need to mark as changed + if not self.module.check_mode: + (rc, out, err) = self.run_sysrc('-x', self.name) + if self.has_unknown_variable(out, err): + return + + self.changed = True + + def value_present(self): + if self.contains(): + return + + if self.module.check_mode: + self.changed = True + return + + setstring = '%s+=%s%s' % (self.name, self.delim, self.value) + (rc, out, err) = self.run_sysrc(setstring) + if out.find("%s:" % self.name) == 0: + 
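# sysrc echoes changes as 'name: oldvalue -> newvalue'; parse the new value list to confirm the append took effect
+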
values = out.split(' -> ')[1].strip().split(self.delim) + if self.value in values: + self.changed = True + + def value_absent(self): + if not self.contains(): + return + + if self.module.check_mode: + self.changed = True + return + + setstring = '%s-=%s%s' % (self.name, self.delim, self.value) + (rc, out, err) = self.run_sysrc(setstring) + if out.find("%s:" % self.name) == 0: + values = out.split(' -> ')[1].strip().split(self.delim) + if self.value not in values: + self.changed = True + + def run_sysrc(self, *args): + cmd = [self.sysrc, '-f', self.path] + if self.jail: + cmd += ['-j', self.jail] + cmd.extend(args) + + (rc, out, err) = self.module.run_command(cmd) + + return (rc, out, err) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + value=dict(type='str', default=None), + state=dict(type='str', default='present', choices=['absent', 'present', 'value_present', 'value_absent']), + path=dict(type='str', default='/etc/rc.conf'), + delim=dict(type='str', default=' '), + jail=dict(type='str', default=None), + ), + supports_check_mode=True, + ) + + name = module.params.pop('name') + # OID style names are not supported + if not re.match('^[a-zA-Z0-9_]+$', name): + module.fail_json( + msg="Name may only contain alpha-numeric and underscore characters" + ) + + value = module.params.pop('value') + state = module.params.pop('state') + path = module.params.pop('path') + delim = module.params.pop('delim') + jail = module.params.pop('jail') + result = dict( + name=name, + state=state, + value=value, + path=path, + delim=delim, + jail=jail + ) + + rc_value = Sysrc(module, name, value, path, delim, jail) + + if state == 'present': + rc_value.present() + elif state == 'absent': + rc_value.absent() + elif state == 'value_present': + rc_value.value_present() + elif state == 'value_absent': + rc_value.value_absent() + + result['changed'] = rc_value.changed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/sysupgrade.py b/ansible_collections/community/general/plugins/modules/sysupgrade.py new file mode 100644 index 000000000..ac80e0196 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/sysupgrade.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Andrew Klaus +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: sysupgrade +short_description: Manage OpenBSD system upgrades +version_added: 1.1.0 +description: + - Manage OpenBSD system upgrades using sysupgrade. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + snapshot: + description: + - Apply the latest snapshot. + - Otherwise release will be applied. + default: false + type: bool + force: + description: + - Force upgrade (for snapshots only). + default: false + type: bool + keep_files: + description: + - Keep the files under /home/_sysupgrade. + - By default, the files will be deleted after the upgrade. + default: false + type: bool + fetch_only: + description: + - Fetch and verify files and create /bsd.upgrade but do not reboot. + - Set to C(false) if you want sysupgrade to reboot. 
This will cause Ansible to error, as it expects the module to exit gracefully. See the examples. + default: true + type: bool + installurl: + description: + - OpenBSD mirror top-level URL for fetching an upgrade. + - By default, the mirror URL is pulled from /etc/installurl. + type: str +author: + - Andrew Klaus (@precurse) +''' + +EXAMPLES = r''' +- name: Upgrade to latest release + community.general.sysupgrade: + register: sysupgrade + +- name: Upgrade to latest snapshot + community.general.sysupgrade: + snapshot: true + installurl: https://cloudflare.cdn.openbsd.org/pub/OpenBSD + register: sysupgrade + +- name: Reboot to apply upgrade if needed + ansible.builtin.reboot: + when: sysupgrade.changed + +# Note: Ansible will error when running this way due to how +# the reboot is forcefully handled by sysupgrade: + +- name: Have sysupgrade automatically reboot + community.general.sysupgrade: + fetch_only: false + ignore_errors: true +''' + +RETURN = r''' +rc: + description: The command return code (0 means success). + returned: always + type: int +stdout: + description: Sysupgrade standard output. + returned: always + type: str +stderr: + description: Sysupgrade standard error. + returned: always + type: str + sample: "sysupgrade: need root privileges" +''' + +from ansible.module_utils.basic import AnsibleModule + + +def sysupgrade_run(module): + sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True) + cmd = [sysupgrade_bin] + changed = False + warnings = [] + + # Setup command flags + if module.params['snapshot']: + run_flag = ['-s'] + if module.params['force']: + # Force only applies to snapshots + run_flag.append('-f') + else: + # release flag + run_flag = ['-r'] + + if module.params['keep_files']: + run_flag.append('-k') + + if module.params['fetch_only']: + run_flag.append('-n') + + # installurl must be the last argument + if module.params['installurl']: + run_flag.append(module.params['installurl']) + + rc, out, err = module.run_command(cmd + run_flag) + + if rc != 0: + module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err)) + elif out.lower().find('already on latest snapshot') >= 0: + changed = False + elif out.lower().find('upgrade on next reboot') >= 0: + changed = True + + return dict( + changed=changed, + rc=rc, + stderr=err, + stdout=out, + warnings=warnings + ) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + snapshot=dict(type='bool', default=False), + fetch_only=dict(type='bool', default=True), + force=dict(type='bool', default=False), + keep_files=dict(type='bool', default=False), + installurl=dict(type='str'), + ), + supports_check_mode=False, + ) + return_dict = sysupgrade_run(module) + module.exit_json(**return_dict) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/taiga_issue.py b/ansible_collections/community/general/plugins/modules/taiga_issue.py new file mode 100644 index 000000000..e80ff43b8 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/taiga_issue.py @@ -0,0 +1,319 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Alejandro Guirao +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: taiga_issue +short_description: Creates/deletes an issue in a Taiga Project 
Management Platform +description: + - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)). + - An issue is identified by the combination of project, issue subject and issue type. + - This module implements the creation or deletion of issues (not the update). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + taiga_host: + type: str + description: + - The hostname of the Taiga instance. + default: https://api.taiga.io + project: + type: str + description: + - Name of the project containing the issue. Must exist previously. + required: true + subject: + type: str + description: + - The issue subject. + required: true + issue_type: + type: str + description: + - The issue type. Must exist previously. + required: true + priority: + type: str + description: + - The issue priority. Must exist previously. + default: Normal + status: + type: str + description: + - The issue status. Must exist previously. + default: New + severity: + type: str + description: + - The issue severity. Must exist previously. + default: Normal + description: + type: str + description: + - The issue description. + default: "" + attachment: + type: path + description: + - Path to a file to be attached to the issue. + attachment_description: + type: str + description: + - A string describing the file to be attached to the issue. + default: "" + tags: + type: list + elements: str + description: + - A lists of tags to be assigned to the issue. + default: [] + state: + type: str + description: + - Whether the issue should be present or not. + choices: ["present", "absent"] + default: present +author: Alejandro Guirao (@lekum) +requirements: [python-taiga] +notes: +- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD +''' + +EXAMPLES = ''' +- name: Create an issue in the my hosted Taiga environment and attach an error log + community.general.taiga_issue: + taiga_host: https://mytaigahost.example.com + project: myproject + subject: An error has been found + issue_type: Bug + priority: High + status: New + severity: Important + description: An error has been found. Please check the attached error log for details. 
+ attachment: /path/to/error.log + attachment_description: Error log file + tags: + - Error + - Needs manual check + state: present + +- name: Deletes the previously created issue + community.general.taiga_issue: + taiga_host: https://mytaigahost.example.com + project: myproject + subject: An error has been found + issue_type: Bug + state: absent +''' + +RETURN = '''# ''' +import traceback + +from os import getenv +from os.path import isfile +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +TAIGA_IMP_ERR = None +try: + from taiga import TaigaAPI + from taiga.exceptions import TaigaException + TAIGA_MODULE_IMPORTED = True +except ImportError: + TAIGA_IMP_ERR = traceback.format_exc() + TAIGA_MODULE_IMPORTED = False + + +def manage_issue(taiga_host, project_name, issue_subject, issue_priority, + issue_status, issue_type, issue_severity, issue_description, + issue_attachment, issue_attachment_description, + issue_tags, state, check_mode=False): + """ + Method that creates/deletes issues depending whether they exist and the state desired + + The credentials should be passed via environment variables: + - TAIGA_TOKEN + - TAIGA_USERNAME and TAIGA_PASSWORD + + Returns a tuple with these elements: + - A boolean representing the success of the operation + - A descriptive message + - A dict with the issue attributes, in case of issue creation, otherwise empty dict + """ + + changed = False + + try: + token = getenv('TAIGA_TOKEN') + if token: + api = TaigaAPI(host=taiga_host, token=token) + else: + api = TaigaAPI(host=taiga_host) + username = getenv('TAIGA_USERNAME') + password = getenv('TAIGA_PASSWORD') + if not any([username, password]): + return False, changed, "Missing credentials", {} + api.auth(username=username, password=password) + + user_id = api.me().id + project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id))) + if len(project_list) != 1: + return False, changed, "Unable to find project %s" % project_name, {} + project = project_list[0] + project_id = project.id + + priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id))) + if len(priority_list) != 1: + return False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {} + priority_id = priority_list[0].id + + status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id))) + if len(status_list) != 1: + return False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {} + status_id = status_list[0].id + + type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types())) + if len(type_list) != 1: + return False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {} + type_id = type_list[0].id + + severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities())) + if len(severity_list) != 1: + return False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {} + severity_id = severity_list[0].id + + issue = { + "project": project_name, + "subject": issue_subject, + "priority": issue_priority, + "status": issue_status, + "type": issue_type, + "severity": issue_severity, + "description": issue_description, + "tags": issue_tags, + } + + # An issue is identified by the project_name, the issue_subject and the issue_type + 
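# (priority, status, severity and description are deliberately not compared, so they never affect the match)
+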
matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues())) + matching_issue_list_len = len(matching_issue_list) + + if matching_issue_list_len == 0: + # The issue does not exist in the project + if state == "present": + # This implies a change + changed = True + if not check_mode: + # Create the issue + new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, + description=issue_description) + if issue_attachment: + new_issue.attach(issue_attachment, description=issue_attachment_description) + issue["attachment"] = issue_attachment + issue["attachment_description"] = issue_attachment_description + return True, changed, "Issue created", issue + + else: + # If does not exist, do nothing + return True, changed, "Issue does not exist", {} + + elif matching_issue_list_len == 1: + # The issue exists in the project + if state == "absent": + # This implies a change + changed = True + if not check_mode: + # Delete the issue + matching_issue_list[0].delete() + return True, changed, "Issue deleted", {} + + else: + # Do nothing + return True, changed, "Issue already exists", {} + + else: + # More than 1 matching issue + return False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {} + + except TaigaException as exc: + msg = "An exception happened: %s" % to_native(exc) + return False, changed, msg, {} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + taiga_host=dict(type='str', required=False, default="https://api.taiga.io"), + project=dict(type='str', required=True), + subject=dict(type='str', required=True), + issue_type=dict(type='str', required=True), + priority=dict(type='str', required=False, default="Normal"), + status=dict(type='str', required=False, default="New"), + severity=dict(type='str', required=False, default="Normal"), + description=dict(type='str', required=False, default=""), + attachment=dict(type='path', required=False, default=None), + attachment_description=dict(type='str', required=False, default=""), + tags=dict(required=False, default=[], type='list', elements='str'), + state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), + ), + supports_check_mode=True + ) + + if not TAIGA_MODULE_IMPORTED: + module.fail_json(msg=missing_required_lib("python-taiga"), exception=TAIGA_IMP_ERR) + + taiga_host = module.params['taiga_host'] + project_name = module.params['project'] + issue_subject = module.params['subject'] + issue_priority = module.params['priority'] + issue_status = module.params['status'] + issue_type = module.params['issue_type'] + issue_severity = module.params['severity'] + issue_description = module.params['description'] + issue_attachment = module.params['attachment'] + issue_attachment_description = module.params['attachment_description'] + if issue_attachment: + if not isfile(issue_attachment): + msg = "%s is not a file" % issue_attachment + module.fail_json(msg=msg) + issue_tags = module.params['tags'] + state = module.params['state'] + + return_status, changed, msg, issue_attr_dict = manage_issue( + taiga_host, + project_name, + issue_subject, + issue_priority, + issue_status, + issue_type, + issue_severity, + issue_description, + issue_attachment, + issue_attachment_description, + issue_tags, + state, + check_mode=module.check_mode + ) + if return_status: + if issue_attr_dict: + module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict) + else: + 
module.exit_json(changed=changed, msg=msg)
+    else:
+        module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/telegram.py b/ansible_collections/community/general/plugins/modules/telegram.py
new file mode 100644
index 000000000..d13e90fd5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/telegram.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Artem Feofanov
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: telegram
+author:
+  - "Artem Feofanov (@tyouxa)"
+  - "Nikolai Lomov (@lomserman)"
+
+short_description: Send notifications via Telegram
+
+description:
+  - Send notifications via a Telegram bot to a verified group or user.
+  - You may also call any other method of the Telegram bot API by specifying the I(api_method) argument.
+notes:
+  - You need a Telegram account and have to create a Telegram bot to use this module.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  token:
+    type: str
+    description:
+      - Token identifying your Telegram bot.
+    required: true
+  api_method:
+    type: str
+    description:
+      - Bot API method.
+      - For reference, see U(https://core.telegram.org/bots/api).
+    default: SendMessage
+    version_added: 2.0.0
+  api_args:
+    type: dict
+    description:
+      - Any parameters for the method.
+      - For reference to the default method, C(SendMessage), see U(https://core.telegram.org/bots/api#sendmessage).
+ version_added: 2.0.0 + +''' + +EXAMPLES = """ + +- name: Send notify to Telegram + community.general.telegram: + token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX' + api_args: + chat_id: 000000 + parse_mode: "markdown" + text: "Your precious application has been deployed: https://example.com" + disable_web_page_preview: true + disable_notification: true + +- name: Forward message to someone + community.general.telegram: + token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX' + api_method: forwardMessage + api_args: + chat_id: 000000 + from_chat_id: 111111 + disable_notification: true + message_id: '{{ saved_msg_id }}' +""" + +RETURN = """ + +msg: + description: The message you attempted to send + returned: success + type: str + sample: "Ansible task finished" +telegram_error: + description: Error message gotten from Telegram API + returned: failure + type: str + sample: "Bad Request: message text is empty" +""" + +import json + +from ansible.module_utils.basic import AnsibleModule +# noinspection PyUnresolvedReferences +from ansible.module_utils.six.moves.urllib.parse import quote +from ansible.module_utils.urls import fetch_url + + +def main(): + module = AnsibleModule( + argument_spec=dict( + token=dict(type='str', required=True, no_log=True), + api_args=dict(type='dict'), + api_method=dict(type="str", default="SendMessage"), + ), + supports_check_mode=True + ) + + token = quote(module.params.get('token')) + api_args = module.params.get('api_args') or {} + api_method = module.params.get('api_method') + # filling backward compatibility args + api_args['chat_id'] = api_args.get('chat_id') + api_args['parse_mode'] = api_args.get('parse_mode') + api_args['text'] = api_args.get('text') + + if api_args['parse_mode'] == 'plain': + del api_args['parse_mode'] + + url = 'https://api.telegram.org/bot{token}/{api_method}'.format(token=token, api_method=api_method) + + if module.check_mode: + module.exit_json(changed=False) + + response, info = fetch_url(module, url, method="POST", data=json.dumps(api_args), + headers={'Content-Type': 'application/json'}) + if info['status'] == 200: + module.exit_json(changed=True) + elif info['status'] == -1: + # SSL errors, connection problems, etc. + module.fail_json(msg="Failed to send message", info=info, response=response) + else: + body = json.loads(info['body']) + module.fail_json( + msg="Failed to send message, return status = {status}\n" + "url = {api_url}\n" + "api_args = {api_args}".format( + status=info['status'], api_url=url, api_args=api_args + ), + telegram_error=body['description'], + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/terraform.py b/ansible_collections/community/general/plugins/modules/terraform.py new file mode 100644 index 000000000..f9f809220 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/terraform.py @@ -0,0 +1,659 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Ryan Scott Brown +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: terraform +short_description: Manages a Terraform deployment (and plans) +description: + - Provides support for deploying resources with Terraform and pulling + resource information back into Ansible. 
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  state:
+    choices: ['planned', 'present', 'absent']
+    description:
+      - Goal state of the given stage/project.
+    type: str
+    default: present
+  binary_path:
+    description:
+      - The path of a terraform binary to use, relative to the I(project_path)
+        unless you supply an absolute path.
+    type: path
+  project_path:
+    description:
+      - The path to the root of the Terraform directory with the
+        vars.tf/main.tf/etc to use.
+    type: path
+    required: true
+  plugin_paths:
+    description:
+      - List of paths containing Terraform plugin executable files.
+      - Plugin executables can be downloaded from U(https://releases.hashicorp.com/).
+      - When set, the plugin discovery and auto-download behavior of Terraform is disabled.
+      - The directory structure in the plugin path can be tricky. The Terraform docs
+        U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins)
+        show a simple directory of files, but actually, the directory structure
+        has to follow the same structure you would see if Terraform auto-downloaded the plugins.
+        See the examples below for a tree output of an example plugin directory.
+    type: list
+    elements: path
+    version_added: 3.0.0
+  workspace:
+    description:
+      - The terraform workspace to work with. This sets the C(TF_WORKSPACE) environment variable
+        that is used to override workspace selection. For more information about workspaces
+        have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces).
+    type: str
+    default: default
+  purge_workspace:
+    description:
+      - Only works with I(state=absent).
+      - If C(true), the workspace will be deleted after the C(terraform destroy) action.
+      - The C(default) workspace will not be deleted.
+    default: false
+    type: bool
+  plan_file:
+    description:
+      - The path to an existing Terraform plan file to apply. If this is not
+        specified, Ansible will build a new TF plan and execute it.
+        Note that this option is required if I(state) has the value C(planned).
+    type: path
+  state_file:
+    description:
+      - The path to an existing Terraform state file to use when building the plan.
+        If this is not specified, the default C(terraform.tfstate) will be used.
+      - This option is ignored when a plan file is specified.
+    type: path
+  variables_files:
+    description:
+      - The path to a variables file for Terraform to fill into the TF
+        configurations. This can accept a list of paths to multiple variables files.
+      - Up until Ansible 2.9, this option was usable as I(variables_file).
+    type: list
+    elements: path
+    aliases: [ 'variables_file' ]
+  variables:
+    description:
+      - A group of key-value pairs to override template variables or those in variables files.
+        By default, only string and number values are allowed, which are passed on unquoted.
+      - Supports complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when I(complex_vars=true).
+      - Ansible integers or floats are mapped to terraform numbers.
+      - Ansible strings are mapped to terraform strings.
+      - Ansible dictionaries are mapped to terraform objects.
+      - Ansible lists are mapped to terraform lists.
+      - Ansible booleans are mapped to terraform booleans.
+      - "B(Note) passwords passed as variables will be visible in the log output. Make sure to use I(no_log=true) in production!"
+ type: dict + complex_vars: + description: + - Enable/disable capability to handle complex variable structures for C(terraform). + - If C(true) the I(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform). + Strings that are passed are correctly quoted. + - When disabled, supports only simple variables (strings, integers, and floats), and passes them on unquoted. + type: bool + default: false + version_added: 5.7.0 + targets: + description: + - A list of specific resources to target in this plan/application. The + resources selected here will also auto-include any dependencies. + type: list + elements: str + default: [] + lock: + description: + - Enable statefile locking, if you use a service that accepts locks (such + as S3+DynamoDB) to store your statefile. + type: bool + default: true + lock_timeout: + description: + - How long to maintain the lock on the statefile, if you use a service + that accepts locks (such as S3+DynamoDB). + type: int + force_init: + description: + - To avoid duplicating infra, if a state file can't be found this will + force a C(terraform init). Generally, this should be turned off unless + you intend to provision an entirely new Terraform deployment. + default: false + type: bool + overwrite_init: + description: + - Run init even if C(.terraform/terraform.tfstate) already exists in I(project_path). + default: true + type: bool + version_added: '3.2.0' + backend_config: + description: + - A group of key-values to provide at init stage to the -backend-config parameter. + type: dict + backend_config_files: + description: + - The path to a configuration file to provide at init state to the -backend-config parameter. + This can accept a list of paths to multiple configuration files. + type: list + elements: path + version_added: '0.2.0' + provider_upgrade: + description: + - Allows Terraform init to upgrade providers to versions specified in the project's version constraints. + default: false + type: bool + version_added: 4.8.0 + init_reconfigure: + description: + - Forces backend reconfiguration during init. + default: false + type: bool + version_added: '1.3.0' + check_destroy: + description: + - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, + but not "destroy and re-create" actions. This option is ignored when I(state=absent). + type: bool + default: false + version_added: '3.3.0' + parallelism: + description: + - Restrict concurrent operations when Terraform applies the plan. + type: int + version_added: '3.8.0' +notes: + - To just run a C(terraform plan), use check mode. 
+requirements: [ "terraform" ] +author: "Ryan Scott Brown (@ryansb)" +''' + +EXAMPLES = """ +- name: Basic deploy of a service + community.general.terraform: + project_path: '{{ project_dir }}' + state: present + +- name: Define the backend configuration at init + community.general.terraform: + project_path: 'project/' + state: "{{ state }}" + force_init: true + backend_config: + region: "eu-west-1" + bucket: "some-bucket" + key: "random.tfstate" + +- name: Define the backend configuration with one or more files at init + community.general.terraform: + project_path: 'project/' + state: "{{ state }}" + force_init: true + backend_config_files: + - /path/to/backend_config_file_1 + - /path/to/backend_config_file_2 + +- name: Disable plugin discovery and auto-download by setting plugin_paths + community.general.terraform: + project_path: 'project/' + state: "{{ state }}" + force_init: true + plugin_paths: + - /path/to/plugins_dir_1 + - /path/to/plugins_dir_2 + +- name: Complex variables example + community.general.terraform: + project_path: '{{ project_dir }}' + state: present + complex_vars: true + variables: + vm_name: "{{ inventory_hostname }}" + vm_vcpus: 2 + vm_mem: 2048 + vm_additional_disks: + - label: "Third Disk" + size: 40 + thin_provisioned: true + unit_number: 2 + - label: "Fourth Disk" + size: 22 + thin_provisioned: true + unit_number: 3 + force_init: true + +### Example directory structure for plugin_paths example +# $ tree /path/to/plugins_dir_1 +# /path/to/plugins_dir_1/ +# └── registry.terraform.io +# └── hashicorp +# └── vsphere +# ├── 1.24.0 +# │ └── linux_amd64 +# │ └── terraform-provider-vsphere_v1.24.0_x4 +# └── 1.26.0 +# └── linux_amd64 +# └── terraform-provider-vsphere_v1.26.0_x4 +""" + +RETURN = """ +outputs: + type: complex + description: A dictionary of all the TF outputs by their assigned name. Use C(.outputs.MyOutputName.value) to access the value. + returned: on success + sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}' + contains: + sensitive: + type: bool + returned: always + description: Whether Terraform has marked this value as sensitive + type: + type: str + returned: always + description: The type of the value (string, int, etc) + value: + type: str + returned: always + description: The value of the output as interpolated by Terraform +stdout: + type: str + description: Full C(terraform) command stdout, in case you want to display it or examine the event log + returned: always + sample: '' +command: + type: str + description: Full C(terraform) command built by this module, in case you want to re-run the command outside the module or debug a problem. + returned: always + sample: terraform apply ... 
+""" + +import os +import json +import tempfile +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils.six import integer_types + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +module = None + + +def get_version(bin_path): + extract_version = module.run_command([bin_path, 'version', '-json']) + terraform_version = (json.loads(extract_version[1]))['terraform_version'] + return terraform_version + + +def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None): + if project_path is None or '/' not in project_path: + module.fail_json(msg="Path for Terraform project can not be None or ''.") + if not os.path.exists(bin_path): + module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path)) + if not os.path.isdir(project_path): + module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path)) + if LooseVersion(version) < LooseVersion('0.15.0'): + module.run_command([bin_path, 'validate', '-no-color'] + variables_args, check_rc=True, cwd=project_path) + else: + module.run_command([bin_path, 'validate', '-no-color'], check_rc=True, cwd=project_path) + + +def _state_args(state_file): + if not state_file: + return [] + if not os.path.exists(state_file): + module.warn('Could not find state_file "{0}", the process will not destroy any resources, please check your state file path.'.format(state_file)) + return ['-state', state_file] + + +def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace): + command = [bin_path, 'init', '-input=false', '-no-color'] + if backend_config: + for key, val in backend_config.items(): + command.extend([ + '-backend-config', + shlex_quote('{0}={1}'.format(key, val)) + ]) + if backend_config_files: + for f in backend_config_files: + command.extend(['-backend-config', f]) + if init_reconfigure: + command.extend(['-reconfigure']) + if provider_upgrade: + command.extend(['-upgrade']) + if plugin_paths: + for plugin_path in plugin_paths: + command.extend(['-plugin-dir', plugin_path]) + rc, out, err = module.run_command(command, check_rc=True, cwd=project_path, environ_update={"TF_WORKSPACE": workspace}) + + +def get_workspace_context(bin_path, project_path): + workspace_ctx = {"current": "default", "all": []} + command = [bin_path, 'workspace', 'list', '-no-color'] + rc, out, err = module.run_command(command, cwd=project_path) + if rc != 0: + module.warn("Failed to list Terraform workspaces:\n{0}".format(err)) + for item in out.split('\n'): + stripped_item = item.strip() + if not stripped_item: + continue + elif stripped_item.startswith('* '): + workspace_ctx["current"] = stripped_item.replace('* ', '') + workspace_ctx["all"].append(stripped_item.replace('* ', '')) + else: + workspace_ctx["all"].append(stripped_item) + return workspace_ctx + + +def _workspace_cmd(bin_path, project_path, action, workspace): + command = [bin_path, 'workspace', action, workspace, '-no-color'] + rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) + return rc, out, err + + +def create_workspace(bin_path, project_path, workspace): + _workspace_cmd(bin_path, project_path, 'new', workspace) + + +def select_workspace(bin_path, project_path, workspace): + _workspace_cmd(bin_path, 
project_path, 'select', workspace) + + +def remove_workspace(bin_path, project_path, workspace): + _workspace_cmd(bin_path, project_path, 'delete', workspace) + + +def build_plan(command, project_path, variables_args, state_file, targets, state, apply_args, plan_path=None): + if plan_path is None: + f, plan_path = tempfile.mkstemp(suffix='.tfplan') + + local_command = command[:] + + plan_command = [command[0], 'plan'] + + if state == "planned": + for c in local_command[1:]: + plan_command.append(c) + + if state == "present": + for a in apply_args: + local_command.remove(a) + for c in local_command[1:]: + plan_command.append(c) + + plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]) + + for t in targets: + plan_command.extend(['-target', t]) + + plan_command.extend(_state_args(state_file)) + + rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path) + + if rc == 0: + # no changes + return plan_path, False, out, err, plan_command if state == 'planned' else command + elif rc == 1: + # failure to plan + module.fail_json( + msg='Terraform plan could not be created\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format( + out=out, + err=err, + cmd=' '.join(plan_command), + args=' '.join([shlex_quote(arg) for arg in variables_args]) + ) + ) + elif rc == 2: + # changes, but successful + return plan_path, True, out, err, plan_command if state == 'planned' else command + + module.fail_json(msg='Terraform plan failed with unexpected exit code {rc}.\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format( + rc=rc, + out=out, + err=err, + cmd=' '.join(plan_command), + args=' '.join([shlex_quote(arg) for arg in variables_args]) + )) + + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + project_path=dict(required=True, type='path'), + binary_path=dict(type='path'), + plugin_paths=dict(type='list', elements='path'), + workspace=dict(type='str', default='default'), + purge_workspace=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent', 'planned']), + variables=dict(type='dict'), + complex_vars=dict(type='bool', default=False), + variables_files=dict(aliases=['variables_file'], type='list', elements='path'), + plan_file=dict(type='path'), + state_file=dict(type='path'), + targets=dict(type='list', elements='str', default=[]), + lock=dict(type='bool', default=True), + lock_timeout=dict(type='int',), + force_init=dict(type='bool', default=False), + backend_config=dict(type='dict'), + backend_config_files=dict(type='list', elements='path'), + init_reconfigure=dict(type='bool', default=False), + overwrite_init=dict(type='bool', default=True), + check_destroy=dict(type='bool', default=False), + parallelism=dict(type='int'), + provider_upgrade=dict(type='bool', default=False), + ), + required_if=[('state', 'planned', ['plan_file'])], + supports_check_mode=True, + ) + + project_path = module.params.get('project_path') + bin_path = module.params.get('binary_path') + plugin_paths = module.params.get('plugin_paths') + workspace = module.params.get('workspace') + purge_workspace = module.params.get('purge_workspace') + state = module.params.get('state') + variables = module.params.get('variables') or {} + complex_vars = module.params.get('complex_vars') + variables_files = module.params.get('variables_files') + plan_file = module.params.get('plan_file') + state_file = module.params.get('state_file') + force_init = module.params.get('force_init') + backend_config = 
module.params.get('backend_config') + backend_config_files = module.params.get('backend_config_files') + init_reconfigure = module.params.get('init_reconfigure') + overwrite_init = module.params.get('overwrite_init') + check_destroy = module.params.get('check_destroy') + provider_upgrade = module.params.get('provider_upgrade') + + if bin_path is not None: + command = [bin_path] + else: + command = [module.get_bin_path('terraform', required=True)] + + checked_version = get_version(command[0]) + + if LooseVersion(checked_version) < LooseVersion('0.15.0'): + DESTROY_ARGS = ('destroy', '-no-color', '-force') + APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') + else: + DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve') + APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') + + if force_init: + if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")): + init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace) + + workspace_ctx = get_workspace_context(command[0], project_path) + if workspace_ctx["current"] != workspace: + if workspace not in workspace_ctx["all"]: + create_workspace(command[0], project_path, workspace) + else: + select_workspace(command[0], project_path, workspace) + + if state == 'present': + command.extend(APPLY_ARGS) + elif state == 'absent': + command.extend(DESTROY_ARGS) + + if state == 'present' and module.params.get('parallelism') is not None: + command.append('-parallelism=%d' % module.params.get('parallelism')) + + def format_args(vars): + if isinstance(vars, str): + return '"{string}"'.format(string=vars.replace('\\', '\\\\').replace('"', '\\"')) + elif isinstance(vars, bool): + if vars: + return 'true' + else: + return 'false' + return str(vars) + + def process_complex_args(vars): + ret_out = [] + if isinstance(vars, dict): + for k, v in vars.items(): + if isinstance(v, dict): + ret_out.append('{0}={{{1}}}'.format(k, process_complex_args(v))) + elif isinstance(v, list): + ret_out.append("{0}={1}".format(k, process_complex_args(v))) + elif isinstance(v, (integer_types, float, str, bool)): + ret_out.append('{0}={1}'.format(k, format_args(v))) + else: + # only to handle anything unforeseen + module.fail_json(msg="Supported types are, dictionaries, lists, strings, integer_types, boolean and float.") + if isinstance(vars, list): + l_out = [] + for item in vars: + if isinstance(item, dict): + l_out.append("{{{0}}}".format(process_complex_args(item))) + elif isinstance(item, list): + l_out.append("{0}".format(process_complex_args(item))) + elif isinstance(item, (str, integer_types, float, bool)): + l_out.append(format_args(item)) + else: + # only to handle anything unforeseen + module.fail_json(msg="Supported types are, dictionaries, lists, strings, integer_types, boolean and float.") + + ret_out.append("[{0}]".format(",".join(l_out))) + return ",".join(ret_out) + + variables_args = [] + if complex_vars: + for k, v in variables.items(): + if isinstance(v, dict): + variables_args.extend([ + '-var', + '{0}={{{1}}}'.format(k, process_complex_args(v)) + ]) + elif isinstance(v, list): + variables_args.extend([ + '-var', + '{0}={1}'.format(k, process_complex_args(v)) + ]) + # on the top-level we need to pass just the python string with necessary + # terraform string escape sequences + elif isinstance(v, str): + variables_args.extend([ + '-var', + "{0}={1}".format(k, v) + ]) + else: + variables_args.extend([ + 
'-var', + '{0}={1}'.format(k, format_args(v)) + ]) + else: + for k, v in variables.items(): + variables_args.extend([ + '-var', + '{0}={1}'.format(k, v) + ]) + + if variables_files: + for f in variables_files: + variables_args.extend(['-var-file', f]) + + preflight_validation(command[0], project_path, checked_version, variables_args) + + if module.params.get('lock') is not None: + if module.params.get('lock'): + command.append('-lock=true') + else: + command.append('-lock=false') + if module.params.get('lock_timeout') is not None: + command.append('-lock-timeout=%ds' % module.params.get('lock_timeout')) + + for t in (module.params.get('targets') or []): + command.extend(['-target', t]) + + # we aren't sure if this plan will result in changes, so assume yes + needs_application, changed = True, False + + out, err = '', '' + + if state == 'absent': + command.extend(variables_args) + elif state == 'present' and plan_file: + if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]): + command.append(plan_file) + else: + module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file)) + else: + plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, + module.params.get('targets'), state, APPLY_ARGS, plan_file) + if state == 'present' and check_destroy and '- destroy' in out: + module.fail_json(msg="Aborting command because it would destroy some resources. " + "Consider switching the 'check_destroy' to false to suppress this error") + command.append(plan_file) + + if needs_application and not module.check_mode and state != 'planned': + rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) + if rc != 0: + if workspace_ctx["current"] != workspace: + select_workspace(command[0], project_path, workspace_ctx["current"]) + module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, + stdout_lines=out.splitlines(), stderr=err, + stderr_lines=err.splitlines(), + cmd=' '.join(command)) + # checks out to decide if changes were made during execution + if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out: + changed = True + + outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file) + rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path) + outputs = {} + if rc == 1: + module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err)) + elif rc != 0: + module.fail_json( + msg="Failure when getting Terraform outputs. 
" + "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err), + command=' '.join(outputs_command)) + else: + outputs = json.loads(outputs_text) + + # Restore the Terraform workspace found when running the module + if workspace_ctx["current"] != workspace: + select_workspace(command[0], project_path, workspace_ctx["current"]) + if state == 'absent' and workspace != 'default' and purge_workspace is True: + remove_workspace(command[0], project_path, workspace) + + module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/timezone.py b/ansible_collections/community/general/plugins/modules/timezone.py new file mode 100644 index 000000000..05849e4bb --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/timezone.py @@ -0,0 +1,923 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Shinichi TAMURA (@tmshn) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: timezone +short_description: Configure timezone setting +description: + - This module configures the timezone setting, both of the system clock and of the hardware clock. + If you want to set up the NTP, use M(ansible.builtin.service) module. + - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time. + - Several different tools are used depending on the OS/Distribution involved. + For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock). + On SmartOS, C(sm-set-timezone), for macOS, C(systemsetup), for BSD, C(/etc/localtime) is modified. + On AIX, C(chtz) is used. + - Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed, + when not using a minimal installation like Alpine Linux). + - As of Ansible 2.3 support was added for SmartOS and BSDs. + - As of Ansible 2.4 support was added for macOS. + - As of Ansible 2.9 support was added for AIX 6.1+ + - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + name: + description: + - Name of the timezone for the system clock. + - Default is to keep current setting. + - B(At least one of name and hwclock are required.) + type: str + hwclock: + description: + - Whether the hardware clock is in UTC or in local timezone. + - Default is to keep current setting. + - Note that this option is recommended not to change and may fail + to configure, especially on virtual environments such as AWS. + - B(At least one of name and hwclock are required.) + - I(Only used on Linux.) + type: str + aliases: [ rtc ] + choices: [ local, UTC ] +notes: + - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone + - On AIX only Olson/tz database timezones are useable (POSIX is not supported). + - An OS reboot is also required on AIX for the new timezone setting to take effect. 
+author:
+  - Shinichi TAMURA (@tmshn)
+  - Jasper Lievisse Adriaanse (@jasperla)
+  - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+  description: The before/after differences for the given arguments.
+  returned: success
+  type: complex
+  contains:
+    before:
+      description: The values before the change.
+      type: dict
+    after:
+      description: The values after the change.
+      type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+  community.general.timezone:
+    name: Asia/Tokyo
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+    """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+    A subclass may wish to override the following action methods:
+        - get(key, phase) ... get the value from the system at `phase`
+        - set(key, value) ... set the value to the current system
+    """
+
+    def __new__(cls, module):
+        """Return the platform-specific subclass.
+
+        It does not use load_platform_subclass() because it needs to judge based
+        on whether the `timedatectl` command exists and is available.
+
+        Args:
+            module: The AnsibleModule.
+        """
+        if platform.system() == 'Linux':
+            timedatectl = module.get_bin_path('timedatectl')
+            if timedatectl is not None:
+                rc, stdout, stderr = module.run_command(timedatectl)
+                if rc == 0:
+                    return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+                else:
+                    module.debug('timedatectl command was found but not usable: %s. Using another method.' % stderr)
+                    return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+            else:
+                return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+        elif re.match('^joyent_.*Z', platform.version()):
+            # platform.system() returns SunOS, which is too broad. So look at the
+            # platform version instead. However we have to ensure that we're not
+            # running in the global zone where changing the timezone has no effect.
+            zonename_cmd = module.get_bin_path('zonename')
+            if zonename_cmd is not None:
+                (rc, stdout, dummy) = module.run_command(zonename_cmd)
+                if rc == 0 and stdout.strip() == 'global':
+                    module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+            return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+        elif platform.system() == 'Darwin':
+            return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+        elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+            return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+        elif platform.system() == 'AIX':
+            AIXoslevel = int(platform.version() + platform.release())
+            if AIXoslevel >= 61:
+                return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+            else:
+                module.fail_json(msg='AIX OS level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+        else:
+            # Not supported yet
+            return super(Timezone, Timezone).__new__(Timezone)
+
+    def __init__(self, module):
+        """Initialize the class.
+
+        Args:
+            module: The AnsibleModule.
+        """
+        super(Timezone, self).__init__()
+        self.msg = []
+        # `self.value` holds the values for each param in each phase.
+        # Initially it only holds info for the "planned" phase, but the
+        # `self.check()` function will fill it out.
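+        # Illustrative shape (values are host-dependent, shown only as an example):
+        # after check('before') and change() have run, self.value may look like
+        # {'name': {'planned': 'Asia/Tokyo', 'before': 'UTC', 'after': 'Asia/Tokyo'}}.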
+ self.value = dict() + for key in module.argument_spec: + value = module.params[key] + if value is not None: + self.value[key] = dict(planned=value) + self.module = module + + def abort(self, msg): + """Abort the process with error message. + + This is just the wrapper of module.fail_json(). + + Args: + msg: The error message. + """ + error_msg = ['Error message:', msg] + if len(self.msg) > 0: + error_msg.append('Other message(s):') + error_msg.extend(self.msg) + self.module.fail_json(msg='\n'.join(error_msg)) + + def execute(self, *commands, **kwargs): + """Execute the shell command. + + This is just the wrapper of module.run_command(). + + Args: + *commands: The command to execute. + It will be concatenated with single space. + **kwargs: Only 'log' key is checked. + If kwargs['log'] is true, record the command to self.msg. + + Returns: + stdout: Standard output of the command. + """ + command = ' '.join(commands) + (rc, stdout, stderr) = self.module.run_command(command, check_rc=True) + if kwargs.get('log', False): + self.msg.append('executed `%s`' % command) + return stdout + + def diff(self, phase1='before', phase2='after'): + """Calculate the difference between given 2 phases. + + Args: + phase1, phase2: The names of phase to compare. + + Returns: + diff: The difference of value between phase1 and phase2. + This is in the format which can be used with the + `--diff` option of ansible-playbook. + """ + diff = {phase1: {}, phase2: {}} + for key, value in iteritems(self.value): + diff[phase1][key] = value[phase1] + diff[phase2][key] = value[phase2] + return diff + + def check(self, phase): + """Check the state in given phase and set it to `self.value`. + + Args: + phase: The name of the phase to check. + + Returns: + NO RETURN VALUE + """ + if phase == 'planned': + return + for key, value in iteritems(self.value): + value[phase] = self.get(key, phase) + + def change(self): + """Make the changes effect based on `self.value`.""" + for key, value in iteritems(self.value): + if value['before'] != value['planned']: + self.set(key, value['planned']) + + # =========================================== + # Platform specific methods (must be replaced by subclass). + + def get(self, key, phase): + """Get the value for the key at the given phase. + + Called from self.check(). + + Args: + key: The key to get the value + phase: The phase to get the value + + Return: + value: The value for the key at the given phase. + """ + self.abort('get(key, phase) is not implemented on target platform') + + def set(self, key, value): + """Set the value for the key (of course, for the phase 'after'). + + Called from self.change(). + + Args: + key: Key to set the value + value: Value to set + """ + self.abort('set(key, value) is not implemented on target platform') + + def _verify_timezone(self): + tz = self.value['name']['planned'] + tzfile = '/usr/share/zoneinfo/%s' % tz + if not os.path.isfile(tzfile): + self.abort('given timezone "%s" is not available' % tz) + return tzfile + + +class SystemdTimezone(Timezone): + """This is a Timezone manipulation class for systemd-powered Linux. + + It uses the `timedatectl` command to check/set all arguments. 
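+
+    For illustration only (exact wording can vary between systemd versions),
+    the regexps below are written against `timedatectl status` output lines
+    such as:
+        Time zone: Asia/Tokyo (JST, +0900)
+        RTC in local TZ: no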
+ """ + + regexps = dict( + hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE), + name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + subcmds = dict( + hwclock='set-local-rtc', + name='set-timezone' + ) + + def __init__(self, module): + super(SystemdTimezone, self).__init__(module) + self.timedatectl = module.get_bin_path('timedatectl', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_status(self, phase): + if phase not in self.status: + self.status[phase] = self.execute(self.timedatectl, 'status') + return self.status[phase] + + def get(self, key, phase): + status = self._get_status(phase) + value = self.regexps[key].search(status).group(1) + if key == 'hwclock': + # For key='hwclock'; convert yes/no -> local/UTC + if self.module.boolean(value): + value = 'local' + else: + value = 'UTC' + return value + + def set(self, key, value): + # For key='hwclock'; convert UTC/local -> yes/no + if key == 'hwclock': + if value == 'local': + value = 'yes' + else: + value = 'no' + self.execute(self.timedatectl, self.subcmds[key], value, log=True) + + +class NosystemdTimezone(Timezone): + """This is a Timezone manipulation class for non systemd-powered Linux. + + For timezone setting, it edits the following file and reflect changes: + - /etc/sysconfig/clock ... RHEL/CentOS + - /etc/timezone ... Debian/Ubuntu + For hwclock setting, it executes `hwclock --systohc` command with the + '--utc' or '--localtime' option. + """ + + conf_files = dict( + name=None, # To be set in __init__ + hwclock=None, # To be set in __init__ + adjtime='/etc/adjtime' + ) + + # It's fine if all tree config files don't exist + allow_no_file = dict( + name=True, + hwclock=True, + adjtime=True + ) + + regexps = dict( + name=None, # To be set in __init__ + hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE), + adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE) + ) + + dist_regexps = dict( + SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE), + redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE) + ) + + dist_tzline_format = dict( + SuSE='TIMEZONE="%s"\n', + redhat='ZONE="%s"\n' + ) + + def __init__(self, module): + super(NosystemdTimezone, self).__init__(module) + # Validate given timezone + planned_tz = '' + if 'name' in self.value: + tzfile = self._verify_timezone() + planned_tz = self.value['name']['planned'] + # `--remove-destination` is needed if /etc/localtime is a symlink so + # that it overwrites it instead of following it. 
+ self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)] + self.update_hwclock = self.module.get_bin_path('hwclock', required=True) + distribution = get_distribution() + self.conf_files['name'] = '/etc/timezone' + self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE) + self.tzline_format = '%s\n' + # Distribution-specific configurations + if self.module.get_bin_path('dpkg-reconfigure') is not None: + # Debian/Ubuntu + if 'name' in self.value: + self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile), + '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)] + self.conf_files['hwclock'] = '/etc/default/rcS' + elif distribution == 'Alpine' or distribution == 'Gentoo': + self.conf_files['hwclock'] = '/etc/conf.d/hwclock' + if distribution == 'Alpine': + self.update_timezone = ['%s -z %s' % (self.module.get_bin_path('setup-timezone', required=True), planned_tz)] + else: + # RHEL/CentOS/SUSE + if self.module.get_bin_path('tzdata-update') is not None: + # tzdata-update cannot update the timezone if /etc/localtime is + # a symlink so we have to use cp to update the time zone which + # was set above. + if not os.path.islink('/etc/localtime'): + self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)] + # else: + # self.update_timezone = 'cp --remove-destination ...' <- configured above + self.conf_files['name'] = '/etc/sysconfig/clock' + self.conf_files['hwclock'] = '/etc/sysconfig/clock' + try: + f = open(self.conf_files['name'], 'r') + except IOError as err: + if self._allow_ioerror(err, 'name'): + # If the config file doesn't exist detect the distribution and set regexps. + if distribution == 'SuSE': + # For SUSE + self.regexps['name'] = self.dist_regexps['SuSE'] + self.tzline_format = self.dist_tzline_format['SuSE'] + else: + # For RHEL/CentOS + self.regexps['name'] = self.dist_regexps['redhat'] + self.tzline_format = self.dist_tzline_format['redhat'] + else: + self.abort('could not read configuration file "%s"' % self.conf_files['name']) + else: + # The key for timezone might be `ZONE` or `TIMEZONE` + # (the former is used in RHEL/CentOS and the latter is used in SUSE linux). + # So check the content of /etc/sysconfig/clock and decide which key to use. + sysconfig_clock = f.read() + f.close() + if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE): + # For SUSE + self.regexps['name'] = self.dist_regexps['SuSE'] + self.tzline_format = self.dist_tzline_format['SuSE'] + else: + # For RHEL/CentOS + self.regexps['name'] = self.dist_regexps['redhat'] + self.tzline_format = self.dist_tzline_format['redhat'] + + def _allow_ioerror(self, err, key): + # In some cases, even if the target file does not exist, + # simply creating it may solve the problem. + # In such cases, we should continue the configuration rather than aborting. + if err.errno != errno.ENOENT: + # If the error is not ENOENT ("No such file or directory"), + # (e.g., permission error, etc), we should abort. + return False + return self.allow_no_file.get(key, False) + + def _edit_file(self, filename, regexp, value, key): + """Replace the first matched line with given `value`. + + If `regexp` matched more than once, other than the first line will be deleted. + + Args: + filename: The name of the file to edit. + regexp: The regular expression to search with. + value: The line which will be inserted. 
key: For what key the file is being edited.
+        """
+        # Read the file
+        try:
+            file = open(filename, 'r')
+        except IOError as err:
+            if self._allow_ioerror(err, key):
+                lines = []
+            else:
+                self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+        else:
+            lines = file.readlines()
+            file.close()
+        # Find all matched lines
+        matched_indices = []
+        for i, line in enumerate(lines):
+            if regexp.search(line):
+                matched_indices.append(i)
+        if len(matched_indices) > 0:
+            insert_line = matched_indices[0]
+        else:
+            insert_line = 0
+        # Remove all matched lines
+        for i in matched_indices[::-1]:
+            del lines[i]
+        # ...and insert the value
+        lines.insert(insert_line, value)
+        # Write the changes
+        try:
+            file = open(filename, 'w')
+        except IOError:
+            self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+        else:
+            file.writelines(lines)
+            file.close()
+        self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+    def _get_value_from_config(self, key, phase):
+        filename = self.conf_files[key]
+        try:
+            file = open(filename, mode='r')
+        except IOError as err:
+            if self._allow_ioerror(err, key):
+                if key == 'hwclock':
+                    return 'n/a'
+                elif key == 'adjtime':
+                    return 'UTC'
+                elif key == 'name':
+                    return 'n/a'
+            else:
+                self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+        else:
+            status = file.read()
+            file.close()
+            try:
+                value = self.regexps[key].search(status).group(1)
+            except AttributeError:
+                if key == 'hwclock':
+                    # If we cannot find UTC in the config that's fine.
+                    return 'n/a'
+                elif key == 'adjtime':
+                    # If we cannot find UTC/LOCAL in /etc/adjtime that means UTC
+                    # will be used by default.
+                    return 'UTC'
+                elif key == 'name':
+                    if phase == 'before':
+                        # In 'before' phase UTC/LOCAL doesn't need to be set in
+                        # the timezone config file, so we ignore this error.
+                        return 'n/a'
+                    else:
+                        self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+            else:
+                if key == 'hwclock':
+                    # convert yes/no -> UTC/local
+                    if self.module.boolean(value):
+                        value = 'UTC'
+                    else:
+                        value = 'local'
+                elif key == 'adjtime':
+                    # convert LOCAL -> local
+                    if value != 'UTC':
+                        value = value.lower()
+            return value
+
+    def get(self, key, phase):
+        planned = self.value[key]['planned']
+        if key == 'hwclock':
+            value = self._get_value_from_config(key, phase)
+            if value == planned:
+                # If the value in the config file is the same as the 'planned'
+                # value, we need to check /etc/adjtime.
+                value = self._get_value_from_config('adjtime', phase)
+        elif key == 'name':
+            value = self._get_value_from_config(key, phase)
+            if value == planned:
+                # If the planned value is the same as the one in the config file
+                # we need to check if /etc/localtime is also set to the 'planned' zone.
+                if os.path.islink('/etc/localtime'):
+                    # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+                    # to set, we need to return the TZ which the symlink points to.
+                    if os.path.exists('/etc/localtime'):
+                        # We use readlink() because on some distros zone files are symlinks
+                        # to other zone files, so it's hard to get which TZ is actually set
+                        # if we follow the symlink.
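+                        # Illustrative examples (actual paths are distro-dependent):
+                        #   /etc/localtime -> /usr/share/zoneinfo/Asia/Tokyo   => 'Asia/Tokyo'
+                        #   /etc/localtime -> /etc/zoneinfo/UTC (Alpine Linux) => 'UTC'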
+ path = os.readlink('/etc/localtime') + # most linuxes has it in /usr/share/zoneinfo + # alpine linux links under /etc/zoneinfo + linktz = re.search(r'(?:/(?:usr/share|etc)/zoneinfo/)(.*)', path, re.MULTILINE) + if linktz: + valuelink = linktz.group(1) + if valuelink != planned: + value = valuelink + else: + # Set current TZ to 'n/a' if the symlink points to a path + # which isn't a zone file. + value = 'n/a' + else: + # Set current TZ to 'n/a' if the symlink to the zone file is broken. + value = 'n/a' + else: + # If /etc/localtime is not a symlink best we can do is compare it with + # the 'planned' zone info file and return 'n/a' if they are different. + try: + if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned): + return 'n/a' + except Exception: + return 'n/a' + else: + self.abort('unknown parameter "%s"' % key) + return value + + def set_timezone(self, value): + self._edit_file(filename=self.conf_files['name'], + regexp=self.regexps['name'], + value=self.tzline_format % value, + key='name') + for cmd in self.update_timezone: + self.execute(cmd) + + def set_hwclock(self, value): + if value == 'local': + option = '--localtime' + utc = 'no' + else: + option = '--utc' + utc = 'yes' + if self.conf_files['hwclock'] is not None: + self._edit_file(filename=self.conf_files['hwclock'], + regexp=self.regexps['hwclock'], + value='UTC=%s\n' % utc, + key='hwclock') + self.execute(self.update_hwclock, '--systohc', option, log=True) + + def set(self, key, value): + if key == 'name': + self.set_timezone(value) + elif key == 'hwclock': + self.set_hwclock(value) + else: + self.abort('unknown parameter "%s"' % key) + + +class SmartOSTimezone(Timezone): + """This is a Timezone manipulation class for SmartOS instances. + + It uses the C(sm-set-timezone) utility to set the timezone, and + inspects C(/etc/default/init) to determine the current timezone. + + NB: A zone needs to be rebooted in order for the change to be + activated. + """ + + def __init__(self, module): + super(SmartOSTimezone, self).__init__(module) + self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False) + if not self.settimezone: + module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.') + + def get(self, key, phase): + """Lookup the current timezone name in `/etc/default/init`. If anything else + is requested, or if the TZ field is not set we fail. + """ + if key == 'name': + try: + f = open('/etc/default/init', 'r') + for line in f: + m = re.match('^TZ=(.*)$', line.strip()) + if m: + return m.groups()[0] + except Exception: + self.module.fail_json(msg='Failed to read /etc/default/init') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + """Set the requested timezone through sm-set-timezone, an invalid timezone name + will be rejected and we have no further input validation to perform. + """ + if key == 'name': + cmd = 'sm-set-timezone %s' % value + + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg=stderr) + + # sm-set-timezone knows no state and will always set the timezone. + # XXX: https://github.com/joyent/smtools/pull/2 + m = re.match(r'^\* Changed (to)? timezone (to)? 
(%s).*' % value, stdout.splitlines()[1]) + if not (m and m.groups()[-1] == value): + self.module.fail_json(msg='Failed to set timezone') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class DarwinTimezone(Timezone): + """This is the timezone implementation for Darwin which, unlike other *BSD + implementations, uses the `systemsetup` command on Darwin to check/set + the timezone. + """ + + regexps = dict( + name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + def __init__(self, module): + super(DarwinTimezone, self).__init__(module) + self.systemsetup = module.get_bin_path('systemsetup', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_current_timezone(self, phase): + """Lookup the current timezone via `systemsetup -gettimezone`.""" + if phase not in self.status: + self.status[phase] = self.execute(self.systemsetup, '-gettimezone') + return self.status[phase] + + def _verify_timezone(self): + tz = self.value['name']['planned'] + # Lookup the list of supported timezones via `systemsetup -listtimezones`. + # Note: Skip the first line that contains the label 'Time Zones:' + out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:] + tz_list = list(map(lambda x: x.strip(), out)) + if tz not in tz_list: + self.abort('given timezone "%s" is not available' % tz) + return tz + + def get(self, key, phase): + if key == 'name': + status = self._get_current_timezone(phase) + value = self.regexps[key].search(status).group(1) + return value + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + if key == 'name': + self.execute(self.systemsetup, '-settimezone', value, log=True) + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class BSDTimezone(Timezone): + """This is the timezone implementation for *BSD which works simply through + updating the `/etc/localtime` symlink to point to a valid timezone name under + `/usr/share/zoneinfo`. + """ + + def __init__(self, module): + super(BSDTimezone, self).__init__(module) + + def __get_timezone(self): + zoneinfo_dir = '/usr/share/zoneinfo/' + localtime_file = '/etc/localtime' + + # Strategy 1: + # If /etc/localtime does not exist, assum the timezone is UTC. + if not os.path.exists(localtime_file): + self.module.warn('Could not read /etc/localtime. Assuming UTC.') + return 'UTC' + + # Strategy 2: + # Follow symlink of /etc/localtime + zoneinfo_file = localtime_file + while not zoneinfo_file.startswith(zoneinfo_dir): + try: + zoneinfo_file = os.readlink(localtime_file) + except OSError: + # OSError means "end of symlink chain" or broken link. + break + else: + return zoneinfo_file.replace(zoneinfo_dir, '') + + # Strategy 3: + # (If /etc/localtime is not symlinked) + # Check all files in /usr/share/zoneinfo and return first non-link match. + for dname, dummy, fnames in sorted(os.walk(zoneinfo_dir)): + for fname in sorted(fnames): + zoneinfo_file = os.path.join(dname, fname) + if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file): + return zoneinfo_file.replace(zoneinfo_dir, '') + + # Strategy 4: + # As a fall-back, return 'UTC' as default assumption. + self.module.warn('Could not identify timezone name from /etc/localtime. 
Assuming UTC.') + return 'UTC' + + def get(self, key, phase): + """Lookup the current timezone by resolving `/etc/localtime`.""" + if key == 'name': + return self.__get_timezone() + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + if key == 'name': + # First determine if the requested timezone is valid by looking in + # the zoneinfo directory. + zonefile = '/usr/share/zoneinfo/' + value + try: + if not os.path.isfile(zonefile): + self.module.fail_json(msg='%s is not a recognized timezone' % value) + except Exception: + self.module.fail_json(msg='Failed to stat %s' % zonefile) + + # Now (somewhat) atomically update the symlink by creating a new + # symlink and move it into place. Otherwise we have to remove the + # original symlink and create the new symlink, however that would + # create a race condition in case another process tries to read + # /etc/localtime between removal and creation. + suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)]) + new_localtime = '/etc/localtime.' + suffix + + try: + os.symlink(zonefile, new_localtime) + os.rename(new_localtime, '/etc/localtime') + except Exception: + os.remove(new_localtime) + self.module.fail_json(msg='Could not update /etc/localtime') + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +class AIXTimezone(Timezone): + """This is a Timezone manipulation class for AIX instances. + + It uses the C(chtz) utility to set the timezone, and + inspects C(/etc/environment) to determine the current timezone. + + While AIX time zones can be set using two formats (POSIX and + Olson) the preferred method is Olson. + See the following article for more information: + https://developer.ibm.com/articles/au-aix-posix/ + + NB: AIX needs to be rebooted in order for the change to be + activated. + """ + + def __init__(self, module): + super(AIXTimezone, self).__init__(module) + self.settimezone = self.module.get_bin_path('chtz', required=True) + + def __get_timezone(self): + """ Return the current value of TZ= in /etc/environment """ + try: + f = open('/etc/environment', 'r') + etcenvironment = f.read() + f.close() + except Exception: + self.module.fail_json(msg='Issue reading contents of /etc/environment') + + match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE) + if match: + return match.group(1) + else: + return None + + def get(self, key, phase): + """Lookup the current timezone name in `/etc/environment`. If anything else + is requested, or if the TZ field is not set we fail. + """ + if key == 'name': + return self.__get_timezone() + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + def set(self, key, value): + """Set the requested timezone through chtz, an invalid timezone name + will be rejected and we have no further input validation to perform. + """ + if key == 'name': + # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values. + # It will only return non-zero if the chtz command itself fails, it does not check for + # valid timezones. We need to perform a basic check to confirm that the timezone + # definition exists in /usr/share/lib/zoneinfo + # This does mean that we can only support Olson for now. The below commented out regex + # detects Olson date formats, so in the future we could detect Posix or Olson and + # act accordingly. 
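+ # For reference, two illustrative shapes (example values, not taken from this + # module): an Olson name looks like 'America/New_York', while an equivalent + # POSIX TZ string looks like 'EST5EDT,M3.2.0,M11.1.0'. The commented-out + # regex below only matches the Olson shape.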
+ + # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE) + # if not regex_olson.match(value): + # msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value + # self.module.fail_json(msg=msg) + + # First determine if the requested timezone is valid by looking in the zoneinfo + # directory. + zonefile = '/usr/share/lib/zoneinfo/' + value + try: + if not os.path.isfile(zonefile): + self.module.fail_json(msg='%s is not a recognized timezone.' % value) + except Exception: + self.module.fail_json(msg='Failed to check %s.' % zonefile) + + # Now set the TZ using chtz + cmd = 'chtz %s' % value + (rc, stdout, stderr) = self.module.run_command(cmd) + + if rc != 0: + self.module.fail_json(msg=stderr) + + # The best condition check we can do is to check the value of TZ after making the + # change. + TZ = self.__get_timezone() + if TZ != value: + msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value) + self.module.fail_json(msg=msg) + + else: + self.module.fail_json(msg='%s is not a supported option on target platform' % key) + + +def main(): + # Construct 'module' and 'tz' + module = AnsibleModule( + argument_spec=dict( + hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']), + name=dict(type='str'), + ), + required_one_of=[ + ['hwclock', 'name'] + ], + supports_check_mode=True, + ) + tz = Timezone(module) + + # Check the current state + tz.check(phase='before') + if module.check_mode: + diff = tz.diff('before', 'planned') + # In check mode, the 'planned' state is treated as the 'after' state + diff['after'] = diff.pop('planned') + else: + # Make change + tz.change() + # Check the current state + tz.check(phase='after') + # Examine if the current state matches the planned state + (after, planned) = tz.diff('after', 'planned').values() + if after != planned: + tz.abort('still not in the desired state, though changes have been made - ' + 'planned: %s, after: %s' % (str(planned), str(after))) + diff = tz.diff('before', 'after') + + changed = (diff['before'] != diff['after']) + if len(tz.msg) > 0: + module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg)) + else: + module.exit_json(changed=changed, diff=diff) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/twilio.py b/ansible_collections/community/general/plugins/modules/twilio.py new file mode 100644 index 000000000..270320c46 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/twilio.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Matt Makai +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: twilio +short_description: Sends a text message to a mobile phone through Twilio +description: + - Sends a text message to a phone number through the Twilio messaging API. +notes: + - This module is non-idempotent because it sends a text message through the + external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external + dependency to work. In this case, you'll need a Twilio account with + a purchased or verified phone number to send the text message.
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + account_sid: + type: str + description: + - User's Twilio account token found on the account page. + required: true + auth_token: + type: str + description: + - User's Twilio authentication token. + required: true + msg: + type: str + description: + - The body of the text message. + required: true + to_numbers: + type: list + elements: str + description: + - One or more phone numbers to send the text message to, format C(+15551112222). + required: true + aliases: [ to_number ] + from_number: + type: str + description: + - The Twilio number to send the text message from, format C(+15551112222). + required: true + media_url: + type: str + description: + - A URL with a picture, video or sound clip to send with an MMS + (multimedia message) instead of a plain SMS. + required: false + +author: "Matt Makai (@makaimc)" +''' + +EXAMPLES = ''' +# send an SMS about the build status to (555) 303 5681 +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- name: Send a text message to a mobile phone through Twilio + community.general.twilio: + msg: All servers with webserver role are now configured. + account_sid: ACXXXXXXXXXXXXXXXXX + auth_token: ACXXXXXXXXXXXXXXXXX + from_number: +15552014545 + to_number: +15553035681 + delegate_to: localhost + +# send an SMS to multiple phone numbers about the deployment +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- name: Send a text message to a mobile phone through Twilio + community.general.twilio: + msg: This server configuration is now complete. + account_sid: ACXXXXXXXXXXXXXXXXX + auth_token: ACXXXXXXXXXXXXXXXXX + from_number: +15553258899 + to_numbers: + - +15551113232 + - +12025551235 + - +19735559010 + delegate_to: localhost + +# send an MMS to a single recipient with an update on the deployment +# and an image of the results +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- name: Send a text message to a mobile phone through Twilio + community.general.twilio: + msg: Deployment complete! 
+ account_sid: ACXXXXXXXXXXXXXXXXX + auth_token: ACXXXXXXXXXXXXXXXXX + from_number: +15552014545 + to_number: +15553035681 + media_url: https://demo.twilio.com/logo.png + delegate_to: localhost +''' + +# ======================================= +# twilio module support methods +# +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url + + +def post_twilio_api(module, account_sid, auth_token, msg, from_number, + to_number, media_url=None): + URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ + % (account_sid,) + AGENT = "Ansible" + + data = {'From': from_number, 'To': to_number, 'Body': msg} + if media_url: + data['MediaUrl'] = media_url + encoded_data = urlencode(data) + + headers = {'User-Agent': AGENT, + 'Content-type': 'application/x-www-form-urlencoded', + 'Accept': 'application/json', + } + + # Hack module params to have the Basic auth params that fetch_url expects + module.params['url_username'] = account_sid.replace('\n', '') + module.params['url_password'] = auth_token.replace('\n', '') + + return fetch_url(module, URI, data=encoded_data, headers=headers) + + +# ======================================= +# Main +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + account_sid=dict(required=True), + auth_token=dict(required=True, no_log=True), + msg=dict(required=True), + from_number=dict(required=True), + to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'), + media_url=dict(default=None, required=False), + ), + supports_check_mode=True + ) + + account_sid = module.params['account_sid'] + auth_token = module.params['auth_token'] + msg = module.params['msg'] + from_number = module.params['from_number'] + to_numbers = module.params['to_numbers'] + media_url = module.params['media_url'] + + for number in to_numbers: + r, info = post_twilio_api(module, account_sid, auth_token, msg, + from_number, number, media_url) + if info['status'] not in [200, 201]: + body_message = "unknown error" + if 'body' in info: + body = module.from_json(info['body']) + body_message = body['message'] + module.fail_json(msg="unable to send message to %s: %s" % (number, body_message)) + + module.exit_json(msg=msg, changed=False) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/typetalk.py b/ansible_collections/community/general/plugins/modules/typetalk.py new file mode 100644 index 000000000..ddf9f3560 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/typetalk.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: typetalk +short_description: Send a message to typetalk +description: + - Send a message to typetalk using typetalk API +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + client_id: + type: str + description: + - OAuth2 client ID + required: true + client_secret: + type: str + description: + - OAuth2 client secret + required: true + topic: + type: int + description: + - topic id to post message + required: true + msg: + 
type: str + description: + - message body + required: true +requirements: [ json ] +author: "Takashi Someda (@tksmd)" +''' + +EXAMPLES = ''' +- name: Send a message to typetalk + community.general.typetalk: + client_id: 12345 + client_secret: 12345 + topic: 1 + msg: install completed +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url, ConnectionError + + +def do_request(module, url, params, headers=None): + data = urlencode(params) + if headers is None: + headers = dict() + headers = dict(headers, **{ + 'User-Agent': 'Ansible/typetalk module', + }) + r, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] != 200: + exc = ConnectionError(info['msg']) + exc.code = info['status'] + raise exc + return r + + +def get_access_token(module, client_id, client_secret): + params = { + 'client_id': client_id, + 'client_secret': client_secret, + 'grant_type': 'client_credentials', + 'scope': 'topic.post' + } + res = do_request(module, 'https://typetalk.com/oauth2/access_token', params) + return json.load(res)['access_token'] + + +def send_message(module, client_id, client_secret, topic, msg): + """ + send message to typetalk + """ + try: + access_token = get_access_token(module, client_id, client_secret) + url = 'https://typetalk.com/api/v1/topics/%d' % topic + headers = { + 'Authorization': 'Bearer %s' % access_token, + } + do_request(module, url, {'message': msg}, headers) + return True, {'access_token': access_token} + except ConnectionError as e: + return False, e + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + client_id=dict(required=True), + client_secret=dict(required=True, no_log=True), + topic=dict(required=True, type='int'), + msg=dict(required=True), + ), + supports_check_mode=False + ) + + if not json: + module.fail_json(msg="json module is required") + + client_id = module.params["client_id"] + client_secret = module.params["client_secret"] + topic = module.params["topic"] + msg = module.params["msg"] + + res, error = send_message(module, client_id, client_secret, topic, msg) + if not res: + module.fail_json(msg='fail to send message with response code %s' % error.code) + + module.exit_json(changed=True, topic=topic, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/udm_dns_record.py b/ansible_collections/community/general/plugins/modules/udm_dns_record.py new file mode 100644 index 000000000..849c84a2d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/udm_dns_record.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: udm_dns_record +author: + - Tobias Rüetschi (@keachi) +short_description: Manage dns entries on a univention corporate server +description: + - "This module allows to manage dns records on a univention corporate server (UCS). + It uses the python API of the UCS to create a new object or edit it." 
+requirements: + - Univention + - ipaddress (for I(type=ptr_record)) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial +options: + state: + type: str + default: "present" + choices: [ present, absent ] + description: + - Whether the dns record is present or not. + name: + type: str + required: true + description: + - "Name of the record, this is also the DNS record. E.g. www for + www.example.com." + - For PTR records this has to be the IP address. + zone: + type: str + required: true + description: + - Corresponding DNS zone for this record, e.g. example.com. + - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)). + type: + type: str + required: true + description: + - "Define the record type. C(host_record) is a A or AAAA record, + C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record) + is a SRV record and C(txt_record) is a TXT record." + - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)." + data: + type: dict + default: {} + description: + - "Additional data for this record, e.g. ['a': '192.0.2.1']. + Required if I(state=present)." +''' + + +EXAMPLES = ''' +- name: Create a DNS record on a UCS + community.general.udm_dns_record: + name: www + zone: example.com + type: host_record + data: + a: + - 192.0.2.1 + - 2001:0db8::42 + +- name: Create a DNS v4 PTR record on a UCS + community.general.udm_dns_record: + name: 192.0.2.1 + zone: 2.0.192.in-addr.arpa + type: ptr_record + data: + ptr_record: "www.example.com." + +- name: Create a DNS v6 PTR record on a UCS + community.general.udm_dns_record: + name: 2001:db8:0:0:0:ff00:42:8329 + zone: 2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa + type: ptr_record + data: + ptr_record: "www.example.com." +''' + + +RETURN = '''#''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, + config, + uldap, +) + + +with deps.declare("univention", msg="This module requires univention python bindings"): + from univention.admin.handlers.dns import ( + forward_zone, + reverse_zone, + ) + +with deps.declare("ipaddress"): + import ipaddress + + +def main(): + module = AnsibleModule( + argument_spec=dict( + type=dict(required=True, type='str'), + zone=dict(required=True, type='str'), + name=dict(required=True, type='str'), + data=dict(default={}, type='dict'), + state=dict(default='present', choices=['present', 'absent'], type='str') + ), + supports_check_mode=True, + required_if=([ + ('state', 'present', ['data']) + ]) + ) + + deps.validate(module, "univention") + + type = module.params['type'] + zone = module.params['zone'] + name = module.params['name'] + data = module.params['data'] + state = module.params['state'] + changed = False + diff = None + + workname = name + if type == 'ptr_record': + deps.validate(module, "ipaddress") + + try: + if 'arpa' not in zone: + raise Exception("Zone must be reversed zone for ptr_record. (e.g. 
1.1.192.in-addr.arpa)") + ipaddr_rev = ipaddress.ip_address(name).reverse_pointer + subnet_offset = ipaddr_rev.find(zone) + if subnet_offset == -1: + raise Exception("reversed IP address {0} is not part of the zone.".format(ipaddr_rev)) + workname = ipaddr_rev[0:subnet_offset - 1] + except Exception as e: + module.fail_json( + msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e) + ) + + obj = list(ldap_search( + '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname), + attr=['dNSZone'] + )) + exists = bool(len(obj)) + container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn()) + dn = 'relativeDomainName={0},{1}'.format(workname, container) + + if state == 'present': + try: + if not exists: + so = forward_zone.lookup( + config(), + uldap(), + '(zone={0})'.format(zone), + scope='domain', + ) or reverse_zone.lookup( + config(), + uldap(), + '(zoneName={0})'.format(zone), + scope='domain', + ) + if not so: + raise Exception("Did not find zone '{0}' in Univention".format(zone)) + obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0]) + else: + obj = umc_module_for_edit('dns/{0}'.format(type), dn) + + if type == 'ptr_record': + obj['ip'] = name + obj['address'] = workname + else: + obj['name'] = name + + for k, v in data.items(): + obj[k] = v + diff = obj.diff() + changed = obj.diff() != [] + if not module.check_mode: + if not exists: + obj.create() + else: + obj.modify() + except Exception as e: + module.fail_json( + msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('dns/{0}'.format(type), dn) + if not module.check_mode: + obj.remove() + changed = True + except Exception as e: + module.fail_json( + msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/udm_dns_zone.py b/ansible_collections/community/general/plugins/modules/udm_dns_zone.py new file mode 100644 index 000000000..19f24fa1c --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/udm_dns_zone.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: udm_dns_zone +author: + - Tobias Rüetschi (@keachi) +short_description: Manage dns zones on a univention corporate server +description: + - "This module allows managing dns zones on a univention corporate server (UCS). + It uses the python API of the UCS to create a new object or edit it." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial +options: + state: + type: str + default: "present" + choices: [ present, absent ] + description: + - Whether the dns zone is present or not. + type: + type: str + required: true + description: + - Define if the zone is a forward or reverse DNS zone. + - "The available choices are: C(forward_zone), C(reverse_zone)." + zone: + type: str + required: true + description: + - DNS zone name, e.g. C(example.com). + aliases: [name] + nameserver: + type: list + elements: str + default: [] + description: + - List of appropriate name servers. Required if I(state=present). + interfaces: + type: list + elements: str + default: [] + description: + - List of interface IP addresses, on which the server should + respond for this zone. Required if I(state=present). + + refresh: + type: int + default: 3600 + description: + - Interval before the zone should be refreshed. + retry: + type: int + default: 1800 + description: + - Interval that should elapse before a failed refresh should be retried. + expire: + type: int + default: 604800 + description: + - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. + ttl: + type: int + default: 600 + description: + - Minimum TTL field that should be exported with any RR from this zone. + + contact: + type: str + default: '' + description: + - Contact person in the SOA record. + mx: + type: list + elements: str + default: [] + description: + - List of MX servers. (Must be declared as A or AAAA records.) +''' + + +EXAMPLES = ''' +- name: Create a DNS zone on a UCS + community.general.udm_dns_zone: + zone: example.com + type: forward_zone + nameserver: + - ucs.example.com + interfaces: + - 192.0.2.1 +''' + + +RETURN = '''# ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) + + +def convert_time(time): + """Convert a time in seconds into the biggest unit""" + units = [ + (24 * 60 * 60, 'days'), + (60 * 60, 'hours'), + (60, 'minutes'), + (1, 'seconds'), + ] + + if time == 0: + return ('0', 'seconds') + for unit in units: + if time >= unit[0]: + return ('{0}'.format(time // unit[0]), unit[1]) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + type=dict(required=True, + type='str'), + zone=dict(required=True, + aliases=['name'], + type='str'), + nameserver=dict(default=[], + type='list', + elements='str'), + interfaces=dict(default=[], + type='list', + elements='str'), + refresh=dict(default=3600, + type='int'), + retry=dict(default=1800, + type='int'), + expire=dict(default=604800, + type='int'), + ttl=dict(default=600, + type='int'), + contact=dict(default='', + type='str'), + mx=dict(default=[], + type='list', + elements='str'), + state=dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if=([ + ('state', 'present', ['nameserver', 'interfaces']) + ]) + ) + type = module.params['type'] + zone = module.params['zone'] + nameserver = module.params['nameserver'] + interfaces = module.params['interfaces'] + refresh = module.params['refresh'] + retry = module.params['retry'] + expire = module.params['expire'] + ttl = module.params['ttl'] + contact = module.params['contact'] + mx = module.params['mx'] + state = module.params['state'] + changed = False + diff = None + + obj = list(ldap_search( + '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone), + attr=['dNSZone'] + )) + + exists = bool(len(obj)) + container = 'cn=dns,{0}'.format(base_dn()) + dn = 'zoneName={0},{1}'.format(zone, container) + if contact == '': + contact = 'root@{0}.'.format(zone) + + if state == 'present': + try: + if not exists: + obj = umc_module_for_add('dns/{0}'.format(type), container) + else: + obj = 
umc_module_for_edit('dns/{0}'.format(type), dn) + obj['zone'] = zone + obj['nameserver'] = nameserver + obj['a'] = interfaces + obj['refresh'] = convert_time(refresh) + obj['retry'] = convert_time(retry) + obj['expire'] = convert_time(expire) + obj['ttl'] = convert_time(ttl) + obj['contact'] = contact + obj['mx'] = mx + diff = obj.diff() + if exists: + for k in obj.keys(): + if obj.hasChanged(k): + changed = True + else: + changed = True + if not module.check_mode: + if not exists: + obj.create() + elif changed: + obj.modify() + except Exception as e: + module.fail_json( + msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('dns/{0}'.format(type), dn) + if not module.check_mode: + obj.remove() + changed = True + except Exception as e: + module.fail_json( + msg='Removing dns zone {0} failed: {1}'.format(zone, e) + ) + + module.exit_json( + changed=changed, + diff=diff, + zone=zone + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/udm_group.py b/ansible_collections/community/general/plugins/modules/udm_group.py new file mode 100644 index 000000000..5fe2422f8 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/udm_group.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: udm_group +author: + - Tobias Rüetschi (@keachi) +short_description: Manage of the posix group +description: + - "This module allows to manage user groups on a univention corporate server (UCS). + It uses the python API of the UCS to create a new object or edit it." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial +options: + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the group is present or not. + type: str + name: + required: true + description: + - Name of the posix group. + type: str + description: + required: false + description: + - Group description. + type: str + position: + required: false + description: + - define the whole ldap position of the group, e.g. + C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com). + type: str + default: '' + ou: + required: false + description: + - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com). + type: str + default: '' + subpath: + required: false + description: + - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups). 
+ type: str + default: "cn=groups" +''' + + +EXAMPLES = ''' +- name: Create a POSIX group + community.general.udm_group: + name: g123m-1A + +# Create a POSIX group with the exact DN +# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com) +- name: Create a POSIX group with a DN + community.general.udm_group: + name: g123m-1A + subpath: 'cn=classes,cn=students,cn=groups' + ou: school + +# or +- name: Create a POSIX group with a DN + community.general.udm_group: + name: g123m-1A + position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com' +''' + + +RETURN = '''# ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, + type='str'), + description=dict(type='str'), + position=dict(default='', + type='str'), + ou=dict(default='', + type='str'), + subpath=dict(default='cn=groups', + type='str'), + state=dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True + ) + name = module.params['name'] + description = module.params['description'] + position = module.params['position'] + ou = module.params['ou'] + subpath = module.params['subpath'] + state = module.params['state'] + changed = False + diff = None + + groups = list(ldap_search( + '(&(objectClass=posixGroup)(cn={0}))'.format(name), + attr=['cn'] + )) + if position != '': + container = position + else: + if ou != '': + ou = 'ou={0},'.format(ou) + if subpath != '': + subpath = '{0},'.format(subpath) + container = '{0}{1}{2}'.format(subpath, ou, base_dn()) + group_dn = 'cn={0},{1}'.format(name, container) + + exists = bool(len(groups)) + + if state == 'present': + try: + if not exists: + grp = umc_module_for_add('groups/group', container) + else: + grp = umc_module_for_edit('groups/group', group_dn) + grp['name'] = name + grp['description'] = description + diff = grp.diff() + changed = grp.diff() != [] + if not module.check_mode: + if not exists: + grp.create() + else: + grp.modify() + except Exception: + module.fail_json( + msg="Creating/editing group {0} in {1} failed".format(name, container) + ) + + if state == 'absent' and exists: + try: + grp = umc_module_for_edit('groups/group', group_dn) + if not module.check_mode: + grp.remove() + changed = True + except Exception: + module.fail_json( + msg="Removing group {0} failed".format(name) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/udm_share.py b/ansible_collections/community/general/plugins/modules/udm_share.py new file mode 100644 index 000000000..274391335 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/udm_share.py @@ -0,0 +1,579 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: udm_share +author: + - Tobias Rüetschi (@keachi) +short_description: Manage samba shares on a univention corporate server 
+description: + - "This module allows to manage samba shares on a univention corporate + server (UCS). + It uses the python API of the UCS to create a new object or edit it." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial +options: + state: + default: "present" + choices: [ present, absent ] + description: + - Whether the share is present or not. + type: str + name: + required: true + description: + - Name + type: str + host: + required: false + description: + - Host FQDN (server which provides the share), e.g. C({{ + ansible_fqdn }}). Required if I(state=present). + type: str + path: + required: false + description: + - Directory on the providing server, e.g. C(/home). Required if I(state=present). + type: path + sambaName: + required: false + description: + - Windows name. Required if I(state=present). + type: str + aliases: [ samba_name ] + ou: + required: true + description: + - Organisational unit, inside the LDAP Base DN. + type: str + owner: + default: '0' + description: + - Directory owner of the share's root directory. + type: str + group: + default: '0' + description: + - Directory owner group of the share's root directory. + type: str + directorymode: + default: '00755' + description: + - Permissions for the share's root directory. + type: str + root_squash: + default: true + description: + - Modify user ID for root user (root squashing). + type: bool + subtree_checking: + default: true + description: + - Subtree checking. + type: bool + sync: + default: 'sync' + description: + - NFS synchronisation. + type: str + writeable: + default: true + description: + - NFS write access. + type: bool + sambaBlockSize: + description: + - Blocking size. + type: str + aliases: [ samba_block_size ] + sambaBlockingLocks: + default: true + description: + - Blocking locks. + type: bool + aliases: [ samba_blocking_locks ] + sambaBrowseable: + description: + - Show in Windows network environment. + type: bool + default: true + aliases: [ samba_browsable ] + sambaCreateMode: + default: '0744' + description: + - File mode. + type: str + aliases: [ samba_create_mode ] + sambaCscPolicy: + default: 'manual' + description: + - Client-side caching policy. + type: str + aliases: [ samba_csc_policy ] + sambaCustomSettings: + default: [] + description: + - Option name in smb.conf and its value. + type: list + elements: dict + aliases: [ samba_custom_settings ] + sambaDirectoryMode: + default: '0755' + description: + - Directory mode. + type: str + aliases: [ samba_directory_mode ] + sambaDirectorySecurityMode: + default: '0777' + description: + - Directory security mode. + type: str + aliases: [ samba_directory_security_mode ] + sambaDosFilemode: + default: false + description: + - Users with write access may modify permissions. + type: bool + aliases: [ samba_dos_filemode ] + sambaFakeOplocks: + default: false + description: + - Fake oplocks. + type: bool + aliases: [ samba_fake_oplocks ] + sambaForceCreateMode: + default: false + description: + - Force file mode. + type: bool + aliases: [ samba_force_create_mode ] + sambaForceDirectoryMode: + default: false + description: + - Force directory mode. + type: bool + aliases: [ samba_force_directory_mode ] + sambaForceDirectorySecurityMode: + default: false + description: + - Force directory security mode. + type: bool + aliases: [ samba_force_directory_security_mode ] + sambaForceGroup: + description: + - Force group. 
+ type: str + aliases: [ samba_force_group ] + sambaForceSecurityMode: + default: false + description: + - Force security mode. + type: bool + aliases: [ samba_force_security_mode ] + sambaForceUser: + description: + - Force user. + type: str + aliases: [ samba_force_user ] + sambaHideFiles: + description: + - Hide files. + type: str + aliases: [ samba_hide_files ] + sambaHideUnreadable: + default: false + description: + - Hide unreadable files/directories. + type: bool + aliases: [ samba_hide_unreadable ] + sambaHostsAllow: + default: [] + description: + - Allowed host/network. + type: list + elements: str + aliases: [ samba_hosts_allow ] + sambaHostsDeny: + default: [] + description: + - Denied host/network. + type: list + elements: str + aliases: [ samba_hosts_deny ] + sambaInheritAcls: + default: true + description: + - Inherit ACLs. + type: bool + aliases: [ samba_inherit_acls ] + sambaInheritOwner: + default: false + description: + - Create files/directories with the owner of the parent directory. + type: bool + aliases: [ samba_inherit_owner ] + sambaInheritPermissions: + default: false + description: + - Create files/directories with permissions of the parent directory. + type: bool + aliases: [ samba_inherit_permissions ] + sambaInvalidUsers: + description: + - Invalid users or groups. + type: str + aliases: [ samba_invalid_users ] + sambaLevel2Oplocks: + default: true + description: + - Level 2 oplocks. + type: bool + aliases: [ samba_level_2_oplocks ] + sambaLocking: + default: true + description: + - Locking. + type: bool + aliases: [ samba_locking ] + sambaMSDFSRoot: + default: false + description: + - MSDFS root. + type: bool + aliases: [ samba_msdfs_root ] + sambaNtAclSupport: + default: true + description: + - NT ACL support. + type: bool + aliases: [ samba_nt_acl_support ] + sambaOplocks: + default: true + description: + - Oplocks. + type: bool + aliases: [ samba_oplocks ] + sambaPostexec: + description: + - Postexec script. + type: str + aliases: [ samba_postexec ] + sambaPreexec: + description: + - Preexec script. + type: str + aliases: [ samba_preexec ] + sambaPublic: + default: false + description: + - Allow anonymous read-only access with a guest user. + type: bool + aliases: [ samba_public ] + sambaSecurityMode: + default: '0777' + description: + - Security mode. + type: str + aliases: [ samba_security_mode ] + sambaStrictLocking: + default: 'Auto' + description: + - Strict locking. + type: str + aliases: [ samba_strict_locking ] + sambaVFSObjects: + description: + - VFS objects. + type: str + aliases: [ samba_vfs_objects ] + sambaValidUsers: + description: + - Valid users or groups. + type: str + aliases: [ samba_valid_users ] + sambaWriteList: + description: + - Restrict write access to these users/groups. + type: str + aliases: [ samba_write_list ] + sambaWriteable: + default: true + description: + - Samba write access. + type: bool + aliases: [ samba_writeable ] + nfs_hosts: + default: [] + description: + - Only allow access for this host, IP address or network. + type: list + elements: str + nfsCustomSettings: + default: [] + description: + - Option name in exports file. 
+ type: list + elements: str + aliases: [ nfs_custom_settings ] +''' + + +EXAMPLES = ''' +- name: Create a share named home on the server ucs.example.com with the path /home + community.general.udm_share: + name: home + path: /home + host: ucs.example.com + sambaName: Home +''' + + +RETURN = '''# ''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, + type='str'), + ou=dict(required=True, + type='str'), + owner=dict(type='str', + default='0'), + group=dict(type='str', + default='0'), + path=dict(type='path'), + directorymode=dict(type='str', + default='00755'), + host=dict(type='str'), + root_squash=dict(type='bool', + default=True), + subtree_checking=dict(type='bool', + default=True), + sync=dict(type='str', + default='sync'), + writeable=dict(type='bool', + default=True), + sambaBlockSize=dict(type='str', + aliases=['samba_block_size']), + sambaBlockingLocks=dict(type='bool', + aliases=['samba_blocking_locks'], + default=True), + sambaBrowseable=dict(type='bool', + aliases=['samba_browsable'], + default=True), + sambaCreateMode=dict(type='str', + aliases=['samba_create_mode'], + default='0744'), + sambaCscPolicy=dict(type='str', + aliases=['samba_csc_policy'], + default='manual'), + sambaCustomSettings=dict(type='list', + elements='dict', + aliases=['samba_custom_settings'], + default=[]), + sambaDirectoryMode=dict(type='str', + aliases=['samba_directory_mode'], + default='0755'), + sambaDirectorySecurityMode=dict(type='str', + aliases=['samba_directory_security_mode'], + default='0777'), + sambaDosFilemode=dict(type='bool', + aliases=['samba_dos_filemode'], + default=False), + sambaFakeOplocks=dict(type='bool', + aliases=['samba_fake_oplocks'], + default=False), + sambaForceCreateMode=dict(type='bool', + aliases=['samba_force_create_mode'], + default=False), + sambaForceDirectoryMode=dict(type='bool', + aliases=['samba_force_directory_mode'], + default=False), + sambaForceDirectorySecurityMode=dict(type='bool', + aliases=['samba_force_directory_security_mode'], + default=False), + sambaForceGroup=dict(type='str', + aliases=['samba_force_group']), + sambaForceSecurityMode=dict(type='bool', + aliases=['samba_force_security_mode'], + default=False), + sambaForceUser=dict(type='str', + aliases=['samba_force_user']), + sambaHideFiles=dict(type='str', + aliases=['samba_hide_files']), + sambaHideUnreadable=dict(type='bool', + aliases=['samba_hide_unreadable'], + default=False), + sambaHostsAllow=dict(type='list', + elements='str', + aliases=['samba_hosts_allow'], + default=[]), + sambaHostsDeny=dict(type='list', + elements='str', + aliases=['samba_hosts_deny'], + default=[]), + sambaInheritAcls=dict(type='bool', + aliases=['samba_inherit_acls'], + default=True), + sambaInheritOwner=dict(type='bool', + aliases=['samba_inherit_owner'], + default=False), + sambaInheritPermissions=dict(type='bool', + aliases=['samba_inherit_permissions'], + default=False), + sambaInvalidUsers=dict(type='str', + aliases=['samba_invalid_users']), + sambaLevel2Oplocks=dict(type='bool', + aliases=['samba_level_2_oplocks'], + default=True), + sambaLocking=dict(type='bool', + aliases=['samba_locking'], + default=True), + sambaMSDFSRoot=dict(type='bool', + aliases=['samba_msdfs_root'], + default=False), + sambaName=dict(type='str', + aliases=['samba_name']), + 
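+ # NOTE: main() below converts boolean parameters in this spec to the + # '1'/'0' strings that the UDM API expects before copying them onto the + # share object.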
sambaNtAclSupport=dict(type='bool', + aliases=['samba_nt_acl_support'], + default=True), + sambaOplocks=dict(type='bool', + aliases=['samba_oplocks'], + default=True), + sambaPostexec=dict(type='str', + aliases=['samba_postexec']), + sambaPreexec=dict(type='str', + aliases=['samba_preexec']), + sambaPublic=dict(type='bool', + aliases=['samba_public'], + default=False), + sambaSecurityMode=dict(type='str', + aliases=['samba_security_mode'], + default='0777'), + sambaStrictLocking=dict(type='str', + aliases=['samba_strict_locking'], + default='Auto'), + sambaVFSObjects=dict(type='str', + aliases=['samba_vfs_objects']), + sambaValidUsers=dict(type='str', + aliases=['samba_valid_users']), + sambaWriteList=dict(type='str', + aliases=['samba_write_list']), + sambaWriteable=dict(type='bool', + aliases=['samba_writeable'], + default=True), + nfs_hosts=dict(type='list', + elements='str', + default=[]), + nfsCustomSettings=dict(type='list', + elements='str', + aliases=['nfs_custom_settings'], + default=[]), + state=dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if=([ + ('state', 'present', ['path', 'host', 'sambaName']) + ]) + ) + name = module.params['name'] + state = module.params['state'] + changed = False + diff = None + + obj = list(ldap_search( + '(&(objectClass=univentionShare)(cn={0}))'.format(name), + attr=['cn'] + )) + + exists = bool(len(obj)) + container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn()) + dn = 'cn={0},{1}'.format(name, container) + + if state == 'present': + try: + if not exists: + obj = umc_module_for_add('shares/share', container) + else: + obj = umc_module_for_edit('shares/share', dn) + + module.params['printablename'] = '{0} ({1})'.format(name, module.params['host']) + for k in obj.keys(): + if module.params[k] is True: + module.params[k] = '1' + elif module.params[k] is False: + module.params[k] = '0' + obj[k] = module.params[k] + + diff = obj.diff() + if exists: + for k in obj.keys(): + if obj.hasChanged(k): + changed = True + else: + changed = True + if not module.check_mode: + if not exists: + obj.create() + elif changed: + obj.modify() + except Exception as err: + module.fail_json( + msg='Creating/editing share {0} in {1} failed: {2}'.format( + name, + container, + err, + ) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('shares/share', dn) + if not module.check_mode: + obj.remove() + changed = True + except Exception as err: + module.fail_json( + msg='Removing share {0} in {1} failed: {2}'.format( + name, + container, + err, + ) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/udm_user.py b/ansible_collections/community/general/plugins/modules/udm_user.py new file mode 100644 index 000000000..05c5ad359 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/udm_user.py @@ -0,0 +1,573 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: udm_user +author: + - Tobias Rüetschi (@keachi) +short_description: Manage posix users on a univention 
corporate server +description: + - "This module allows to manage posix users on a univention corporate + server (UCS). + It uses the python API of the UCS to create a new object or edit it." +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial +options: + state: + default: "present" + choices: [ present, absent ] + description: + - Whether the user is present or not. + type: str + username: + required: true + description: + - User name + aliases: ['name'] + type: str + firstname: + description: + - First name. Required if I(state=present). + type: str + lastname: + description: + - Last name. Required if I(state=present). + type: str + password: + description: + - Password. Required if I(state=present). + type: str + birthday: + description: + - Birthday + type: str + city: + description: + - City of users business address. + type: str + country: + description: + - Country of users business address. + type: str + department_number: + description: + - Department number of users business address. + aliases: [ departmentNumber ] + type: str + description: + description: + - Description (not gecos) + type: str + display_name: + description: + - Display name (not gecos) + aliases: [ displayName ] + type: str + email: + default: [''] + description: + - A list of e-mail addresses. + type: list + elements: str + employee_number: + description: + - Employee number + aliases: [ employeeNumber ] + type: str + employee_type: + description: + - Employee type + aliases: [ employeeType ] + type: str + gecos: + description: + - GECOS + type: str + groups: + default: [] + description: + - "POSIX groups, the LDAP DNs of the groups will be found with the + LDAP filter for each group as $GROUP: + C((&(objectClass=posixGroup)(cn=$GROUP)))." + type: list + elements: str + home_share: + description: + - "Home NFS share. Must be a LDAP DN, e.g. + C(cn=home,cn=shares,ou=school,dc=example,dc=com)." + aliases: [ homeShare ] + type: str + home_share_path: + description: + - Path to home NFS share, inside the homeShare. + aliases: [ homeSharePath ] + type: str + home_telephone_number: + default: [] + description: + - List of private telephone numbers. + aliases: [ homeTelephoneNumber ] + type: list + elements: str + homedrive: + description: + - Windows home drive, e.g. C("H:"). + type: str + mail_alternative_address: + default: [] + description: + - List of alternative e-mail addresses. + aliases: [ mailAlternativeAddress ] + type: list + elements: str + mail_home_server: + description: + - FQDN of mail server + aliases: [ mailHomeServer ] + type: str + mail_primary_address: + description: + - Primary e-mail address + aliases: [ mailPrimaryAddress ] + type: str + mobile_telephone_number: + default: [] + description: + - Mobile phone number + aliases: [ mobileTelephoneNumber ] + type: list + elements: str + organisation: + description: + - Organisation + aliases: [ organization ] + type: str + overridePWHistory: + type: bool + default: false + description: + - Override password history + aliases: [ override_pw_history ] + overridePWLength: + type: bool + default: false + description: + - Override password check + aliases: [ override_pw_length ] + pager_telephonenumber: + default: [] + description: + - List of pager telephone numbers. + aliases: [ pagerTelephonenumber ] + type: list + elements: str + phone: + description: + - List of telephone numbers. 
+ type: list + elements: str + default: [] + postcode: + description: + - Postal code of users business address. + type: str + primary_group: + description: + - Primary group. This must be the group LDAP DN. + - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN). + aliases: [ primaryGroup ] + type: str + profilepath: + description: + - Windows profile directory + type: str + pwd_change_next_login: + choices: [ '0', '1' ] + description: + - Change password on next login. + aliases: [ pwdChangeNextLogin ] + type: str + room_number: + description: + - Room number of users business address. + aliases: [ roomNumber ] + type: str + samba_privileges: + description: + - "Samba privilege, like allow printer administration, do domain + join." + aliases: [ sambaPrivileges ] + type: list + elements: str + default: [] + samba_user_workstations: + description: + - Allow the authentication only on this Microsoft Windows host. + aliases: [ sambaUserWorkstations ] + type: list + elements: str + default: [] + sambahome: + description: + - Windows home path, e.g. C('\\$FQDN\$USERNAME'). + type: str + scriptpath: + description: + - Windows logon script. + type: str + secretary: + default: [] + description: + - A list of superiors as LDAP DNs. + type: list + elements: str + serviceprovider: + default: [''] + description: + - Enable user for the following service providers. + type: list + elements: str + shell: + default: '/bin/bash' + description: + - Login shell + type: str + street: + description: + - Street of users business address. + type: str + title: + description: + - Title, e.g. C(Prof.). + type: str + unixhome: + description: + - Unix home directory + - If not specified, it defaults to C(/home/$USERNAME). + type: str + userexpiry: + description: + - Account expiry date, e.g. C(1999-12-31). + - If not specified, it defaults to the current day plus one year. + type: str + position: + default: '' + description: + - "Define the whole position of users object inside the LDAP tree, + e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)." + type: str + update_password: + default: always + choices: [ always, on_create ] + description: + - "C(always) will update passwords if they differ. + C(on_create) will only set the password for newly created users." + type: str + ou: + default: '' + description: + - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for + LDAP OU C(ou=school,dc=example,dc=com)." + type: str + subpath: + default: 'cn=users' + description: + - "LDAP subpath inside the organizational unit, e.g. + C(cn=teachers,cn=users) for LDAP container + C(cn=teachers,cn=users,dc=example,dc=com)." 
+ type: str +''' + + +EXAMPLES = ''' +- name: Create a user on a UCS + community.general.udm_user: + name: FooBar + password: secure_password + firstname: Foo + lastname: Bar + +- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com) + community.general.udm_user: + name: foo + password: secure_password + firstname: Foo + lastname: Bar + ou: school + subpath: 'cn=teachers,cn=users' + +# or define the position +- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com) + community.general.udm_user: + name: foo + password: secure_password + firstname: Foo + lastname: Bar + position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com' +''' + + +RETURN = '''# ''' + +import crypt +from datetime import date, timedelta + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) + + +def main(): + expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d") + module = AnsibleModule( + argument_spec=dict( + birthday=dict(type='str'), + city=dict(type='str'), + country=dict(type='str'), + department_number=dict(type='str', + aliases=['departmentNumber']), + description=dict(type='str'), + display_name=dict(type='str', + aliases=['displayName']), + email=dict(default=[''], + type='list', + elements='str'), + employee_number=dict(type='str', + aliases=['employeeNumber']), + employee_type=dict(type='str', + aliases=['employeeType']), + firstname=dict(type='str'), + gecos=dict(type='str'), + groups=dict(default=[], + type='list', + elements='str'), + home_share=dict(type='str', + aliases=['homeShare']), + home_share_path=dict(type='str', + aliases=['homeSharePath']), + home_telephone_number=dict(default=[], + type='list', + elements='str', + aliases=['homeTelephoneNumber']), + homedrive=dict(type='str'), + lastname=dict(type='str'), + mail_alternative_address=dict(default=[], + type='list', + elements='str', + aliases=['mailAlternativeAddress']), + mail_home_server=dict(type='str', + aliases=['mailHomeServer']), + mail_primary_address=dict(type='str', + aliases=['mailPrimaryAddress']), + mobile_telephone_number=dict(default=[], + type='list', + elements='str', + aliases=['mobileTelephoneNumber']), + organisation=dict(type='str', + aliases=['organization']), + overridePWHistory=dict(default=False, + type='bool', + aliases=['override_pw_history']), + overridePWLength=dict(default=False, + type='bool', + aliases=['override_pw_length']), + pager_telephonenumber=dict(default=[], + type='list', + elements='str', + aliases=['pagerTelephonenumber']), + password=dict(type='str', + no_log=True), + phone=dict(default=[], + type='list', + elements='str'), + postcode=dict(type='str'), + primary_group=dict(type='str', + aliases=['primaryGroup']), + profilepath=dict(type='str'), + pwd_change_next_login=dict(type='str', + choices=['0', '1'], + aliases=['pwdChangeNextLogin']), + room_number=dict(type='str', + aliases=['roomNumber']), + samba_privileges=dict(default=[], + type='list', + elements='str', + aliases=['sambaPrivileges']), + samba_user_workstations=dict(default=[], + type='list', + elements='str', + aliases=['sambaUserWorkstations']), + sambahome=dict(type='str'), + scriptpath=dict(type='str'), + secretary=dict(default=[], + type='list', + elements='str'), + serviceprovider=dict(default=[''], + type='list', + elements='str'), + 
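+ # NOTE: with update_password=always (the default), main() below only + # rewrites the password when crypt-hashing the supplied value against + # the stored hash shows a difference; 'on_create' leaves existing + # passwords untouched.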
shell=dict(default='/bin/bash', + type='str'), + street=dict(type='str'), + title=dict(type='str'), + unixhome=dict(type='str'), + userexpiry=dict(type='str'), + username=dict(required=True, + aliases=['name'], + type='str'), + position=dict(default='', + type='str'), + update_password=dict(default='always', + choices=['always', 'on_create'], + type='str'), + ou=dict(default='', + type='str'), + subpath=dict(default='cn=users', + type='str'), + state=dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if=([ + ('state', 'present', ['firstname', 'lastname', 'password']) + ]) + ) + username = module.params['username'] + position = module.params['position'] + ou = module.params['ou'] + subpath = module.params['subpath'] + state = module.params['state'] + changed = False + diff = None + + users = list(ldap_search( + '(&(objectClass=posixAccount)(uid={0}))'.format(username), + attr=['uid'] + )) + if position != '': + container = position + else: + if ou != '': + ou = 'ou={0},'.format(ou) + if subpath != '': + subpath = '{0},'.format(subpath) + container = '{0}{1}{2}'.format(subpath, ou, base_dn()) + user_dn = 'uid={0},{1}'.format(username, container) + + exists = bool(len(users)) + + if state == 'present': + try: + if not exists: + obj = umc_module_for_add('users/user', container) + else: + obj = umc_module_for_edit('users/user', user_dn) + + if module.params['displayName'] is None: + module.params['displayName'] = '{0} {1}'.format( + module.params['firstname'], + module.params['lastname'] + ) + if module.params['unixhome'] is None: + module.params['unixhome'] = '/home/{0}'.format( + module.params['username'] + ) + for k in obj.keys(): + if (k != 'password' and + k != 'groups' and + k != 'overridePWHistory' and + k in module.params and + module.params[k] is not None): + obj[k] = module.params[k] + # handle some special values + obj['e-mail'] = module.params['email'] + if 'userexpiry' in obj and obj.get('userexpiry') is None: + obj['userexpiry'] = expiry + password = module.params['password'] + if obj['password'] is None: + obj['password'] = password + if module.params['update_password'] == 'always': + old_password = obj['password'].split('}', 2)[1] + if crypt.crypt(password, old_password) != old_password: + obj['overridePWHistory'] = module.params['overridePWHistory'] + obj['overridePWLength'] = module.params['overridePWLength'] + obj['password'] = password + + diff = obj.diff() + if exists: + for k in obj.keys(): + if obj.hasChanged(k): + changed = True + else: + changed = True + if not module.check_mode: + if not exists: + obj.create() + elif changed: + obj.modify() + except Exception: + module.fail_json( + msg="Creating/editing user {0} in {1} failed".format( + username, + container + ) + ) + try: + groups = module.params['groups'] + if groups: + filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format( + ')(cn='.join(groups) + ) + group_dns = list(ldap_search(filter, attr=['dn'])) + for dn in group_dns: + grp = umc_module_for_edit('groups/group', dn[0]) + if user_dn not in grp['users']: + grp['users'].append(user_dn) + if not module.check_mode: + grp.modify() + changed = True + except Exception: + module.fail_json( + msg="Adding groups to user {0} failed".format(username) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('users/user', user_dn) + if not module.check_mode: + obj.remove() + changed = True + except Exception: + module.fail_json( + msg="Removing user {0} failed".format(username) + ) + + 
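+ # Illustrative sketch (example values, not real data) of the change + # detection used in the 'present' branch above: UDM stores the password as + # '{crypt}<hash>', and re-crypting the candidate with the stored hash as + # salt reproduces the hash only on a match: + # + # stored = '{crypt}$6$saltsalt$...' + # old = stored.split('}', 2)[1] + # unchanged = (crypt.crypt(candidate, old) == old)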
module.exit_json( + changed=changed, + username=username, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/ufw.py b/ansible_collections/community/general/plugins/modules/ufw.py new file mode 100644 index 000000000..45c98fd63 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/ufw.py @@ -0,0 +1,606 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Ahti Kitsik +# Copyright (c) 2014, Jarno Keskikangas +# Copyright (c) 2013, Aleksey Ovcharenko +# Copyright (c) 2013, James Martin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ufw +short_description: Manage firewall with UFW +description: + - Manage firewall with UFW. +author: + - Aleksey Ovcharenko (@ovcharenko) + - Jarno Keskikangas (@pyykkis) + - Ahti Kitsik (@ahtik) +notes: + - See C(man ufw) for more examples. +requirements: + - C(ufw) package +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - C(enabled) reloads firewall and enables firewall on boot. + - C(disabled) unloads firewall and disables firewall on boot. + - C(reloaded) reloads firewall. + - C(reset) disables and resets firewall to installation defaults. + type: str + choices: [ disabled, enabled, reloaded, reset ] + default: + description: + - Change the default policy for incoming or outgoing traffic. + type: str + choices: [ allow, deny, reject ] + aliases: [ policy ] + direction: + description: + - Select direction for a rule or default policy command. Mutually + exclusive with I(interface_in) and I(interface_out). + type: str + choices: [ in, incoming, out, outgoing, routed ] + logging: + description: + - Toggles logging. Logged packets use the LOG_KERN syslog facility. + type: str + choices: [ 'on', 'off', low, medium, high, full ] + insert: + description: + - Insert the corresponding rule as rule number NUM. + - Note that ufw numbers rules starting with 1. + - If I(delete=true) and a value is provided for I(insert), + then I(insert) is ignored. + type: int + insert_relative_to: + description: + - Allows one to interpret the index in I(insert) relative to a position. + - C(zero) interprets the rule number as an absolute index (i.e. 1 is + the first rule). + - C(first-ipv4) interprets the rule number relative to the index of the + first IPv4 rule, or relative to the position where the first IPv4 rule + would be if there is currently none. + - C(last-ipv4) interprets the rule number relative to the index of the + last IPv4 rule, or relative to the position where the last IPv4 rule + would be if there is currently none. + - C(first-ipv6) interprets the rule number relative to the index of the + first IPv6 rule, or relative to the position where the first IPv6 rule + would be if there is currently none. + - C(last-ipv6) interprets the rule number relative to the index of the + last IPv6 rule, or relative to the position where the last IPv6 rule + would be if there is currently none.
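+      # Editorial example (sketch of the numbering logic in main() below): with numbered + # rules [1] IPv4, [2] IPv4, [3] IPv6, I(insert=1) combined with + # I(insert_relative_to=last-ipv4) resolves to absolute position 3 (last IPv4 index 2, plus 1).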
+ type: str + choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ] + default: zero + rule: + description: + - Add firewall rule. + type: str + choices: [ allow, deny, limit, reject ] + log: + description: + - Log new connections matched to this rule. + type: bool + default: false + from_ip: + description: + - Source IP address. + type: str + default: any + aliases: [ from, src ] + from_port: + description: + - Source port. + type: str + to_ip: + description: + - Destination IP address. + type: str + default: any + aliases: [ dest, to ] + to_port: + description: + - Destination port. + type: str + aliases: [ port ] + proto: + description: + - TCP/IP protocol. + type: str + choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ] + aliases: [ protocol ] + name: + description: + - Use profile located in C(/etc/ufw/applications.d). + type: str + aliases: [ app ] + delete: + description: + - Delete rule. + - If I(delete=true) and a value is provided for I(insert), + then I(insert) is ignored. + type: bool + default: false + interface: + description: + - Specify interface for the rule. The direction (in or out) used + for the interface depends on the value of I(direction). See + I(interface_in) and I(interface_out) for routed rules that need + to supply both an input and output interface. Mutually + exclusive with I(interface_in) and I(interface_out). + type: str + aliases: [ if ] + interface_in: + description: + - Specify input interface for the rule. This is mutually + exclusive with I(direction) and I(interface). However, it is + compatible with I(interface_out) for routed rules. + type: str + aliases: [ if_in ] + version_added: '0.2.0' + interface_out: + description: + - Specify output interface for the rule. This is mutually + exclusive with I(direction) and I(interface). However, it is + compatible with I(interface_in) for routed rules. + type: str + aliases: [ if_out ] + version_added: '0.2.0' + route: + description: + - Apply the rule to routed/forwarded packets. + type: bool + default: false + comment: + description: + - Add a comment to the rule. Requires UFW version >=0.35. + type: str +''' + +EXAMPLES = r''' +- name: Allow everything and enable UFW + community.general.ufw: + state: enabled + policy: allow + +- name: Set logging + community.general.ufw: + logging: 'on' + +# Sometimes it is desirable to let the sender know when traffic is +# being denied, rather than simply ignoring it. In these cases, use +# reject instead of deny. In addition, log rejected connections: +- community.general.ufw: + rule: reject + port: auth + log: true + +# ufw supports connection rate limiting, which is useful for protecting +# against brute-force login attacks. ufw will deny connections if an IP +# address has attempted to initiate 6 or more connections in the last +# 30 seconds. See http://www.debian-administration.org/articles/187 +# for details. Typical usage is: +- community.general.ufw: + rule: limit + port: ssh + proto: tcp + +# Allow OpenSSH. (Note that as ufw manages its own state, simply removing +# a rule=allow task can leave those ports exposed.
Either use delete=true +# or a separate state=reset task) +- community.general.ufw: + rule: allow + name: OpenSSH + +- name: Delete OpenSSH rule + community.general.ufw: + rule: allow + name: OpenSSH + delete: true + +- name: Deny all access to port 53 + community.general.ufw: + rule: deny + port: '53' + +- name: Allow port range 60000-61000 + community.general.ufw: + rule: allow + port: 60000:61000 + proto: tcp + +- name: Allow all access to tcp port 80 + community.general.ufw: + rule: allow + port: '80' + proto: tcp + +- name: Allow all access from RFC1918 networks to this host + community.general.ufw: + rule: allow + src: '{{ item }}' + loop: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + +- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment + community.general.ufw: + rule: deny + proto: udp + src: 1.2.3.4 + port: '514' + comment: Block syslog + +- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 + community.general.ufw: + rule: allow + interface: eth0 + direction: in + proto: udp + src: 1.2.3.5 + from_port: '5469' + dest: 1.2.3.4 + to_port: '5469' + +# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. +- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host + community.general.ufw: + rule: deny + proto: tcp + src: 2001:db8::/32 + port: '25' + +- name: Deny all IPv6 traffic to tcp port 20 on this host + # this should be the first IPv6 rule + community.general.ufw: + rule: deny + proto: tcp + port: '20' + to_ip: "::" + insert: 0 + insert_relative_to: first-ipv6 + +- name: Deny all IPv4 traffic to tcp port 20 on this host + # This should be the third to last IPv4 rule + # (insert: -1 addresses the second to last IPv4 rule; + # so the new rule will be inserted before the second + # to last IPv4 rule, and will become the third to last + # IPv4 rule.)
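+  #  (Editorial sketch: with five IPv4 rules numbered 1-5, insert: -1 with + #   last-ipv4 resolves to absolute position 4, that is 5 + (-1).)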
+ community.general.ufw: + rule: deny + proto: tcp + port: '20' + to_ip: "0.0.0.0" + insert: -1 + insert_relative_to: last-ipv4 + +# Can be used to further restrict a global FORWARD policy set to allow +- name: Deny forwarded/routed traffic from subnet 192.0.2.0/24 to subnet 198.51.100.0/24 + community.general.ufw: + rule: deny + route: true + src: 192.0.2.0/24 + dest: 198.51.100.0/24 +''' + +import re + +from operator import itemgetter + +from ansible.module_utils.basic import AnsibleModule + + +def compile_ipv4_regexp(): + r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}" + r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])" + return re.compile(r) + + +def compile_ipv6_regexp(): + """ + validation pattern provided by : + https://stackoverflow.com/questions/53497/regular-expression-that-matches- + valid-ipv6-addresses#answer-17871737 + """ + r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:" + r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}" + r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})" + r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]" + r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]" + r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})" + r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]" + r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}" + r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}" + r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))" + return re.compile(r) + + +def main(): + command_keys = ['state', 'default', 'rule', 'logging'] + + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']), + default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']), + logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']), + direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']), + delete=dict(type='bool', default=False), + route=dict(type='bool', default=False), + insert=dict(type='int'), + insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'), + rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']), + interface=dict(type='str', aliases=['if']), + interface_in=dict(type='str', aliases=['if_in']), + interface_out=dict(type='str', aliases=['if_out']), + log=dict(type='bool', default=False), + from_ip=dict(type='str', default='any', aliases=['from', 'src']), + from_port=dict(type='str'), + to_ip=dict(type='str', default='any', aliases=['dest', 'to']), + to_port=dict(type='str', aliases=['port']), + proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']), + name=dict(type='str', aliases=['app']), + comment=dict(type='str'), + ), + supports_check_mode=True, + mutually_exclusive=[ + ['name', 'proto', 'logging'], + # Mutual exclusivity with `interface` implied by `required_by`.
+ ['direction', 'interface_in'], + ['direction', 'interface_out'], + ], + required_one_of=([command_keys]), + required_by=dict( + interface=('direction', ), + ), + ) + + cmds = [] + + ipv4_regexp = compile_ipv4_regexp() + ipv6_regexp = compile_ipv6_regexp() + + # Note: despite its name, this helper keeps only the lines that do start with pattern. + def filter_line_that_not_start_with(pattern, content): + return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)]) + + def filter_line_that_contains(pattern, content): + return [line for line in content.splitlines(True) if pattern in line] + + def filter_line_that_not_contains(pattern, content): + return ''.join([line for line in content.splitlines(True) if pattern not in line]) + + def filter_line_that_match_func(match_func, content): + return ''.join([line for line in content.splitlines(True) if match_func(line) is not None]) + + def filter_line_that_contains_ipv4(content): + return filter_line_that_match_func(ipv4_regexp.search, content) + + def filter_line_that_contains_ipv6(content): + return filter_line_that_match_func(ipv6_regexp.search, content) + + def is_starting_by_ipv4(ip): + return ipv4_regexp.match(ip) is not None + + def is_starting_by_ipv6(ip): + return ipv6_regexp.match(ip) is not None + + def execute(cmd, ignore_error=False): + # cmd is a list of [gate, token] lists (or [token] singletons): entries whose + # first element is truthy are kept, and the last element of each is joined, + # e.g. [[ufw_bin], [False, 'delete'], [True, 'insert 3'], ['allow']] + # becomes "<ufw_bin> insert 3 allow". + cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))) + + cmds.append(cmd) + (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"}) + + if rc != 0 and not ignore_error: + module.fail_json(msg=err or out, commands=cmds) + + return out + + def get_current_rules(): + user_rules_files = ["/lib/ufw/user.rules", + "/lib/ufw/user6.rules", + "/etc/ufw/user.rules", + "/etc/ufw/user6.rules", + "/var/lib/ufw/user.rules", + "/var/lib/ufw/user6.rules"] + + cmd = [[grep_bin], ["-h"], ["'^### tuple'"]] + + cmd.extend([[f] for f in user_rules_files]) + return execute(cmd, ignore_error=True) + + def ufw_version(): + """ + Returns the major and minor version of ufw installed on the system.
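+        For example (editorial sketch of the expected output): the first line +        "ufw 0.36" parses to (0, 36, 0).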
+ """ + out = execute([[ufw_bin], ["--version"]]) + + lines = [x for x in out.split('\n') if x.strip() != ''] + if len(lines) == 0: + module.fail_json(msg="Failed to get ufw version.", rc=0, out=out) + + matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0]) + if matches is None: + module.fail_json(msg="Failed to get ufw version.", rc=0, out=out) + + # Convert version to numbers + major = int(matches.group(1)) + minor = int(matches.group(2)) + rev = 0 + if matches.group(3) is not None: + rev = int(matches.group(3)) + + return major, minor, rev + + params = module.params + + commands = dict((key, params[key]) for key in command_keys if params[key]) + + # Ensure ufw is available + ufw_bin = module.get_bin_path('ufw', True) + grep_bin = module.get_bin_path('grep', True) + + # Save the pre state and rules in order to recognize changes + pre_state = execute([[ufw_bin], ['status verbose']]) + pre_rules = get_current_rules() + + changed = False + + # Execute filter + for (command, value) in commands.items(): + + cmd = [[ufw_bin], [module.check_mode, '--dry-run']] + + if command == 'state': + states = {'enabled': 'enable', 'disabled': 'disable', + 'reloaded': 'reload', 'reset': 'reset'} + + if value in ['reloaded', 'reset']: + changed = True + + if module.check_mode: + # "active" would also match "inactive", hence the space + ufw_enabled = pre_state.find(" active") != -1 + if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled): + changed = True + else: + execute(cmd + [['-f'], [states[value]]]) + + elif command == 'logging': + extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state) + if extract: + current_level = extract.group(2) + current_on_off_value = extract.group(1) + if value != "off": + if current_on_off_value == "off": + changed = True + elif value != "on" and value != current_level: + changed = True + elif current_on_off_value != "off": + changed = True + else: + changed = True + + if not module.check_mode: + execute(cmd + [[command], [value]]) + + elif command == 'default': + if params['direction'] not in ['outgoing', 'incoming', 'routed', None]: + module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.') + if module.check_mode: + regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)' + extract = re.search(regexp, pre_state) + if extract is not None: + current_default_values = {} + current_default_values["incoming"] = extract.group(1) + current_default_values["outgoing"] = extract.group(2) + current_default_values["routed"] = extract.group(3) + v = current_default_values[params['direction'] or 'incoming'] + if v not in (value, 'disabled'): + changed = True + else: + changed = True + else: + execute(cmd + [[command], [value], [params['direction']]]) + + elif command == 'rule': + if params['direction'] not in ['in', 'out', None]: + module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.') + if not params['route'] and params['interface_in'] and params['interface_out']: + module.fail_json(msg='Only route rules can combine ' + 'interface_in and interface_out') + # Rules are constructed according to the long format + # + # ufw [--dry-run] [route] [delete | insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ + # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ + # [proto protocol] [app application] [comment 
COMMENT] + cmd.append([module.boolean(params['route']), 'route']) + cmd.append([module.boolean(params['delete']), 'delete']) + if params['insert'] is not None and not params['delete']: + relative_to_cmd = params['insert_relative_to'] + if relative_to_cmd == 'zero': + insert_to = params['insert'] + else: + (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered']) + numbered_line_re = re.compile(R'^\[ *([0-9]+)\] ') + lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()] + lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher] + last_number = max([no for (no, ipv6) in lines]) if lines else 0 + has_ipv4 = any(not ipv6 for (no, ipv6) in lines) + has_ipv6 = any(ipv6 for (no, ipv6) in lines) + if relative_to_cmd == 'first-ipv4': + relative_to = 1 + elif relative_to_cmd == 'last-ipv4': + relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1 + elif relative_to_cmd == 'first-ipv6': + relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1 + elif relative_to_cmd == 'last-ipv6': + relative_to = last_number if has_ipv6 else last_number + 1 + insert_to = params['insert'] + relative_to + if insert_to > last_number: + # ufw does not like it when the insert number is larger than the + # maximal rule number for IPv4/IPv6. + insert_to = None + cmd.append([insert_to is not None, "insert %s" % insert_to]) + cmd.append([value]) + cmd.append([params['direction'], "%s" % params['direction']]) + cmd.append([params['interface'], "on %s" % params['interface']]) + cmd.append([params['interface_in'], "in on %s" % params['interface_in']]) + cmd.append([params['interface_out'], "out on %s" % params['interface_out']]) + cmd.append([module.boolean(params['log']), 'log']) + + for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"), + ('to_ip', "to %s"), ('to_port', "port %s"), + ('proto', "proto %s"), ('name', "app '%s'")]: + value = params[key] + cmd.append([value, template % (value)]) + + ufw_major, ufw_minor, dummy = ufw_version() + # comment is supported only in ufw version after 0.35 + if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0: + cmd.append([params['comment'], "comment '%s'" % params['comment']]) + + rules_dry = execute(cmd) + + if module.check_mode: + + nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry)) + + if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))): + + rules_dry = filter_line_that_not_start_with("### tuple", rules_dry) + # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules + if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']): + if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry): + changed = True + elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']): + if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry): + changed = True + elif pre_rules != rules_dry: + changed = True + + # Get the new state + if module.check_mode: + return module.exit_json(changed=changed, commands=cmds) + else: + post_state = execute([[ufw_bin], ['status'], ['verbose']]) + if not changed: + post_rules = get_current_rules() + changed = (pre_state != post_state) or (pre_rules != post_rules) + return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip()) + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/community/general/plugins/modules/uptimerobot.py b/ansible_collections/community/general/plugins/modules/uptimerobot.py new file mode 100644 index 000000000..c1894e90a --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/uptimerobot.py @@ -0,0 +1,157 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: uptimerobot +short_description: Pause and start Uptime Robot monitoring +description: + - This module will let you start and pause Uptime Robot Monitoring +author: "Nate Kingsley (@nate-kingsley)" +requirements: + - Valid Uptime Robot API Key +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + type: str + description: + - Define whether or not the monitor should be running or paused. + required: true + choices: [ "started", "paused" ] + monitorid: + type: str + description: + - ID of the monitor to check. + required: true + apikey: + type: str + description: + - Uptime Robot API key. + required: true +notes: + - Support for adding and removing monitors and alert contacts has not yet been implemented. +''' + +EXAMPLES = ''' +- name: Pause the monitor with an ID of 12345 + community.general.uptimerobot: + monitorid: 12345 + apikey: 12345-1234512345 + state: paused + +- name: Start the monitor with an ID of 12345 + community.general.uptimerobot: + monitorid: 12345 + apikey: 12345-1234512345 + state: started +''' + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_text + + +API_BASE = "https://api.uptimerobot.com/" + +API_ACTIONS = dict( + status='getMonitors?', + editMonitor='editMonitor?' 
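+    # Editorial sketch (derived from the helper functions below, not an official API + # reference): checkID() issues a GET such as + # https://api.uptimerobot.com/getMonitors?apiKey=KEY&monitors=12345&... while + # startMonitor()/pauseMonitor() call editMonitor with monitorStatus=1 or 0.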
+) + +API_FORMAT = 'json' +API_NOJSONCALLBACK = 1 +CHANGED_STATE = False +SUPPORTS_CHECK_MODE = False + + +def checkID(module, params): + + data = urlencode(params) + full_uri = API_BASE + API_ACTIONS['status'] + data + req, info = fetch_url(module, full_uri) + result = to_text(req.read()) + jsonresult = json.loads(result) + req.close() + return jsonresult + + +def startMonitor(module, params): + + params['monitorStatus'] = 1 + data = urlencode(params) + full_uri = API_BASE + API_ACTIONS['editMonitor'] + data + req, info = fetch_url(module, full_uri) + result = to_text(req.read()) + jsonresult = json.loads(result) + req.close() + return jsonresult['stat'] + + +def pauseMonitor(module, params): + + params['monitorStatus'] = 0 + data = urlencode(params) + full_uri = API_BASE + API_ACTIONS['editMonitor'] + data + req, info = fetch_url(module, full_uri) + result = to_text(req.read()) + jsonresult = json.loads(result) + req.close() + return jsonresult['stat'] + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['started', 'paused']), + apikey=dict(required=True, no_log=True), + monitorid=dict(required=True) + ), + supports_check_mode=SUPPORTS_CHECK_MODE + ) + + params = dict( + apiKey=module.params['apikey'], + monitors=module.params['monitorid'], + monitorID=module.params['monitorid'], + format=API_FORMAT, + noJsonCallback=API_NOJSONCALLBACK + ) + + check_result = checkID(module, params) + + if check_result['stat'] != "ok": + module.fail_json( + msg="failed", + result=check_result['message'] + ) + + if module.params['state'] == 'started': + monitor_result = startMonitor(module, params) + else: + monitor_result = pauseMonitor(module, params) + + module.exit_json( + msg="success", + result=monitor_result + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/urpmi.py b/ansible_collections/community/general/plugins/modules/urpmi.py new file mode 100644 index 000000000..34e099e4d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/urpmi.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Philippe Makowski +# Written by Philippe Makowski +# Based on apt module written by Matthew Williams + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: urpmi +short_description: Urpmi manager +description: + - Manages packages with I(urpmi) (such as for Mageia or Mandriva) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - A list of package names to install, upgrade or remove. + required: true + aliases: [ package, pkg ] + type: list + elements: str + state: + description: + - Indicates the desired package state. + choices: [ absent, present, installed, removed ] + default: present + type: str + update_cache: + description: + - Update the package database first C(urpmi.update -a). + type: bool + default: false + no_recommends: + description: + - Corresponds to the C(--no-recommends) option for I(urpmi). + type: bool + default: true + force: + description: + - Assume "yes" is the answer to any question urpmi has to ask. + Corresponds to the C(--force) option for I(urpmi). 
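+      # Editorial sketch (derived from install_packages() below): with the default booleans + # the module effectively runs C(urpmi --auto --force --quiet --no-recommends <packages>).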
+ type: bool + default: true + root: + description: + - Specifies an alternative install root, relative to which all packages will be installed. + Corresponds to the C(--root) option for I(urpmi). + aliases: [ installroot ] + type: str +author: +- Philippe Makowski (@pmakowski) +''' + +EXAMPLES = ''' +- name: Install package foo + community.general.urpmi: + pkg: foo + state: present + +- name: Remove package foo + community.general.urpmi: + pkg: foo + state: absent + +- name: Remove packages foo and bar + community.general.urpmi: + pkg: foo,bar + state: absent + +- name: Update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists) + community.general.urpmi: + name: bar + state: present + update_cache: true +''' + + +from ansible.module_utils.basic import AnsibleModule + + +def query_package(module, name, root): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rpm_path = module.get_bin_path("rpm", True) + cmd = "%s -q %s %s" % (rpm_path, name, root_option(root)) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc == 0: + return True + else: + return False + + +def query_package_provides(module, name, root): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rpm_path = module.get_bin_path("rpm", True) + cmd = "%s -q --whatprovides %s %s" % (rpm_path, name, root_option(root)) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + return rc == 0 + + +def update_package_db(module): + + urpmiupdate_path = module.get_bin_path("urpmi.update", True) + cmd = "%s -a -q" % (urpmiupdate_path,) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc != 0: + module.fail_json(msg="could not update package db") + + +def remove_packages(module, packages, root): + + remove_c = 0 + # Using a for loop so that, in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package, root): + continue + + urpme_path = module.get_bin_path("urpme", True) + cmd = "%s --auto %s %s" % (urpme_path, root_option(root), package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, pkgspec, root, force=True, no_recommends=True): + + packages = "" + for package in pkgspec: + if not query_package_provides(module, package, root): + packages += "'%s' " % package + + if len(packages) != 0: + if no_recommends: + no_recommends_yes = '--no-recommends' + else: + no_recommends_yes = '' + + if force: + force_yes = '--force' + else: + force_yes = '' + + urpmi_path = module.get_bin_path("urpmi", True) + cmd = ("%s --auto %s --quiet %s %s %s" % (urpmi_path, force_yes, + no_recommends_yes, + root_option(root), + packages)) + + rc, out, err = module.run_command(cmd) + + for package in pkgspec: + if not query_package_provides(module, package, root): + module.fail_json(msg="'urpmi %s' failed: %s" % (package, err)) + + # urpmi always has exit code 0 if --force is used + if rc: + module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err)) + else: + module.exit_json(changed=True, msg="%s present(s)" % packages) + else: + module.exit_json(changed=False) + + +def
root_option(root): + if root: + return "--root=%s" % (root) + else: + return "" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', + choices=['absent', 'installed', 'present', 'removed']), + update_cache=dict(type='bool', default=False), + force=dict(type='bool', default=True), + no_recommends=dict(type='bool', default=True), + name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']), + root=dict(type='str', aliases=['installroot']), + ), + ) + + p = module.params + + if p['update_cache']: + update_package_db(module) + + if p['state'] in ['installed', 'present']: + install_packages(module, p['name'], p['root'], p['force'], p['no_recommends']) + + elif p['state'] in ['removed', 'absent']: + remove_packages(module, p['name'], p['root']) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_aaa_group.py b/ansible_collections/community/general/plugins/modules/utm_aaa_group.py new file mode 100644 index 000000000..9c595284d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_aaa_group.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_aaa_group + +author: + - Johannes Brunswicker (@MatrixCrawler) + +short_description: Create, update or destroy an aaa group object in Sophos UTM + +description: + - Create, update or destroy an aaa group object in Sophos UTM. + - This module needs to have the REST Ability of the UTM to be activated. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + description: + - The name of the object. Will be used to identify the entry. + type: str + required: true + adirectory_groups: + description: + - List of adirectory group strings. + type: list + elements: str + default: [] + adirectory_groups_sids: + description: + - Dictionary of group SIDs. + type: dict + default: {} + backend_match: + description: + - The backend for the group. + type: str + choices: + - none + - adirectory + - edirectory + - radius + - tacacs + - ldap + default: none + comment: + description: + - Comment that describes the AAA group. + type: str + default: '' + dynamic: + description: + - Group type. Is static if none is selected. + type: str + default: none + choices: + - none + - ipsec_dn + - directory_groups + edirectory_groups: + description: + - List of edirectory group strings. + type: list + elements: str + default: [] + ipsec_dn: + description: + - The ipsec dn string. + type: str + default: '' + ldap_attribute: + description: + - The ldap attribute to check against. + type: str + default: '' + ldap_attribute_value: + description: + - The ldap attribute value to check against. + type: str + default: '' + members: + description: + - A list of user ref names (aaa/user). + type: list + elements: str + default: [] + network: + description: + - The network reference name. The object contains the known IP addresses for the authentication object (network/aaa). + type: str + default: "" + radius_groups: + description: + - A list of radius group strings.
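+      # Editorial note (inferred from the option descriptions, not verified against the + # UTM API): a static group (dynamic=none) lists its users in I(members), while a + # dynamic group matches users via I(ipsec_dn) or the directory group lists instead.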
+ type: list + elements: str + default: [] + tacacs_groups: + description: + - A list of tacacs group strings. + type: list + elements: str + default: [] + +extends_documentation_fragment: +- community.general.utm +- community.general.attributes + +''' + +EXAMPLES = """ +- name: Create UTM aaa_group + community.general.utm_aaa_group: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestAAAGroupEntry + backend_match: ldap + dynamic: directory_groups + ldap_attribute: memberof + ldap_attribute_value: "cn=groupname,ou=Groups,dc=mydomain,dc=com" + network: REF_OBJECT_STRING + state: present + +- name: Remove UTM aaa_group + community.general.utm_aaa_group: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestAAAGroupEntry + state: absent +""" + +RETURN = """ +result: + description: The utm object that was created. + returned: success + type: complex + contains: + _ref: + description: The reference name of the object. + type: str + _locked: + description: Whether or not the object is currently locked. + type: bool + _type: + description: The type of the object. + type: str + name: + description: The name of the object. + type: str + adirectory_groups: + description: List of Active Directory Groups. + type: str + adirectory_groups_sids: + description: List of Active Directory Groups SIDS. + type: list + backend_match: + description: The backend to use. + type: str + comment: + description: The comment string. + type: str + dynamic: + description: Whether the group match is ipsec_dn or directory_group. + type: str + edirectory_groups: + description: List of eDirectory Groups. + type: str + ipsec_dn: + description: ipsec_dn identifier to match. + type: str + ldap_attribute: + description: The LDAP Attribute to match against. + type: str + ldap_attribute_value: + description: The LDAP Attribute Value to match against. + type: str + members: + description: List of member identifiers of the group. + type: list + network: + description: The identifier of the network (network/aaa). + type: str + radius_group: + description: The radius group identifier. + type: str + tacacs_group: + description: The tacacs group identifier.
+ type: str +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "aaa/group" + key_to_check_for_changes = ["comment", "adirectory_groups", "adirectory_groups_sids", "backend_match", "dynamic", + "edirectory_groups", "ipsec_dn", "ldap_attribute", "ldap_attribute_value", "members", + "network", "radius_groups", "tacacs_groups"] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + adirectory_groups=dict(type='list', elements='str', required=False, default=[]), + adirectory_groups_sids=dict(type='dict', required=False, default={}), + backend_match=dict(type='str', required=False, default="none", + choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]), + comment=dict(type='str', required=False, default=""), + dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]), + edirectory_groups=dict(type='list', elements='str', required=False, default=[]), + ipsec_dn=dict(type='str', required=False, default=""), + ldap_attribute=dict(type='str', required=False, default=""), + ldap_attribute_value=dict(type='str', required=False, default=""), + members=dict(type='list', elements='str', required=False, default=[]), + network=dict(type='str', required=False, default=""), + radius_groups=dict(type='list', elements='str', required=False, default=[]), + tacacs_groups=dict(type='list', elements='str', required=False, default=[]), + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py b/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py new file mode 100644 index 000000000..37e01c736 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_aaa_group_info + +author: + - Johannes Brunswicker (@MatrixCrawler) + +short_description: Get info for an aaa group entry in Sophos UTM + +description: + - Get info for an aaa group entry in SOPHOS UTM. + +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + +options: + name: + type: str + description: + - The name of the object.
Will be used to identify the entry. + required: true + +extends_documentation_fragment: + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module + +''' + +EXAMPLES = """ +- name: Get info for UTM aaa_group + community.general.utm_aaa_group_info: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestAAAGroupEntry +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + adirectory_groups: + description: List of Active Directory Groups + type: str + adirectory_groups_sids: + description: List of Active Directory Groups SIDS + type: list + backend_match: + description: The backend to use + type: str + comment: + description: The comment string + type: str + dynamic: + description: Whether the group match is ipsec_dn or directory_group + type: str + edirectory_groups: + description: List of eDirectory Groups + type: str + ipsec_dn: + description: ipsec_dn identifier to match + type: str + ldap_attribute: + description: The LDAP Attribute to match against + type: str + ldap_attribute_value: + description: The LDAP Attribute Value to match against + type: str + members: + description: List of member identifiers of the group + type: list + network: + description: The identifier of the network (network/aaa) + type: str + radius_group: + description: The radius group identifier + type: str + tacacs_group: + description: The tacacs group identifier + type: str +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "aaa/group" + key_to_check_for_changes = [] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True) + ), + supports_check_mode=True, + ) + try: + UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py b/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py new file mode 100644 index 000000000..b944e8312 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py @@ -0,0 +1,168 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Stephan Schwarz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_ca_host_key_cert + +author: + - Stephan Schwarz (@stearz) + +short_description: Create, update or destroy ca host_key_cert entry in Sophos UTM + +description: + - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + description: + - The name of the object. Will be used to identify the entry.
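+      # Editorial note (assumption based on the shared UTM helper): the entry is looked up + # by this name, and the fields in key_to_check_for_changes in main() below decide + # whether an existing entry needs an update.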
+ required: true + type: str + ca: + description: + - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + required: true + type: str + meta: + description: + - A reference to an existing utm_ca_meta_x509 object. + required: true + type: str + certificate: + description: + - The certificate in PEM format. + required: true + type: str + comment: + description: + - Optional comment string. + type: str + encrypted: + description: + - Optionally enable encryption. + default: false + type: bool + key: + description: + - Optional private key in PEM format. + type: str + +extends_documentation_fragment: +- community.general.utm +- community.general.attributes + +''' + +EXAMPLES = """ +- name: Create a ca_host_key_cert entry + community.general.utm_ca_host_key_cert: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestHostKeyCertEntry + ca: REF_ca/signing_ca_OBJECT_STRING + meta: REF_ca/meta_x509_OBJECT_STRING + certificate: | + --- BEGIN CERTIFICATE --- + . . . + . . . + . . . + --- END CERTIFICATE --- + state: present + +- name: Remove a ca_host_key_cert entry + community.general.utm_ca_host_key_cert: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestHostKeyCertEntry + state: absent + +- name: Read a ca_host_key_cert entry + community.general.utm_ca_host_key_cert: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestHostKeyCertEntry + state: info + +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + ca: + description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + type: str + meta: + description: A reference to an existing utm_ca_meta_x509 object. 
+ type: str + certificate: + description: The certificate in PEM format + type: str + comment: + description: Comment string (may be empty string) + type: str + encrypted: + description: If encryption is enabled + type: bool + key: + description: Private key in PEM format (may be empty string) + type: str +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "ca/host_key_cert" + key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + ca=dict(type='str', required=True), + meta=dict(type='str', required=True), + certificate=dict(type='str', required=True), + comment=dict(type='str', required=False), + encrypted=dict(type='bool', required=False, default=False), + key=dict(type='str', required=False, no_log=True), + ) + ) + try: + # This is needed because the bool value only accepts int values in the backend + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py b/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py new file mode 100644 index 000000000..d81eede69 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py @@ -0,0 +1,109 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Stephan Schwarz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_ca_host_key_cert_info + +author: + - Stephan Schwarz (@stearz) + +short_description: Get info for a ca host_key_cert entry in Sophos UTM + +description: + - Get info for a ca host_key_cert entry in SOPHOS UTM. + +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + +options: + name: + type: str + description: + - The name of the object. Will be used to identify the entry + required: true + +extends_documentation_fragment: + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +''' + +EXAMPLES = """ +- name: Get info for a ca host_key_cert entry + community.general.utm_ca_host_key_cert_info: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestHostKeyCertEntry +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + ca: + description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object. + type: str + meta: + description: A reference to an existing utm_ca_meta_x509 object. 
+ type: str + certificate: + description: The certificate in PEM format + type: str + comment: + description: Comment string (may be empty string) + type: str + encrypted: + description: If encryption is enabled + type: bool + key: + description: Private key in PEM format (may be empty string) + type: str +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "ca/host_key_cert" + key_to_check_for_changes = [] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True) + ), + supports_check_mode=True, + ) + try: + # This is needed because the bool value only accepts int values in the backend + UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_dns_host.py b/ansible_collections/community/general/plugins/modules/utm_dns_host.py new file mode 100644 index 000000000..6b3725557 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_dns_host.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_dns_host + +author: + - Johannes Brunswicker (@MatrixCrawler) + +short_description: Create, update or destroy dns entry in Sophos UTM + +description: + - Create, update or destroy a dns entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + type: str + description: + - The name of the object. Will be used to identify the entry + required: true + address: + type: str + description: + - The IPV4 Address of the entry. Can be left empty for automatic resolving. + default: 0.0.0.0 + address6: + type: str + description: + - The IPV6 Address of the entry. Can be left empty for automatic resolving. + default: "::" + comment: + type: str + description: + - An optional comment to add to the dns host object + default: '' + hostname: + type: str + description: + - The hostname for the dns host object + interface: + type: str + description: + - The reference name of the interface to use. 
If not provided the default interface will be used + default: '' + resolved: + description: + - whether the hostname's ipv4 address is already resolved or not + default: false + type: bool + resolved6: + description: + - whether the hostname's ipv6 address is already resolved or not + default: false + type: bool + timeout: + type: int + description: + - the timeout for the utm to resolve the ip address for the hostname again + default: 0 + +extends_documentation_fragment: +- community.general.utm +- community.general.attributes + +''' + +EXAMPLES = """ +- name: Create UTM dns host entry + community.general.utm_dns_host: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestDNSEntry + hostname: testentry.some.tld + state: present + +- name: Remove UTM dns host entry + community.general.utm_dns_host: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestDNSEntry + state: absent +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + name: + description: The name of the object + type: str + address: + description: The ipv4 address of the object + type: str + address6: + description: The ipv6 address of the object + type: str + comment: + description: The comment string + type: str + hostname: + description: The hostname of the object + type: str + interface: + description: The reference name of the interface the object is associated with + type: str + resolved: + description: Whether the ipv4 address is resolved or not + type: bool + resolved6: + description: Whether the ipv6 address is resolved or not + type: bool + timeout: + description: The timeout until a new resolving will be attempted + type: int +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "network/dns_host" + key_to_check_for_changes = ["comment", "hostname", "interface"] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + address=dict(type='str', required=False, default='0.0.0.0'), + address6=dict(type='str', required=False, default='::'), + comment=dict(type='str', required=False, default=""), + hostname=dict(type='str', required=False), + interface=dict(type='str', required=False, default=""), + resolved=dict(type='bool', required=False, default=False), + resolved6=dict(type='bool', required=False, default=False), + timeout=dict(type='int', required=False, default=0), + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py b/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py new file mode 100644 index 000000000..a85a46aea --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Juergen Wiebe +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, 
division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_network_interface_address + +author: + - Juergen Wiebe (@steamx) + +short_description: Create, update or destroy network/interface_address object + +description: + - Create, update or destroy a network/interface_address object in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + type: str + description: + - The name of the object. Will be used to identify the entry. + required: true + address: + type: str + description: + - The ip4 address of the network/interface_address object. + required: true + address6: + type: str + description: + - The ip6 address of the network/interface_address object. + required: false + comment: + type: str + description: + - An optional comment to add to the object + default: '' + resolved: + type: bool + description: + - Whether or not the IPv4 address is resolved + resolved6: + type: bool + description: + - Whether or not the IPv6 address is resolved + +extends_documentation_fragment: +- community.general.utm +- community.general.attributes + +''' + +EXAMPLES = """ +- name: Create a network interface address + community.general.utm_network_interface_address: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestNetworkInterfaceAddress + address: 0.0.0.0 + state: present + +- name: Remove a network interface address + community.general.utm_network_interface_address: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestNetworkInterfaceAddress + address: 0.0.0.0 + state: absent +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + address: + description: The ip4 address of the network/interface_address object + type: str + address6: + description: The ip6 address of the network/interface_address object + type: str + comment: + description: The comment string + type: str + resolved: + description: Whether or not the IPv4 address is resolved + type: bool + resolved6: + description: Whether or not the IPv6 address is resolved + type: bool +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "network/interface_address" + key_to_check_for_changes = ["comment", "address"] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + address=dict(type='str', required=True), + comment=dict(type='str', required=False, default=""), + address6=dict(type='str', required=False), + resolved=dict(type='bool', required=False), + resolved6=dict(type='bool', required=False), + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py b/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py new file mode 100644 index 000000000..9dc08ad09 --- /dev/null +++
b/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py @@ -0,0 +1,104 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Juergen Wiebe +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_network_interface_address_info + +author: + - Juergen Wiebe (@steamx) + +short_description: Get info for a network/interface_address object + +description: + - Get info for a network/interface_address object in SOPHOS UTM. + +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + +options: + name: + type: str + description: + - The name of the object. Will be used to identify the entry + required: true + +extends_documentation_fragment: + - community.general.utm + - community.general.attributes + - community.general.attributes.info_module +''' + +EXAMPLES = """ +- name: Get network interface address info + utm_proxy_interface_address_info: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestNetworkInterfaceAddress +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + address: + description: The ip4 address of the network/interface_address object + type: str + address6: + description: The ip6 address of the network/interface_address object + type: str + comment: + description: The comment string + type: str + resolved: + description: Whether or not the object is resolved + type: bool + resolved6: + description: Whether or not the object is resolved + type: bool +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "network/interface_address" + key_to_check_for_changes = [] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True) + ), + supports_check_mode=True, + ) + try: + UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py b/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py new file mode 100644 index 000000000..3b482483b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py @@ -0,0 +1,355 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Stephan Schwarz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_proxy_auth_profile + +author: + - Stephan Schwarz (@stearz) + +short_description: Create, update or destroy reverse_proxy auth_profile entry in Sophos UTM + 
+description: + - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + type: str + description: + - The name of the object. Will be used to identify the entry + required: true + aaa: + type: list + elements: str + description: + - List of references to utm_aaa objects (allowed users or groups) + required: true + basic_prompt: + type: str + description: + - The message in the basic authentication prompt + required: true + backend_mode: + type: str + description: + - Specifies if the backend server needs authentication ([Basic|None]) + default: None + choices: + - Basic + - None + backend_strip_basic_auth: + description: + - Should the login data be stripped when proxying the request to the backend host + type: bool + default: true + backend_user_prefix: + type: str + description: + - Prefix string to prepend to the username for backend authentication + default: "" + backend_user_suffix: + type: str + description: + - Suffix string to append to the username for backend authentication + default: "" + comment: + type: str + description: + - Optional comment string + default: "" + frontend_cookie: + type: str + description: + - Frontend cookie name + frontend_cookie_secret: + type: str + description: + - Frontend cookie secret + frontend_form: + type: str + description: + - Frontend authentication form name + frontend_form_template: + type: str + description: + - Frontend authentication form template + default: "" + frontend_login: + type: str + description: + - Frontend login name + frontend_logout: + type: str + description: + - Frontend logout name + frontend_mode: + type: str + description: + - Frontend authentication mode (Form|Basic) + default: Basic + choices: + - Basic + - Form + frontend_realm: + type: str + description: + - Frontend authentication realm + frontend_session_allow_persistency: + description: + - Allow session persistency + type: bool + default: false + frontend_session_lifetime: + type: int + description: + - session lifetime + required: true + frontend_session_lifetime_limited: + description: + - Specifies if limitation of session lifetime is active + type: bool + default: true + frontend_session_lifetime_scope: + type: str + description: + - scope for frontend_session_lifetime (days|hours|minutes) + default: hours + choices: + - days + - hours + - minutes + frontend_session_timeout: + type: int + description: + - session timeout + required: true + frontend_session_timeout_enabled: + description: + - Specifies if session timeout is active + type: bool + default: true + frontend_session_timeout_scope: + type: str + description: + - scope for frontend_session_timeout (days|hours|minutes) + default: minutes + choices: + - days + - hours + - minutes + logout_delegation_urls: + type: list + elements: str + description: + - List of logout URLs that logouts are delegated to + default: [] + logout_mode: + type: str + description: + - Mode of logout (None|Delegation) + default: None + choices: + - None + - Delegation + redirect_to_requested_url: + description: + - Should a redirect to the requested URL be made + type: bool + default: false + +extends_documentation_fragment: +- community.general.utm +- community.general.attributes + +''' + +EXAMPLES = """ +- name: Create UTM proxy_auth_profile + community.general.utm_proxy_auth_profile: + utm_host: sophos.host.name + utm_token: 
abcdefghijklmno1234 + name: TestAuthProfileEntry + aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING] + basic_prompt: "Authentication required: Please login" + frontend_session_lifetime: 1 + frontend_session_timeout: 1 + state: present + +- name: Remove UTM proxy_auth_profile + community.general.utm_proxy_auth_profile: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestAuthProfileEntry + state: absent + +- name: Read UTM proxy_auth_profile + community.general.utm_proxy_auth_profile: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestAuthProfileEntry + state: info + +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + aaa: + description: List of references to utm_aaa objects (allowed users or groups) + type: list + basic_prompt: + description: The message in the basic authentication prompt + type: str + backend_mode: + description: Specifies if the backend server needs authentication ([Basic|None]) + type: str + backend_strip_basic_auth: + description: Should the login data be stripped when proxying the request to the backend host + type: bool + backend_user_prefix: + description: Prefix string to prepend to the username for backend authentication + type: str + backend_user_suffix: + description: Suffix string to append to the username for backend authentication + type: str + comment: + description: Optional comment string + type: str + frontend_cookie: + description: Frontend cookie name + type: str + frontend_form: + description: Frontend authentication form name + type: str + frontend_form_template: + description: Frontend authentication form template + type: str + frontend_login: + description: Frontend login name + type: str + frontend_logout: + description: Frontend logout name + type: str + frontend_mode: + description: Frontend authentication mode (Form|Basic) + type: str + frontend_realm: + description: Frontend authentication realm + type: str + frontend_session_allow_persistency: + description: Allow session persistency + type: bool + frontend_session_lifetime: + description: session lifetime + type: int + frontend_session_lifetime_limited: + description: Specifies if limitation of session lifetime is active + type: bool + frontend_session_lifetime_scope: + description: scope for frontend_session_lifetime (days|hours|minutes) + type: str + frontend_session_timeout: + description: session timeout + type: int + frontend_session_timeout_enabled: + description: Specifies if session timeout is active + type: bool + frontend_session_timeout_scope: + description: scope for frontend_session_timeout (days|hours|minutes) + type: str + logout_delegation_urls: + description: List of logout URLs that logouts are delegated to + type: list + logout_mode: + description: Mode of logout (None|Delegation) + type: str + redirect_to_requested_url: + description: Should a redirect to the requested URL be made + type: bool +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "reverse_proxy/auth_profile" + key_to_check_for_changes = ["aaa", "basic_prompt", "backend_mode", 
"backend_strip_basic_auth", + "backend_user_prefix", "backend_user_suffix", "comment", "frontend_cookie", + "frontend_cookie_secret", "frontend_form", "frontend_form_template", + "frontend_login", "frontend_logout", "frontend_mode", "frontend_realm", + "frontend_session_allow_persistency", "frontend_session_lifetime", + "frontend_session_lifetime_limited", "frontend_session_lifetime_scope", + "frontend_session_timeout", "frontend_session_timeout_enabled", + "frontend_session_timeout_scope", "logout_delegation_urls", "logout_mode", + "redirect_to_requested_url"] + + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + aaa=dict(type='list', elements='str', required=True), + basic_prompt=dict(type='str', required=True), + backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']), + backend_strip_basic_auth=dict(type='bool', required=False, default=True), + backend_user_prefix=dict(type='str', required=False, default=""), + backend_user_suffix=dict(type='str', required=False, default=""), + comment=dict(type='str', required=False, default=""), + frontend_cookie=dict(type='str', required=False), + frontend_cookie_secret=dict(type='str', required=False, no_log=True), + frontend_form=dict(type='str', required=False), + frontend_form_template=dict(type='str', required=False, default=""), + frontend_login=dict(type='str', required=False), + frontend_logout=dict(type='str', required=False), + frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']), + frontend_realm=dict(type='str', required=False), + frontend_session_allow_persistency=dict(type='bool', required=False, default=False), + frontend_session_lifetime=dict(type='int', required=True), + frontend_session_lifetime_limited=dict(type='bool', required=False, default=True), + frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']), + frontend_session_timeout=dict(type='int', required=True), + frontend_session_timeout_enabled=dict(type='bool', required=False, default=True), + frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']), + logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]), + logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']), + redirect_to_requested_url=dict(type='bool', required=False, default=False) + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py b/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py new file mode 100644 index 000000000..a0a3f85b5 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py @@ -0,0 +1,249 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Sebastian Schenzel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_proxy_exception + +author: + - Sebastian Schenzel (@RickS-C137) + +short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM + 
+description: + - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + description: + - The name of the object. Will be used to identify the entry + required: true + type: str + op: + description: + - The operand to be used with the entries of the path parameter + default: 'AND' + choices: + - 'AND' + - 'OR' + required: false + type: str + path: + description: + - The paths the exception in the reverse proxy is defined for + type: list + elements: str + default: [] + required: false + skip_custom_threats_filters: + description: + - A list of threats to be skipped + type: list + elements: str + default: [] + required: false + skip_threats_filter_categories: + description: + - Define which categories of threats are skipped + type: list + elements: str + default: [] + required: false + skipav: + description: + - Skip the Antivirus Scanning + default: false + type: bool + required: false + skipbadclients: + description: + - Block clients with bad reputation + default: false + type: bool + required: false + skipcookie: + description: + - Skip the Cookie Signing check + default: false + type: bool + required: false + skipform: + description: + - Enable form hardening + default: false + type: bool + required: false + skipform_missingtoken: + description: + - Enable form hardening with missing tokens + default: false + type: bool + required: false + skiphtmlrewrite: + description: + - Protection against SQL + default: false + type: bool + required: false + skiptft: + description: + - Enable true file type control + default: false + type: bool + required: false + skipurl: + description: + - Enable static URL hardening + default: false + type: bool + required: false + source: + description: + - Define which categories of threats are skipped + type: list + elements: str + default: [] + required: false + status: + description: + - Status of the exception rule set + default: true + type: bool + required: false + +extends_documentation_fragment: +- community.general.utm +- community.general.attributes + +''' + +EXAMPLES = """ +- name: Create UTM proxy_exception + community.general.utm_proxy_exception: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestExceptionEntry + backend: REF_OBJECT_STRING + state: present + +- name: Remove UTM proxy_exception + community.general.utm_proxy_exception: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestExceptionEntry + state: absent +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + comment: + description: The optional comment string + type: str + op: + description: The operand to be used with the entries of the path parameter + type: str + path: + description: The paths the exception in the reverse proxy is defined for + type: list + skip_custom_threats_filters: + description: A list of threats to be skipped + type: list + skip_threats_filter_categories: + description: Define which categories of threats are skipped + type: list + skipav: + description: Skip the Antivirus Scanning + type: bool + 
skipbadclients: + description: Block clients with bad reputation + type: bool + skipcookie: + description: Skip the Cookie Signing check + type: bool + skipform: + description: Enable form hardening + type: bool + skipform_missingtoken: + description: Enable form hardening with missing tokens + type: bool + skiphtmlrewrite: + description: Protection against SQL + type: bool + skiptft: + description: Enable true file type control + type: bool + skipurl: + description: Enable static URL hardening + type: bool + source: + description: Define which categories of threats are skipped + type: list +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "reverse_proxy/exception" + key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav", + "comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken", + "skiphtmlrewrite", "skiptft", "skipurl", "source"] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']), + path=dict(type='list', elements='str', required=False, default=[]), + skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]), + skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]), + skipav=dict(type='bool', required=False, default=False), + skipbadclients=dict(type='bool', required=False, default=False), + skipcookie=dict(type='bool', required=False, default=False), + skipform=dict(type='bool', required=False, default=False), + skipform_missingtoken=dict(type='bool', required=False, default=False), + skiphtmlrewrite=dict(type='bool', required=False, default=False), + skiptft=dict(type='bool', required=False, default=False), + skipurl=dict(type='bool', required=False, default=False), + source=dict(type='list', elements='str', required=False, default=[]), + status=dict(type='bool', required=False, default=True), + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py b/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py new file mode 100644 index 000000000..22a773fef --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_proxy_frontend + +author: + - Johannes Brunswicker (@MatrixCrawler) + +short_description: Create, update or destroy reverse_proxy frontend entry in Sophos UTM + +description: + - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM. + - This module needs to have the REST Ability of the UTM to be activated. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + type: str + description: + - The name of the object. 
Will be used to identify the entry + required: true + add_content_type_header : + description: + - Whether to add the content type header or not + type: bool + default: false + address: + type: str + description: + - The reference name of the network/interface_address object. + default: REF_DefaultInternalAddress + allowed_networks: + type: list + elements: str + description: + - A list of reference names for the allowed networks. + default: ['REF_NetworkAny'] + certificate: + type: str + description: + - The reference name of the ca/host_key_cert object. + default: "" + comment: + type: str + description: + - An optional comment to add to the object + default: "" + disable_compression: + description: + - Whether to enable the compression + type: bool + default: false + domain: + type: list + elements: str + description: + - A list of domain names for the frontend object + exceptions: + type: list + elements: str + description: + - A list of exception ref names (reverse_proxy/exception) + default: [] + htmlrewrite: + description: + - Whether to enable html rewrite or not + type: bool + default: false + htmlrewrite_cookies: + description: + - Whether to enable html rewrite cookie or not + type: bool + default: false + implicitredirect: + description: + - Whether to enable implicit redirection or not + type: bool + default: false + lbmethod: + type: str + description: + - Which loadbalancer method should be used + choices: + - "" + - bybusyness + - bytraffic + - byrequests + default: bybusyness + locations: + type: list + elements: str + description: + - A list of location ref names (reverse_proxy/location) + default: [] + port: + type: int + description: + - The frontend http port + default: 80 + preservehost: + description: + - Whether to preserve host header + type: bool + default: false + profile: + type: str + description: + - The reference string of the reverse_proxy/profile + default: "" + status: + description: + - Whether to activate the frontend entry or not + type: bool + default: true + type: + type: str + description: + - Which protocol should be used + choices: + - http + - https + default: http + xheaders: + description: + - Whether to pass the host header or not + type: bool + default: false + +extends_documentation_fragment: +- community.general.utm +- community.general.attributes + +''' + +EXAMPLES = """ +- name: Create utm proxy_frontend + community.general.utm_proxy_frontend: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestFrontendEntry + host: REF_OBJECT_STRING + state: present + +- name: Remove utm proxy_frontend + community.general.utm_proxy_frontend: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestFrontendEntry + state: absent +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: bool + _type: + description: The type of the object + type: str + name: + description: The name of the object + type: str + add_content_type_header: + description: Whether to add the content type header + type: bool + address: + description: The reference name of the address + type: str + allowed_networks: + description: List of reference names of networks associated + type: list + certificate: + description: Reference name of certificate (ca/host_key_cert) + type: str + comment: + description: The comment string + type: str + 
disable_compression: + description: State of compression support + type: bool + domain: + description: List of hostnames + type: list + exceptions: + description: List of associated proxy exceptions + type: list + htmlrewrite: + description: State of html rewrite + type: bool + htmlrewrite_cookies: + description: Whether the html rewrite cookie will be set + type: bool + implicitredirect: + description: Whether to use implicit redirection + type: bool + lbmethod: + description: The method of loadbalancer to use + type: str + locations: + description: The reference names of reverse_proxy/locations associated with the object + type: list + port: + description: The port of the frontend connection + type: int + preservehost: + description: Preserve host header + type: bool + profile: + description: The associated reverse_proxy/profile + type: str + status: + description: Whether the frontend object is active or not + type: bool + type: + description: The connection type + type: str + xheaders: + description: The xheaders state + type: bool +""" + +from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + endpoint = "reverse_proxy/frontend" + key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate", + "comment", "disable_compression", "domain", "exceptions", "htmlrewrite", + "htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations", + "port", "preservehost", "profile", "status", "type", "xheaders"] + module = UTMModule( + argument_spec=dict( + name=dict(type='str', required=True), + add_content_type_header=dict(type='bool', required=False, default=False), + address=dict(type='str', required=False, default="REF_DefaultInternalAddress"), + allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]), + certificate=dict(type='str', required=False, default=""), + comment=dict(type='str', required=False, default=""), + disable_compression=dict(type='bool', required=False, default=False), + domain=dict(type='list', elements='str', required=False), + exceptions=dict(type='list', elements='str', required=False, default=[]), + htmlrewrite=dict(type='bool', required=False, default=False), + htmlrewrite_cookies=dict(type='bool', required=False, default=False), + implicitredirect=dict(type='bool', required=False, default=False), + lbmethod=dict(type='str', required=False, default="bybusyness", + choices=['bybusyness', 'bytraffic', 'byrequests', '']), + locations=dict(type='list', elements='str', required=False, default=[]), + port=dict(type='int', required=False, default=80), + preservehost=dict(type='bool', required=False, default=False), + profile=dict(type='str', required=False, default=""), + status=dict(type='bool', required=False, default=True), + type=dict(type='str', required=False, default="http", choices=['http', 'https']), + xheaders=dict(type='bool', required=False, default=False), + ) + ) + try: + UTM(module, endpoint, key_to_check_for_changes).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py b/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py new file mode 100644 index 000000000..0435ef949 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py @@ -0,0 
+1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend_info
+
+author:
+    - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for a reverse_proxy frontend entry in Sophos UTM
+
+description:
+    - Get info for a reverse_proxy frontend entry in SOPHOS UTM.
+    - This module needs to have the REST Ability of the UTM to be activated.
+
+attributes:
+    check_mode:
+        version_added: 3.3.0
+        # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+
+options:
+    name:
+        type: str
+        description:
+          - The name of the object. Will be used to identify the entry.
+        required: true
+
+extends_documentation_fragment:
+    - community.general.utm
+    - community.general.attributes
+    - community.general.attributes.info_module
+'''
+
+EXAMPLES = """
+- name: Get utm proxy_frontend
+  community.general.utm_proxy_frontend_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        add_content_type_header:
+            description: Whether to add the content type header
+            type: bool
+        address:
+            description: The reference name of the address
+            type: str
+        allowed_networks:
+            description: List of reference names of networks associated
+            type: list
+        certificate:
+            description: Reference name of certificate (ca/host_key_cert)
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        disable_compression:
+            description: State of compression support
+            type: bool
+        domain:
+            description: List of hostnames
+            type: list
+        exceptions:
+            description: List of associated proxy exceptions
+            type: list
+        htmlrewrite:
+            description: State of html rewrite
+            type: bool
+        htmlrewrite_cookies:
+            description: Whether the html rewrite cookie will be set
+            type: bool
+        implicitredirect:
+            description: Whether to use implicit redirection
+            type: bool
+        lbmethod:
+            description: The method of loadbalancer to use
+            type: str
+        locations:
+            description: The reference names of reverse_proxy/locations associated with the object
+            type: list
+        port:
+            description: The port of the frontend connection
+            type: int
+        preservehost:
+            description: Preserve host header
+            type: bool
+        profile:
+            description: The associated reverse_proxy/profile
+            type: str
+        status:
+            description: Whether the frontend object is active or not
+            type: bool
+        type:
+            description: The connection type
+            type: str
+        xheaders:
+            description: The xheaders state
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+    endpoint = "reverse_proxy/frontend"
+    key_to_check_for_changes = []
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+        ),
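+        # Info-only module: the lookup below is read-only against the UTM
+        # REST API, which is why check mode can be supported safely here.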
supports_check_mode=True, + ) + try: + UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() + except Exception as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_location.py b/ansible_collections/community/general/plugins/modules/utm_proxy_location.py new file mode 100644 index 000000000..c22de7b92 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/utm_proxy_location.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: utm_proxy_location + +author: + - Johannes Brunswicker (@MatrixCrawler) + +short_description: Create, update or destroy reverse_proxy location entry in Sophos UTM + +description: + - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM. + - This module needs to have the REST Ability of the UTM to be activated. + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + type: str + description: + - The name of the object. Will be used to identify the entry + required: true + access_control: + description: + - whether to activate the access control for the location + type: str + default: '0' + choices: + - '0' + - '1' + allowed_networks: + description: + - A list of allowed networks + type: list + elements: str + default: + - REF_NetworkAny + auth_profile: + type: str + description: + - The reference name of the auth profile + default: '' + backend: + type: list + elements: str + description: + - A list of backends that are connected with this location declaration + default: [] + be_path: + type: str + description: + - The path of the backend + default: '' + comment: + type: str + description: + - The optional comment string + default: '' + denied_networks: + type: list + elements: str + description: + - A list of denied network references + default: [] + hot_standby: + description: + - Activate hot standby mode + type: bool + default: false + path: + type: str + description: + - The path of the location + default: "/" + status: + description: + - Whether the location is active or not + type: bool + default: true + stickysession_id: + type: str + description: + - The stickysession id + default: ROUTEID + stickysession_status: + description: + - Enable the stickysession + type: bool + default: false + websocket_passthrough: + description: + - Enable the websocket passthrough + type: bool + default: false + +extends_documentation_fragment: +- community.general.utm +- community.general.attributes + +''' + +EXAMPLES = """ +- name: Create UTM proxy_location + utm_proxy_backend: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestLocationEntry + backend: REF_OBJECT_STRING + state: present + +- name: Remove UTM proxy_location + utm_proxy_backend: + utm_host: sophos.host.name + utm_token: abcdefghijklmno1234 + name: TestLocationEntry + state: absent +""" + +RETURN = """ +result: + description: The utm object that was created + returned: success + type: complex + contains: + _ref: + description: The reference name of the object + type: str + _locked: + description: Whether or not the object is currently locked + type: 
bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        access_control:
+            description: Whether to use access control state
+            type: str
+        allowed_networks:
+            description: List of allowed network reference names
+            type: list
+        auth_profile:
+            description: The auth profile reference name
+            type: str
+        backend:
+            description: The backend reference name
+            type: str
+        be_path:
+            description: The backend path
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        denied_networks:
+            description: The list of the denied network names
+            type: list
+        hot_standby:
+            description: Use hot standby
+            type: bool
+        path:
+            description: Path name
+            type: str
+        status:
+            description: Whether the object is active or not
+            type: bool
+        stickysession_id:
+            description: The identifier of the stickysession
+            type: str
+        stickysession_status:
+            description: Whether to use stickysession or not
+            type: bool
+        websocket_passthrough:
+            description: Whether websocket passthrough will be used or not
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+    endpoint = "reverse_proxy/location"
+    key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
+                                "denied_networks", "hot_standby", "path", "status", "stickysession_id",
+                                "stickysession_status", "websocket_passthrough"]
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
+            allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
+            auth_profile=dict(type='str', required=False, default=""),
+            backend=dict(type='list', elements='str', required=False, default=[]),
+            be_path=dict(type='str', required=False, default=""),
+            comment=dict(type='str', required=False, default=""),
+            denied_networks=dict(type='list', elements='str', required=False, default=[]),
+            hot_standby=dict(type='bool', required=False, default=False),
+            path=dict(type='str', required=False, default="/"),
+            status=dict(type='bool', required=False, default=True),
+            stickysession_id=dict(type='str', required=False, default='ROUTEID'),
+            stickysession_status=dict(type='bool', required=False, default=False),
+            websocket_passthrough=dict(type='bool', required=False, default=False),
+        )
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py b/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
new file mode 100644
index 000000000..58a32107b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location_info
+
+author:
+    - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for a reverse_proxy location entry in Sophos UTM
+
+description:
+    - Get info for a reverse_proxy location entry in SOPHOS UTM.
+    - This module needs to have the REST Ability of the UTM to be activated.
+
+attributes:
+    check_mode:
+        version_added: 3.3.0
+        # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+
+options:
+    name:
+        type: str
+        description:
+          - The name of the object. Will be used to identify the entry.
+        required: true
+
+extends_documentation_fragment:
+    - community.general.utm
+    - community.general.attributes
+    - community.general.attributes.info_module
+'''
+
+EXAMPLES = """
+- name: Get UTM proxy_location info
+  community.general.utm_proxy_location_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+"""
+
+RETURN = """
+result:
+    description: The utm object that was created
+    returned: success
+    type: complex
+    contains:
+        _ref:
+            description: The reference name of the object
+            type: str
+        _locked:
+            description: Whether or not the object is currently locked
+            type: bool
+        _type:
+            description: The type of the object
+            type: str
+        name:
+            description: The name of the object
+            type: str
+        access_control:
+            description: Whether to use access control state
+            type: str
+        allowed_networks:
+            description: List of allowed network reference names
+            type: list
+        auth_profile:
+            description: The auth profile reference name
+            type: str
+        backend:
+            description: The backend reference name
+            type: str
+        be_path:
+            description: The backend path
+            type: str
+        comment:
+            description: The comment string
+            type: str
+        denied_networks:
+            description: The list of the denied network names
+            type: list
+        hot_standby:
+            description: Use hot standby
+            type: bool
+        path:
+            description: Path name
+            type: str
+        status:
+            description: Whether the object is active or not
+            type: bool
+        stickysession_id:
+            description: The identifier of the stickysession
+            type: str
+        stickysession_status:
+            description: Whether to use stickysession or not
+            type: bool
+        websocket_passthrough:
+            description: Whether websocket passthrough will be used or not
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+    endpoint = "reverse_proxy/location"
+    key_to_check_for_changes = []
+    module = UTMModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+        ),
+        supports_check_mode=True,
+    )
+    try:
+        UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+    except Exception as e:
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/vdo.py b/ansible_collections/community/general/plugins/modules/vdo.py
new file mode 100644
index 000000000..f1ea40e2e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vdo.py
@@ -0,0 +1,781 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+    - Bryan Gurney (@bgurney-rh)
+
+module: vdo
+
+short_description: Module to control VDO
+
+
+description:
+    - This module controls the VDO dedupe and compression device.
+ - VDO, or Virtual Data Optimizer, is a device-mapper target that + provides inline block-level deduplication, compression, and + thin provisioning capabilities to primary storage. + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + description: + - The name of the VDO volume. + type: str + required: true + state: + description: + - Whether this VDO volume should be "present" or "absent". + If a "present" VDO volume does not exist, it will be + created. If a "present" VDO volume already exists, it + will be modified, by updating the configuration, which + will take effect when the VDO volume is restarted. + Not all parameters of an existing VDO volume can be + modified; the "statusparamkeys" list contains the + parameters that can be modified after creation. If an + "absent" VDO volume does not exist, it will not be + removed. + type: str + choices: [ absent, present ] + default: present + activated: + description: + - The "activate" status for a VDO volume. If this is set + to C(false), the VDO volume cannot be started, and it will + not start on system startup. However, on initial + creation, a VDO volume with "activated" set to "off" + will be running, until stopped. This is the default + behavior of the "vdo create" command; it provides the + user an opportunity to write a base amount of metadata + (filesystem, LVM headers, etc.) to the VDO volume prior + to stopping the volume, and leaving it deactivated + until ready to use. + type: bool + running: + description: + - Whether this VDO volume is running. + - A VDO volume must be activated in order to be started. + type: bool + device: + description: + - The full path of the device to use for VDO storage. + - This is required if "state" is "present". + type: str + logicalsize: + description: + - The logical size of the VDO volume (in megabytes, or + LVM suffix format). If not specified for a new volume, + this defaults to the same size as the underlying storage + device, which is specified in the 'device' parameter. + Existing volumes will maintain their size if the + logicalsize parameter is not specified, or is smaller + than or identical to the current size. If the specified + size is larger than the current size, a growlogical + operation will be performed. + type: str + deduplication: + description: + - Configures whether deduplication is enabled. The + default for a created volume is 'enabled'. Existing + volumes will maintain their previously configured + setting unless a different value is specified in the + playbook. + type: str + choices: [ disabled, enabled ] + compression: + description: + - Configures whether compression is enabled. The default + for a created volume is 'enabled'. Existing volumes + will maintain their previously configured setting unless + a different value is specified in the playbook. + type: str + choices: [ disabled, enabled ] + blockmapcachesize: + description: + - The amount of memory allocated for caching block map + pages, in megabytes (or may be issued with an LVM-style + suffix of K, M, G, or T). The default (and minimum) + value is 128M. The value specifies the size of the + cache; there is a 15% memory usage overhead. Each 1.25G + of block map covers 1T of logical blocks, therefore a + small amount of block map cache memory can cache a + significantly large amount of block map data. 
Existing + volumes will maintain their previously configured + setting unless a different value is specified in the + playbook. + type: str + readcache: + description: + - Enables or disables the read cache. The default is + 'disabled'. Choosing 'enabled' enables a read cache + which may improve performance for workloads of high + deduplication, read workloads with a high level of + compression, or on hard disk storage. Existing + volumes will maintain their previously configured + setting unless a different value is specified in the + playbook. + - The read cache feature is available in VDO 6.1 and older. + type: str + choices: [ disabled, enabled ] + readcachesize: + description: + - Specifies the extra VDO device read cache size in + megabytes. This is in addition to a system-defined + minimum. Using a value with a suffix of K, M, G, or T + is optional. The default value is 0. 1.125 MB of + memory per bio thread will be used per 1 MB of read + cache specified (for example, a VDO volume configured + with 4 bio threads will have a read cache memory usage + overhead of 4.5 MB per 1 MB of read cache specified). + Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + - The read cache feature is available in VDO 6.1 and older. + type: str + emulate512: + description: + - Enables 512-byte emulation mode, allowing drivers or + filesystems to access the VDO volume at 512-byte + granularity, instead of the default 4096-byte granularity. + Default is 'disabled'; only recommended when a driver + or filesystem requires 512-byte sector level access to + a device. This option is only available when creating + a new volume, and cannot be changed for an existing + volume. + type: bool + default: false + growphysical: + description: + - Specifies whether to attempt to execute a growphysical + operation, if there is enough unused space on the + device. A growphysical operation will be executed if + there is at least 64 GB of free space, relative to the + previous physical size of the affected VDO volume. + type: bool + default: false + slabsize: + description: + - The size of the increment by which the physical size of + a VDO volume is grown, in megabytes (or may be issued + with an LVM-style suffix of K, M, G, or T). Must be a + power of two between 128M and 32G. The default is 2G, + which supports volumes having a physical size up to 16T. + The maximum, 32G, supports a physical size of up to 256T. + This option is only available when creating a new + volume, and cannot be changed for an existing volume. + type: str + writepolicy: + description: + - Specifies the write policy of the VDO volume. The + 'sync' mode acknowledges writes only after data is on + stable storage. The 'async' mode acknowledges writes + when data has been cached for writing to stable + storage. The default (and highly recommended) 'auto' + mode checks the storage device to determine whether it + supports flushes. Devices that support flushes will + result in a VDO volume in 'async' mode, while devices + that do not support flushes will run in sync mode. + Existing volumes will maintain their previously + configured setting unless a different value is + specified in the playbook. + type: str + choices: [ async, auto, sync ] + indexmem: + description: + - Specifies the amount of index memory in gigabytes. The + default is 0.25. The special decimal values 0.25, 0.5, + and 0.75 can be used, as can any positive integer. 
+ This option is only available when creating a new + volume, and cannot be changed for an existing volume. + type: str + indexmode: + description: + - Specifies the index mode of the Albireo index. The + default is 'dense', which has a deduplication window of + 1 GB of index memory per 1 TB of incoming data, + requiring 10 GB of index data on persistent storage. + The 'sparse' mode has a deduplication window of 1 GB of + index memory per 10 TB of incoming data, but requires + 100 GB of index data on persistent storage. This option + is only available when creating a new volume, and cannot + be changed for an existing volume. + type: str + choices: [ dense, sparse ] + ackthreads: + description: + - Specifies the number of threads to use for + acknowledging completion of requested VDO I/O operations. + Valid values are integer values from 1 to 100 (lower + numbers are preferable due to overhead). The default is + 1. Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + type: str + biothreads: + description: + - Specifies the number of threads to use for submitting I/O + operations to the storage device. Valid values are + integer values from 1 to 100 (lower numbers are + preferable due to overhead). The default is 4. + Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + type: str + cputhreads: + description: + - Specifies the number of threads to use for CPU-intensive + work such as hashing or compression. Valid values are + integer values from 1 to 100 (lower numbers are + preferable due to overhead). The default is 2. + Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + type: str + logicalthreads: + description: + - Specifies the number of threads across which to + subdivide parts of the VDO processing based on logical + block addresses. Valid values are integer values from + 1 to 100 (lower numbers are preferable due to overhead). + The default is 1. Existing volumes will maintain their + previously configured setting unless a different value + is specified in the playbook. + type: str + physicalthreads: + description: + - Specifies the number of threads across which to + subdivide parts of the VDO processing based on physical + block addresses. Valid values are integer values from + 1 to 16 (lower numbers are preferable due to overhead). + The physical space used by the VDO volume must be + larger than (slabsize * physicalthreads). The default + is 1. Existing volumes will maintain their previously + configured setting unless a different value is specified + in the playbook. + type: str + force: + description: + - When creating a volume, ignores any existing file system + or VDO signature already present in the storage device. + When stopping or removing a VDO volume, first unmounts + the file system stored on the device if mounted. + - "B(Warning:) Since this parameter removes all safety + checks it is important to make sure that all parameters + provided are accurate and intentional." + type: bool + default: false + version_added: 2.4.0 +notes: + - In general, the default thread configuration should be used. 
+requirements:
+    - PyYAML
+    - kmod-kvdo
+    - vdo
+'''
+
+EXAMPLES = r'''
+- name: Create 2 TB VDO volume vdo1 on device /dev/md0
+  community.general.vdo:
+    name: vdo1
+    state: present
+    device: /dev/md0
+    logicalsize: 2T
+
+- name: Remove VDO volume vdo1
+  community.general.vdo:
+    name: vdo1
+    state: absent
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import re
+import traceback
+
+YAML_IMP_ERR = None
+try:
+    import yaml
+    HAS_YAML = True
+except ImportError:
+    YAML_IMP_ERR = traceback.format_exc()
+    HAS_YAML = False
+
+
+# Generate a list of VDO volumes, whether they are running or stopped.
+#
+# @param module The AnsibleModule object.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return vdolist A list of currently created VDO volumes.
+def inventory_vdos(module, vdocmd):
+    rc, vdostatusout, err = module.run_command([vdocmd, "status"])
+
+    vdolist = []
+
+    if rc == 2 and re.findall(r"vdoconf\.yml does not exist", err, re.MULTILINE):
+        # If there is no /etc/vdoconf.yml file, assume there are no
+        # VDO volumes. Return an empty list of VDO volumes.
+        return vdolist
+
+    if rc != 0:
+        module.fail_json(msg="Inventorying VDOs failed: %s" % vdostatusout, rc=rc, err=err)
+
+    vdostatusyaml = yaml.safe_load(vdostatusout)
+    if vdostatusyaml is None:
+        return vdolist
+
+    vdoyamls = vdostatusyaml['VDOs']
+
+    if vdoyamls is not None:
+        vdolist = list(vdoyamls.keys())
+
+    return vdolist
+
+
+def list_running_vdos(module, vdocmd):
+    rc, vdolistout, err = module.run_command([vdocmd, "list"])
+    runningvdolist = filter(None, vdolistout.split('\n'))
+    return runningvdolist
+
+
+# Simple wrappers around 'vdo start/stop/activate/deactivate' for an
+# existing VDO volume; each returns the exit code of the 'vdo' command.
+def start_vdo(module, vdoname, vdocmd):
+    rc, out, err = module.run_command([vdocmd, "start", "--name=%s" % vdoname])
+    if rc == 0:
+        module.log("started VDO volume %s" % vdoname)
+    return rc
+
+
+def stop_vdo(module, vdoname, vdocmd):
+    rc, out, err = module.run_command([vdocmd, "stop", "--name=%s" % vdoname])
+    if rc == 0:
+        module.log("stopped VDO volume %s" % vdoname)
+    return rc
+
+
+def activate_vdo(module, vdoname, vdocmd):
+    rc, out, err = module.run_command([vdocmd, "activate", "--name=%s" % vdoname])
+    if rc == 0:
+        module.log("activated VDO volume %s" % vdoname)
+    return rc
+
+
+def deactivate_vdo(module, vdoname, vdocmd):
+    rc, out, err = module.run_command([vdocmd, "deactivate", "--name=%s" % vdoname])
+    if rc == 0:
+        module.log("deactivated VDO volume %s" % vdoname)
+    return rc
+
+
+# Generate a list of options to pass to the 'vdo' command.
+# Note that a 'create' operation will pass more options than a
+# 'modify' operation.
+#
+# @param params A dictionary of parameters, and their values
+#               (values of 'None' and/or nonexistent values are ignored).
+#
+# @return options A list of options to append to a 'vdo' command line,
+#                 for example ['--vdoLogicalSize=2T', '--force'].
+def add_vdooptions(params):
+    options = []
+
+    if params.get('logicalsize') is not None:
+        options.append("--vdoLogicalSize=" + params['logicalsize'])
+
+    if params.get('blockmapcachesize') is not None:
+        options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
+
+    if params.get('readcache') == 'enabled':
+        options.append("--readCache=enabled")
+
+    if params.get('readcachesize') is not None:
+        options.append("--readCacheSize=" + params['readcachesize'])
+
+    if params.get('slabsize') is not None:
+        options.append("--vdoSlabSize=" + params['slabsize'])
+
+    if params.get('emulate512'):
+        options.append("--emulate512=enabled")
+
+    if params.get('indexmem') is not None:
+        options.append("--indexMem=" + params['indexmem'])
+
+    if params.get('indexmode') == 'sparse':
+        options.append("--sparseIndex=enabled")
+
+    if params.get('force'):
+        options.append("--force")
+
+    # Entering an invalid thread config results in a cryptic
+    # 'Could not set up device mapper for %s' error from the 'vdo'
+    # command execution. The dmsetup module on the system will
+    # output a more helpful message, but one would have to log
+    # onto that system to read the error. For now, heed the thread
+    # limit warnings in the DOCUMENTATION section above.
+    if params.get('ackthreads') is not None:
+        options.append("--vdoAckThreads=" + params['ackthreads'])
+
+    if params.get('biothreads') is not None:
+        options.append("--vdoBioThreads=" + params['biothreads'])
+
+    if params.get('cputhreads') is not None:
+        options.append("--vdoCpuThreads=" + params['cputhreads'])
+
+    if params.get('logicalthreads') is not None:
+        options.append("--vdoLogicalThreads=" + params['logicalthreads'])
+
+    if params.get('physicalthreads') is not None:
+        options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
+
+    return options
+
+
+def run_module():
+
+    # Define the available arguments/parameters that a user can pass to
+    # the module.
+    # Defaults for VDO parameters are None, in order to facilitate
+    # the detection of parameters passed from the playbook.
+    # Creation param defaults are determined by the creation section.
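+    # A VDO-specific parameter left as None below was not specified in
+    # the playbook; the later comparison logic skips None values, so an
+    # existing volume keeps its current setting for such parameters.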
+
+    module_args = dict(
+        name=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        activated=dict(type='bool'),
+        running=dict(type='bool'),
+        growphysical=dict(type='bool', default=False),
+        device=dict(type='str'),
+        logicalsize=dict(type='str'),
+        deduplication=dict(type='str', choices=['disabled', 'enabled']),
+        compression=dict(type='str', choices=['disabled', 'enabled']),
+        blockmapcachesize=dict(type='str'),
+        readcache=dict(type='str', choices=['disabled', 'enabled']),
+        readcachesize=dict(type='str'),
+        emulate512=dict(type='bool', default=False),
+        slabsize=dict(type='str'),
+        writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
+        indexmem=dict(type='str'),
+        indexmode=dict(type='str', choices=['dense', 'sparse']),
+        ackthreads=dict(type='str'),
+        biothreads=dict(type='str'),
+        cputhreads=dict(type='str'),
+        logicalthreads=dict(type='str'),
+        physicalthreads=dict(type='str'),
+        force=dict(type='bool', default=False),
+    )
+
+    # Seed the result dictionary. An 'invocation' dictionary with the
+    # given 'module_args' will be added automatically.
+    result = dict(
+        changed=False,
+    )
+
+    # The AnsibleModule object is the module's abstraction for working
+    # with Ansible: it parses the args/params passed to the execution
+    # and declares whether the module supports check mode.
+    module = AnsibleModule(
+        argument_spec=module_args,
+        supports_check_mode=False,
+    )
+
+    if not HAS_YAML:
+        module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
+
+    vdocmd = module.get_bin_path("vdo", required=True)
+    if not vdocmd:
+        module.fail_json(msg='VDO is not installed.', **result)
+
+    # Gather a pre-run list of VDO volumes.
+    vdolist = inventory_vdos(module, vdocmd)
+
+    runningvdolist = list_running_vdos(module, vdocmd)
+
+    # Collect the name of the desired VDO volume, and its state. These
+    # determine what to do.
+    desiredvdo = module.params['name']
+    state = module.params['state']
+
+    # Create a desired VDO volume that doesn't exist yet.
+    if (desiredvdo not in vdolist) and (state == 'present'):
+        device = module.params['device']
+        if device is None:
+            module.fail_json(msg="Creating a VDO volume requires specifying "
+                                 "a 'device' in the playbook.")
+
+        # Create a dictionary of the options from the AnsibleModule
+        # parameters, compile the vdo command options, and run "vdo create"
+        # with those options.
+        # Since this is a creation of a new VDO volume, it will contain
+        # all of the parameters given by the playbook; the rest will
+        # assume default values.
+        vdocmdoptions = add_vdooptions(module.params)
+        rc, out, err = module.run_command(
+            [vdocmd, "create", "--name=%s" % desiredvdo, "--device=%s" % device] + vdocmdoptions)
+        if rc == 0:
+            result['changed'] = True
+        else:
+            module.fail_json(msg="Creating VDO %s failed." % desiredvdo, rc=rc, err=err)
+
+        if module.params['compression'] == 'disabled':
+            rc, out, err = module.run_command([vdocmd, "disableCompression", "--name=%s" % desiredvdo])
+
+        if module.params['deduplication'] == 'disabled':
+            rc, out, err = module.run_command([vdocmd, "disableDeduplication", "--name=%s" % desiredvdo])
+
+        if module.params['activated'] is False:
+            deactivate_vdo(module, desiredvdo, vdocmd)
+
+        if module.params['running'] is False:
+            stop_vdo(module, desiredvdo, vdocmd)
+
+        # Refresh the list of VDO volumes after the create operation.
+        vdolist = inventory_vdos(module, vdocmd)
+        module.log("created VDO volume %s" % desiredvdo)
+        module.exit_json(**result)
+
+    # Modify the current parameters of a VDO that exists.
+    if desiredvdo in vdolist and state == 'present':
+        rc, vdostatusoutput, err = module.run_command([vdocmd, "status"])
+        vdostatusyaml = yaml.safe_load(vdostatusoutput)
+
+        # An empty dictionary to contain dictionaries of VDO statistics
+        processedvdos = {}
+
+        vdoyamls = vdostatusyaml['VDOs']
+        if vdoyamls is not None:
+            processedvdos = vdoyamls
+
+        # The 'vdo status' keys that are currently modifiable.
+        statusparamkeys = ['Acknowledgement threads',
+                           'Bio submission threads',
+                           'Block map cache size',
+                           'CPU-work threads',
+                           'Logical threads',
+                           'Physical threads',
+                           'Read cache',
+                           'Read cache size',
+                           'Configured write policy',
+                           'Compression',
+                           'Deduplication']
+
+        # A key translation table from 'vdo status' output to Ansible
+        # module parameters. This covers all of the 'vdo status'
+        # parameter keys that could be modified with the 'vdo'
+        # command.
+        vdokeytrans = {
+            'Logical size': 'logicalsize',
+            'Compression': 'compression',
+            'Deduplication': 'deduplication',
+            'Block map cache size': 'blockmapcachesize',
+            'Read cache': 'readcache',
+            'Read cache size': 'readcachesize',
+            'Configured write policy': 'writepolicy',
+            'Acknowledgement threads': 'ackthreads',
+            'Bio submission threads': 'biothreads',
+            'CPU-work threads': 'cputhreads',
+            'Logical threads': 'logicalthreads',
+            'Physical threads': 'physicalthreads'
+        }
+
+        # Build a dictionary of the current VDO status parameters, with
+        # the keys used by VDO. (These keys will be converted later.)
+        currentvdoparams = {}
+
+        # Build a "lookup table" dictionary containing a translation table
+        # of the parameters that can be modified
+        modtrans = {}
+
+        for statfield in statusparamkeys:
+            if statfield in processedvdos[desiredvdo]:
+                currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
+
+            modtrans[statfield] = vdokeytrans[statfield]
+
+        # Build a dictionary of current parameters formatted with the
+        # same keys as the AnsibleModule parameters, mapping each
+        # translated key to the volume's current value.
+        currentparams = {}
+        for paramkey in modtrans.keys():
+            if paramkey in currentvdoparams:
+                currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]
+
+        diffparams = {}
+
+        # Check for differences between the playbook parameters and the
+        # current parameters. This will need a comparison function;
+        # since AnsibleModule params are all strings, compare them as
+        # strings (skipping any playbook parameter that is None).
+        for key in currentparams.keys():
+            if module.params[key] is not None:
+                if str(currentparams[key]) != module.params[key]:
+                    diffparams[key] = module.params[key]
+
+        if diffparams:
+            vdocmdoptions = add_vdooptions(diffparams)
+            if vdocmdoptions:
+                rc, out, err = module.run_command([vdocmd, "modify", "--name=%s" % desiredvdo] + vdocmdoptions)
+                if rc == 0:
+                    result['changed'] = True
+                else:
+                    module.fail_json(msg="Modifying VDO %s failed."
+                                     % desiredvdo, rc=rc, err=err)
+
+            if 'deduplication' in diffparams.keys():
+                dedupemod = diffparams['deduplication']
+                dedupeparam = "disableDeduplication" if dedupemod == 'disabled' else "enableDeduplication"
+                rc, out, err = module.run_command([vdocmd, dedupeparam, "--name=%s" % desiredvdo])
+
+                if rc == 0:
+                    result['changed'] = True
+                else:
+                    module.fail_json(msg="Changing deduplication on VDO volume %s failed."
% desiredvdo, rc=rc, err=err)
+
+            if 'compression' in diffparams.keys():
+                compressmod = diffparams['compression']
+                compressparam = "disableCompression" if compressmod == 'disabled' else "enableCompression"
+                rc, out, err = module.run_command([vdocmd, compressparam, "--name=%s" % desiredvdo])
+                if rc == 0:
+                    result['changed'] = True
+                else:
+                    module.fail_json(msg="Changing compression on VDO volume %s failed." % desiredvdo, rc=rc, err=err)
+
+            if 'writepolicy' in diffparams.keys():
+                writepolmod = diffparams['writepolicy']
+                rc, out, err = module.run_command([
+                    vdocmd,
+                    "changeWritePolicy",
+                    "--name=%s" % desiredvdo,
+                    "--writePolicy=%s" % writepolmod,
+                ])
+
+                if rc == 0:
+                    result['changed'] = True
+                else:
+                    module.fail_json(msg="Changing write policy on VDO volume %s failed." % desiredvdo, rc=rc, err=err)
+
+        # Process the size parameters, to determine whether a growPhysical
+        # or growLogical operation needs to occur.
+        sizeparamkeys = ['Logical size', ]
+
+        currentsizeparams = {}
+        sizetrans = {}
+        for statfield in sizeparamkeys:
+            currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
+            sizetrans[statfield] = vdokeytrans[statfield]
+
+        sizeparams = {}
+        for paramkey in currentsizeparams.keys():
+            sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
+
+        diffsizeparams = {}
+        for key in sizeparams.keys():
+            if module.params[key] is not None and str(sizeparams[key]) != module.params[key]:
+                diffsizeparams[key] = module.params[key]
+
+        if module.params['growphysical']:
+            physdevice = module.params['device']
+            rc, devsectors, err = module.run_command([module.get_bin_path("blockdev"), "--getsz", physdevice])
+            # 'blockdev --getsz' reports 512-byte sectors; convert to
+            # 4 KB blocks with integer division.
+            devblocks = (int(devsectors) // 8)
+            dmvdoname = ('/dev/mapper/' + desiredvdo)
+            currentvdostats = processedvdos[desiredvdo]['VDO statistics'][dmvdoname]
+            currentphysblocks = currentvdostats['physical blocks']
+
+            # Set a growPhysical threshold to grow only when there is
+            # guaranteed to be more than 2 slabs worth of unallocated
+            # space on the device to use. For now, set to device
+            # size + 64 GB, since 32 GB is the largest possible
+            # slab size.
+            growthresh = devblocks + 16777216
+
+            if currentphysblocks > growthresh:
+                result['changed'] = True
+                rc, out, err = module.run_command([vdocmd, "growPhysical", "--name=%s" % desiredvdo])
+
+        if 'logicalsize' in diffsizeparams.keys():
+            result['changed'] = True
+            rc, out, err = module.run_command([vdocmd, "growLogical", "--name=%s" % desiredvdo, "--vdoLogicalSize=%s" % diffsizeparams['logicalsize']])
+
+        vdoactivatestatus = processedvdos[desiredvdo]['Activate']
+
+        if module.params['activated'] is False and vdoactivatestatus == 'enabled':
+            deactivate_vdo(module, desiredvdo, vdocmd)
+            if not result['changed']:
+                result['changed'] = True
+
+        if module.params['activated'] and vdoactivatestatus == 'disabled':
+            activate_vdo(module, desiredvdo, vdocmd)
+            if not result['changed']:
+                result['changed'] = True
+
+        if module.params['running'] is False and desiredvdo in runningvdolist:
+            stop_vdo(module, desiredvdo, vdocmd)
+            if not result['changed']:
+                result['changed'] = True
+
+        # Note that a disabled VDO volume cannot be started by the
+        # 'vdo start' command, by design. To accurately track changed
+        # status, don't try to start a disabled VDO volume.
+        # If the playbook contains 'activated: true', assume that
+        # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
+        # will have the activated status prior to the activate_vdo()
+        # call.
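+        #
+        # For illustration only (hypothetical playbook values): with
+        # 'activated: false' and 'running: true' on a currently disabled
+        # volume, the condition below is false, so no start is attempted
+        # and 'changed' is not set spuriously.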
+        if (vdoactivatestatus == 'enabled' or module.params['activated']) and module.params['running'] and desiredvdo not in runningvdolist:
+            start_vdo(module, desiredvdo, vdocmd)
+            if not result['changed']:
+                result['changed'] = True
+
+        # Refresh the list of VDO volumes after the modify operation.
+        vdolist = inventory_vdos(module, vdocmd)
+        if diffparams:
+            module.log("modified parameters of VDO volume %s" % desiredvdo)
+
+        module.exit_json(**result)
+
+    # Remove a desired VDO that currently exists.
+    if desiredvdo in vdolist and state == 'absent':
+        rc, out, err = module.run_command([vdocmd, "remove", "--name=%s" % desiredvdo])
+        if rc == 0:
+            result['changed'] = True
+        else:
+            module.fail_json(msg="Removing VDO %s failed." % desiredvdo, rc=rc, err=err)
+
+        # Refresh the list of VDO volumes after the remove operation.
+        vdolist = inventory_vdos(module, vdocmd)
+        module.log("removed VDO volume %s" % desiredvdo)
+        module.exit_json(**result)
+
+    # Fall-through: the state for the desired VDO volume was absent,
+    # and it does not exist, so there is nothing to do.
+    vdolist = inventory_vdos(module, vdocmd)
+    module.log("received request to remove non-existent VDO volume %s" % desiredvdo)
+
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_configuration.py b/ansible_collections/community/general/plugins/modules/vertica_configuration.py
new file mode 100644
index 000000000..09b80df3d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_configuration.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_configuration
+short_description: Updates Vertica configuration parameters
+description:
+  - Updates Vertica configuration parameters.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  parameter:
+    description:
+      - Name of the parameter to update.
+    required: true
+    aliases: [name]
+    type: str
+  value:
+    description:
+      - Value of the parameter to be set.
+    type: str
+  db:
+    description:
+      - Name of the Vertica database.
+    type: str
+  cluster:
+    description:
+      - Name of the Vertica cluster.
+    default: localhost
+    type: str
+  port:
+    description:
+      - Vertica cluster port to connect to.
+    default: '5433'
+    type: str
+  login_user:
+    description:
+      - The username used to authenticate with.
+    default: dbadmin
+    type: str
+  login_password:
+    description:
+      - The password used to authenticate with.
+    type: str
+notes:
+  - The default authentication assumes that you are either logging in as or sudo'ing
+    to the C(dbadmin) account on the host.
+  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+  - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+    to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+    and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+    to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Updating the failovertostandbyafter parameter
+  community.general.vertica_configuration: name=failovertostandbyafter value='8 hours'
+"""
+import traceback

PYODBC_IMP_ERR = None
+try:
+    import pyodbc
+except ImportError:
+    PYODBC_IMP_ERR = traceback.format_exc()
+    pyodbc_found = False
+else:
+    pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class NotSupportedError(Exception):
+    pass
+
+
+class CannotDropError(Exception):
+    pass
+
+# module specific functions
+
+
+def get_configuration_facts(cursor, parameter_name=''):
+    facts = {}
+    cursor.execute("""
+        select c.parameter_name, c.current_value, c.default_value
+        from configuration_parameters c
+        where c.node_name = 'ALL'
+        and (? = '' or c.parameter_name ilike ?)
+    """, parameter_name, parameter_name)
+    while True:
+        rows = cursor.fetchmany(100)
+        if not rows:
+            break
+        for row in rows:
+            facts[row.parameter_name.lower()] = {
+                'parameter_name': row.parameter_name,
+                'current_value': row.current_value,
+                'default_value': row.default_value}
+    return facts
+
+
+def check(configuration_facts, parameter_name, current_value):
+    parameter_key = parameter_name.lower()
+    if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+        return False
+    return True
+
+
+def present(configuration_facts, cursor, parameter_name, current_value):
+    parameter_key = parameter_name.lower()
+    changed = False
+    if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+        cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
+        changed = True
+    if changed:
+        configuration_facts.update(get_configuration_facts(cursor, parameter_name))
+    return changed
+
+# module logic
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            parameter=dict(required=True, aliases=['name']),
+            value=dict(default=None),
+            db=dict(default=None),
+            cluster=dict(default='localhost'),
+            port=dict(default='5433'),
+            login_user=dict(default='dbadmin'),
+            login_password=dict(default=None, no_log=True),
+        ), supports_check_mode=True)
+
+    if not pyodbc_found:
+        module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+    parameter_name = module.params['parameter']
+    current_value = module.params['value']
+    db = ''
+    if module.params['db']:
+        db = module.params['db']
+
+    changed = False
+
+    try:
+        dsn = (
+            "Driver=Vertica;"
+            "Server={0};"
+            "Port={1};"
+            "Database={2};"
+            "User={3};"
+            "Password={4};"
+            "ConnectionLoadBalance={5}"
+        ).format(module.params['cluster'], module.params['port'], db,
+                 module.params['login_user'], module.params['login_password'], 'true')
+        db_conn = pyodbc.connect(dsn, autocommit=True)
+        cursor = db_conn.cursor()
+    except Exception as e:
+        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)),
+                         exception=traceback.format_exc())
+
+    try:
+        configuration_facts = get_configuration_facts(cursor)
+        if module.check_mode:
+            changed = not check(configuration_facts, parameter_name, current_value)
+        else:
+            try:
+                changed = present(configuration_facts, cursor, parameter_name, current_value)
+            except pyodbc.Error as e:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+    except NotSupportedError as e:
+        module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+    except CannotDropError as e:
+        module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+    except SystemExit:
+        # avoid catching this on python 2.4
+        raise
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_info.py b/ansible_collections/community/general/plugins/modules/vertica_info.py
new file mode 100644
index 000000000..3106be3b3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_info.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts
+description:
+  - Gathers Vertica database information.
+  - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+    Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+options:
+  cluster:
+    description:
+      - Name of the cluster running the schema.
+    default: localhost
+    type: str
+  port:
+    description:
+      - Database port to connect to.
+    default: '5433'
+    type: str
+  db:
+    description:
+      - Name of the database running the schema.
+    type: str
+  login_user:
+    description:
+      - The username used to authenticate with.
+    default: dbadmin
+    type: str
+  login_password:
+    description:
+      - The password used to authenticate with.
+    type: str
+notes:
+  - The default authentication assumes that you are either logging in as or sudo'ing
+    to the C(dbadmin) account on the host.
+  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+  - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+    to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+    and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+    to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +''' + +EXAMPLES = """ +- name: Gathering vertica facts + community.general.vertica_info: db=db_name + register: result + +- name: Print schemas + ansible.builtin.debug: + msg: "{{ result.vertica_schemas }}" +""" +import traceback + +PYODBC_IMP_ERR = None +try: + import pyodbc +except ImportError: + PYODBC_IMP_ERR = traceback.format_exc() + pyodbc_found = False +else: + pyodbc_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class NotSupportedError(Exception): + pass + +# module specific functions + + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public') + and (? = '' or schema_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee = r.name and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) 
+ """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + + +def get_configuration_facts(cursor, parameter=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) + """, parameter, parameter) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + + +def get_node_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select node_name, node_address, export_address, node_state, node_type, + catalog_path + from nodes + """) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.node_address] = { + 'node_name': row.node_name, + 'export_address': row.export_address, + 'node_state': row.node_state, + 'node_type': row.node_type, + 'catalog_path': row.catalog_path} + return facts + +# module logic + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cluster=dict(default='localhost'), + port=dict(default='5433'), + db=dict(default=None), + login_user=dict(default='dbadmin'), + login_password=dict(default=None, no_log=True), + ), supports_check_mode=True) + + if not pyodbc_found: + module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + + db = '' + if module.params['db']: + db = module.params['db'] + + try: + dsn = ( + "Driver=Vertica;" + "Server=%s;" + "Port=%s;" + "Database=%s;" + "User=%s;" + "Password=%s;" + "ConnectionLoadBalance=%s" + ) % (module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception as e: + module.fail_json(msg="Unable to connect to database: %s." 
% to_native(e), exception=traceback.format_exc())
+
+    try:
+        schema_facts = get_schema_facts(cursor)
+        user_facts = get_user_facts(cursor)
+        role_facts = get_role_facts(cursor)
+        configuration_facts = get_configuration_facts(cursor)
+        node_facts = get_node_facts(cursor)
+
+        module.exit_json(changed=False,
+                         vertica_schemas=schema_facts,
+                         vertica_users=user_facts,
+                         vertica_roles=role_facts,
+                         vertica_configuration=configuration_facts,
+                         vertica_nodes=node_facts)
+    except NotSupportedError as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+    except SystemExit:
+        # avoid catching this on python 2.4
+        raise
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_role.py b/ansible_collections/community/general/plugins/modules/vertica_role.py
new file mode 100644
index 000000000..704594a12
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_role.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_role
+short_description: Adds or removes Vertica database roles and assigns roles to them
+description:
+  - Adds or removes Vertica database role and, optionally, assigns other roles to it.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  role:
+    description:
+      - Name of the role to add or remove.
+    required: true
+    type: str
+    aliases: ['name']
+  assigned_roles:
+    description:
+      - Comma separated list of roles to assign to the role.
+    aliases: ['assigned_role']
+    type: str
+  state:
+    description:
+      - Whether to create C(present) or drop C(absent) a role.
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  db:
+    description:
+      - Name of the Vertica database.
+    type: str
+  cluster:
+    description:
+      - Name of the Vertica cluster.
+    default: localhost
+    type: str
+  port:
+    description:
+      - Vertica cluster port to connect to.
+    default: '5433'
+    type: str
+  login_user:
+    description:
+      - The username used to authenticate with.
+    default: dbadmin
+    type: str
+  login_password:
+    description:
+      - The password used to authenticate with.
+    type: str
+notes:
+  - The default authentication assumes that you are either logging in as or sudo'ing
+    to the C(dbadmin) account on the host.
+  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+  - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+    to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+    and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+    to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +''' + +EXAMPLES = """ +- name: Creating a new vertica role + community.general.vertica_role: name=role_name db=db_name state=present + +- name: Creating a new vertica role with other role assigned + community.general.vertica_role: name=role_name assigned_role=other_role_name state=present +""" +import traceback + +PYODBC_IMP_ERR = None +try: + import pyodbc +except ImportError: + PYODBC_IMP_ERR = traceback.format_exc() + pyodbc_found = False +else: + pyodbc_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class NotSupportedError(Exception): + pass + + +class CannotDropError(Exception): + pass + +# module specific functions + + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) + """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + + +def update_roles(role_facts, cursor, role, + existing, required): + for assigned_role in set(existing) - set(required): + cursor.execute("revoke {0} from {1}".format(assigned_role, role)) + for assigned_role in set(required) - set(existing): + cursor.execute("grant {0} to {1}".format(assigned_role, role)) + + +def check(role_facts, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + return False + if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']): + return False + return True + + +def present(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + cursor.execute("create role {0}".format(role)) + update_roles(role_facts, cursor, role, [], assigned_roles) + role_facts.update(get_role_facts(cursor, role)) + return True + else: + changed = False + if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])): + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], assigned_roles) + changed = True + if changed: + role_facts.update(get_role_facts(cursor, role)) + return changed + + +def absent(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key in role_facts: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], []) + cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) + del role_facts[role_key] + return True + else: + return False + +# module logic + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + role=dict(required=True, aliases=['name']), + assigned_roles=dict(default=None, aliases=['assigned_role']), + state=dict(default='present', choices=['absent', 'present']), + db=dict(), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(no_log=True), + ), supports_check_mode=True) + + if not pyodbc_found: + module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) + + role = module.params['role'] + assigned_roles = [] + if module.params['assigned_roles']: + assigned_roles = module.params['assigned_roles'].split(',') + assigned_roles 
= list(filter(None, assigned_roles))
+    state = module.params['state']
+    db = ''
+    if module.params['db']:
+        db = module.params['db']
+
+    changed = False
+
+    try:
+        dsn = (
+            "Driver=Vertica;"
+            "Server={0};"
+            "Port={1};"
+            "Database={2};"
+            "User={3};"
+            "Password={4};"
+            "ConnectionLoadBalance={5}"
+        ).format(module.params['cluster'], module.params['port'], db,
+                 module.params['login_user'], module.params['login_password'], 'true')
+        db_conn = pyodbc.connect(dsn, autocommit=True)
+        cursor = db_conn.cursor()
+    except Exception as e:
+        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+    try:
+        role_facts = get_role_facts(cursor)
+        if module.check_mode:
+            changed = not check(role_facts, role, assigned_roles)
+        elif state == 'absent':
+            try:
+                changed = absent(role_facts, cursor, role, assigned_roles)
+            except pyodbc.Error as e:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+        elif state == 'present':
+            try:
+                changed = present(role_facts, cursor, role, assigned_roles)
+            except pyodbc.Error as e:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+    except NotSupportedError as e:
+        module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+    except CannotDropError as e:
+        module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+    except SystemExit:
+        # avoid catching this on python 2.4
+        raise
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_schema.py b/ansible_collections/community/general/plugins/modules/vertica_schema.py
new file mode 100644
index 000000000..01f8f721e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_schema.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_schema
+short_description: Adds or removes Vertica database schema and roles
+description:
+  - Adds or removes Vertica database schema and, optionally, roles
+    with schema access privileges.
+  - A schema will not be removed until all the objects have been dropped.
+  - In such a situation, if the module tries to remove the schema it
+    will fail and only remove roles created for the schema if they have
+    no dependencies.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  schema:
+    description:
+      - Name of the schema to add or remove.
+    required: true
+    aliases: ['name']
+    type: str
+  usage_roles:
+    description:
+      - Comma separated list of roles to create and grant usage access to the schema.
+    aliases: ['usage_role']
+    type: str
+  create_roles:
+    description:
+      - Comma separated list of roles to create and grant usage and create access to the schema.
+    aliases: ['create_role']
+    type: str
+  owner:
+    description:
+      - Name of the user to set as owner of the schema.
+    type: str
+  state:
+    description:
+      - Whether to create C(present) or drop C(absent) a schema.
+    default: present
+    choices: ['present', 'absent']
+    type: str
+  db:
+    description:
+      - Name of the Vertica database.
+    type: str
+  cluster:
+    description:
+      - Name of the Vertica cluster.
+    default: localhost
+    type: str
+  port:
+    description:
+      - Vertica cluster port to connect to.
+    default: '5433'
+    type: str
+  login_user:
+    description:
+      - The username used to authenticate with.
+    default: dbadmin
+    type: str
+  login_password:
+    description:
+      - The password used to authenticate with.
+    type: str
+notes:
+  - The default authentication assumes that you are either logging in as or sudo'ing
+    to the C(dbadmin) account on the host.
+  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+  - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+    to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+    and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+    to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica schema
+  community.general.vertica_schema: name=schema_name db=db_name state=present
+
+- name: Creating a new schema with specific schema owner
+  community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present
+
+- name: Creating a new schema with roles
+  community.general.vertica_schema:
+    name=schema_name
+    create_roles=schema_name_all
+    usage_roles=schema_name_ro,schema_name_rw
+    db=db_name
+    state=present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+    import pyodbc
+except ImportError:
+    PYODBC_IMP_ERR = traceback.format_exc()
+    pyodbc_found = False
+else:
+    pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class NotSupportedError(Exception):
+    pass
+
+
+class CannotDropError(Exception):
+    pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+    facts = {}
+    cursor.execute("""
+        select schema_name, schema_owner, create_time
+        from schemata
+        where not is_system_schema and schema_name not in ('public', 'TxtIndex')
+        and (? = '' or schema_name ilike ?)
+    """, schema, schema)
+    while True:
+        rows = cursor.fetchmany(100)
+        if not rows:
+            break
+        for row in rows:
+            facts[row.schema_name.lower()] = {
+                'name': row.schema_name,
+                'owner': row.schema_owner,
+                'create_time': str(row.create_time),
+                'usage_roles': [],
+                'create_roles': []}
+    cursor.execute("""
+        select g.object_name as schema_name, r.name as role_name,
+        lower(g.privileges_description) privileges_description
+        from roles r join grants g
+        on g.grantee_id = r.role_id and g.object_type='SCHEMA'
+        and g.privileges_description like '%USAGE%'
+        and g.grantee not in ('public', 'dbadmin')
+        and (? = '' or g.object_name ilike ?)
+    """, schema, schema)
+    while True:
+        rows = cursor.fetchmany(100)
+        if not rows:
+            break
+        for row in rows:
+            schema_key = row.schema_name.lower()
+            if 'create' in row.privileges_description:
+                facts[schema_key]['create_roles'].append(row.role_name)
+            else:
+                facts[schema_key]['usage_roles'].append(row.role_name)
+    return facts
+
+
+def update_roles(schema_facts, cursor, schema,
+                 existing, required,
+                 create_existing, create_required):
+    for role in set(existing + create_existing) - set(required + create_required):
+        cursor.execute("drop role {0} cascade".format(role))
+    for role in set(create_existing) - set(create_required):
+        cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
+    for role in set(required + create_required) - set(existing + create_existing):
+        cursor.execute("create role {0}".format(role))
+        cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
+    for role in set(create_required) - set(create_existing):
+        cursor.execute("grant create on schema {0} to {1}".format(schema, role))
+
+
+def check(schema_facts, schema, usage_roles, create_roles, owner):
+    schema_key = schema.lower()
+    if schema_key not in schema_facts:
+        return False
+    if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+        return False
+    if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
+        return False
+    if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+        return False
+    return True
+
+
+def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
+    schema_key = schema.lower()
+    if schema_key not in schema_facts:
+        query_fragments = ["create schema {0}".format(schema)]
+        if owner:
+            query_fragments.append("authorization {0}".format(owner))
+        cursor.execute(' '.join(query_fragments))
+        update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
+        schema_facts.update(get_schema_facts(cursor, schema))
+        return True
+    else:
+        changed = False
+        if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+            raise NotSupportedError((
+                "Changing schema owner is not supported. "
+                "Current owner: {0}."
+            ).format(schema_facts[schema_key]['owner']))
+        if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
+           sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+
+            update_roles(schema_facts, cursor, schema,
+                         schema_facts[schema_key]['usage_roles'], usage_roles,
+                         schema_facts[schema_key]['create_roles'], create_roles)
+            changed = True
+        if changed:
+            schema_facts.update(get_schema_facts(cursor, schema))
+        return changed
+
+
+def absent(schema_facts, cursor, schema, usage_roles, create_roles):
+    schema_key = schema.lower()
+    if schema_key in schema_facts:
+        update_roles(schema_facts, cursor, schema,
+                     schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
+        try:
+            cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
+        except pyodbc.Error:
+            raise CannotDropError("Dropping schema failed due to dependencies.")
+        del schema_facts[schema_key]
+        return True
+    else:
+        return False
+
+# module logic
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            schema=dict(required=True, aliases=['name']),
+            usage_roles=dict(aliases=['usage_role']),
+            create_roles=dict(aliases=['create_role']),
+            owner=dict(),
+            state=dict(default='present', choices=['absent', 'present']),
+            db=dict(),
+            cluster=dict(default='localhost'),
+            port=dict(default='5433'),
+            login_user=dict(default='dbadmin'),
+            login_password=dict(no_log=True),
+        ), supports_check_mode=True)
+
+    if not pyodbc_found:
+        module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+    schema = module.params['schema']
+    usage_roles = []
+    if module.params['usage_roles']:
+        usage_roles = module.params['usage_roles'].split(',')
+        usage_roles = list(filter(None, usage_roles))
+    create_roles = []
+    if module.params['create_roles']:
+        create_roles = module.params['create_roles'].split(',')
+        create_roles = list(filter(None, create_roles))
+    owner = module.params['owner']
+    state = module.params['state']
+    db = ''
+    if module.params['db']:
+        db = module.params['db']
+
+    changed = False
+
+    try:
+        dsn = (
+            "Driver=Vertica;"
+            "Server={0};"
+            "Port={1};"
+            "Database={2};"
+            "User={3};"
+            "Password={4};"
+            "ConnectionLoadBalance={5}"
+        ).format(module.params['cluster'], module.params['port'], db,
+                 module.params['login_user'], module.params['login_password'], 'true')
+        db_conn = pyodbc.connect(dsn, autocommit=True)
+        cursor = db_conn.cursor()
+    except Exception as e:
+        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+    try:
+        schema_facts = get_schema_facts(cursor)
+        if module.check_mode:
+            changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
+        elif state == 'absent':
+            try:
+                changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
+            except pyodbc.Error as e:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+        elif state == 'present':
+            try:
+                changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
+            except pyodbc.Error as e:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+    except NotSupportedError as e:
+        module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+    except CannotDropError as e:
+        module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+    except SystemExit:
+        # avoid catching this on python 2.4
+        raise
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_user.py b/ansible_collections/community/general/plugins/modules/vertica_user.py
new file mode 100644
index 000000000..a6a5b5951
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_user.py
@@ -0,0 +1,393 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_user
+short_description: Adds or removes Vertica database users and assigns roles
+description:
+  - Adds or removes Vertica database user and, optionally, assigns roles.
+  - A user will not be removed until all the dependencies have been dropped.
+  - In such a situation, if the module tries to remove the user it
+    will fail and only remove roles granted to the user.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  user:
+    description:
+      - Name of the user to add or remove.
+    required: true
+    type: str
+    aliases: ['name']
+  profile:
+    description:
+      - Sets the user's profile.
+    type: str
+  resource_pool:
+    description:
+      - Sets the user's resource pool.
+    type: str
+  password:
+    description:
+      - The user's password encrypted by the MD5 algorithm.
+      - The password must be generated with the format C("md5" + md5[password + username]),
+        resulting in a total of 35 characters. An easy way to do this is by querying
+        the Vertica database with select 'md5'||md5('<password><username>').
+    type: str
+  expired:
+    description:
+      - Sets the user's password expiration.
+    type: bool
+  ldap:
+    description:
+      - Set to true if users are authenticated via LDAP.
+      - The user will be created with password expired and set to I($ldap$).
+    type: bool
+  roles:
+    description:
+      - Comma separated list of roles to assign to the user.
+    aliases: ['role']
+    type: str
+  state:
+    description:
+      - Whether to create C(present), drop C(absent), or lock C(locked) a user.
+    choices: ['present', 'absent', 'locked']
+    default: present
+    type: str
+  db:
+    description:
+      - Name of the Vertica database.
+    type: str
+  cluster:
+    description:
+      - Name of the Vertica cluster.
+    default: localhost
+    type: str
+  port:
+    description:
+      - Vertica cluster port to connect to.
+    default: '5433'
+    type: str
+  login_user:
+    description:
+      - The username used to authenticate with.
+    default: dbadmin
+    type: str
+  login_password:
+    description:
+      - The password used to authenticate with.
+    type: str
+notes:
+  - The default authentication assumes that you are either logging in as or sudo'ing
+    to the C(dbadmin) account on the host.
+  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: "Dariusz Owczarek (@dareko)" +''' + +EXAMPLES = """ +- name: Creating a new vertica user with password + community.general.vertica_user: name=user_name password=md5 db=db_name state=present + +- name: Creating a new vertica user authenticated via ldap with roles assigned + community.general.vertica_user: + name=user_name + ldap=true + db=db_name + roles=schema_name_ro + state=present +""" +import traceback + +PYODBC_IMP_ERR = None +try: + import pyodbc +except ImportError: + PYODBC_IMP_ERR = traceback.format_exc() + pyodbc_found = False +else: + pyodbc_found = True + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class NotSupportedError(Exception): + pass + + +class CannotDropError(Exception): + pass + +# module specific functions + + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + + +def update_roles(user_facts, cursor, user, + existing_all, existing_default, required): + del_roles = list(set(existing_all) - set(required)) + if del_roles: + cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user)) + new_roles = list(set(required) - set(existing_all)) + if new_roles: + cursor.execute("grant {0} to {1}".format(','.join(new_roles), user)) + if required: + cursor.execute("alter user {0} default role {1}".format(user, ','.join(required))) + + +def check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + return False + if profile and profile != user_facts[user_key]['profile']: + return False + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + return False + if locked != (user_facts[user_key]['locked'] == 'True'): + return False + if password and password != user_facts[user_key]['password']: + return False + if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or + ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')): + return False + if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or + 
sorted(roles) != sorted(user_facts[user_key]['default_roles'])): + return False + return True + + +def present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + query_fragments = ["create user {0}".format(user)] + if locked: + query_fragments.append("account lock") + if password or ldap: + if password: + query_fragments.append("identified by '{0}'".format(password)) + else: + query_fragments.append("identified by '$ldap$'") + if expired or ldap: + query_fragments.append("password expire") + if profile: + query_fragments.append("profile {0}".format(profile)) + if resource_pool: + query_fragments.append("resource pool {0}".format(resource_pool)) + cursor.execute(' '.join(query_fragments)) + if resource_pool and resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + update_roles(user_facts, cursor, user, [], [], roles) + user_facts.update(get_user_facts(cursor, user)) + return True + else: + changed = False + query_fragments = ["alter user {0}".format(user)] + if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): + if locked: + state = 'lock' + else: + state = 'unlock' + query_fragments.append("account {0}".format(state)) + changed = True + if password and password != user_facts[user_key]['password']: + query_fragments.append("identified by '{0}'".format(password)) + changed = True + if ldap: + if ldap != (user_facts[user_key]['expired'] == 'True'): + query_fragments.append("password expire") + changed = True + elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): + if expired: + query_fragments.append("password expire") + changed = True + else: + raise NotSupportedError("Unexpiring user password is not supported.") + if profile and profile != user_facts[user_key]['profile']: + query_fragments.append("profile {0}".format(profile)) + changed = True + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + query_fragments.append("resource pool {0}".format(resource_pool)) + if user_facts[user_key]['resource_pool'] != 'general': + cursor.execute("revoke usage on resource pool {0} from {1}".format( + user_facts[user_key]['resource_pool'], user)) + if resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + changed = True + if changed: + cursor.execute(' '.join(query_fragments)) + if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or + sorted(roles) != sorted(user_facts[user_key]['default_roles'])): + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) + changed = True + if changed: + user_facts.update(get_user_facts(cursor, user)) + return changed + + +def absent(user_facts, cursor, user, roles): + user_key = user.lower() + if user_key in user_facts: + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], []) + try: + cursor.execute("drop user {0}".format(user_facts[user_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping user failed due to dependencies.") + del user_facts[user_key] + return True + else: + return False + +# module logic + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True, aliases=['name']), + profile=dict(), + resource_pool=dict(), + password=dict(no_log=True), + 
expired=dict(type='bool'),
+            ldap=dict(type='bool'),
+            roles=dict(aliases=['role']),
+            state=dict(default='present', choices=['absent', 'present', 'locked']),
+            db=dict(),
+            cluster=dict(default='localhost'),
+            port=dict(default='5433'),
+            login_user=dict(default='dbadmin'),
+            login_password=dict(no_log=True),
+        ), supports_check_mode=True)
+
+    if not pyodbc_found:
+        module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+    user = module.params['user']
+    profile = module.params['profile']
+    if profile:
+        profile = profile.lower()
+    resource_pool = module.params['resource_pool']
+    if resource_pool:
+        resource_pool = resource_pool.lower()
+    password = module.params['password']
+    expired = module.params['expired']
+    ldap = module.params['ldap']
+    roles = []
+    if module.params['roles']:
+        roles = module.params['roles'].split(',')
+        roles = list(filter(None, roles))
+    state = module.params['state']
+    if state == 'locked':
+        locked = True
+    else:
+        locked = False
+    db = ''
+    if module.params['db']:
+        db = module.params['db']
+
+    changed = False
+
+    try:
+        dsn = (
+            "Driver=Vertica;"
+            "Server={0};"
+            "Port={1};"
+            "Database={2};"
+            "User={3};"
+            "Password={4};"
+            "ConnectionLoadBalance={5}"
+        ).format(module.params['cluster'], module.params['port'], db,
+                 module.params['login_user'], module.params['login_password'], 'true')
+        db_conn = pyodbc.connect(dsn, autocommit=True)
+        cursor = db_conn.cursor()
+    except Exception as e:
+        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+    try:
+        user_facts = get_user_facts(cursor)
+        if module.check_mode:
+            changed = not check(user_facts, user, profile, resource_pool,
+                                locked, password, expired, ldap, roles)
+        elif state == 'absent':
+            try:
+                changed = absent(user_facts, cursor, user, roles)
+            except pyodbc.Error as e:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+        elif state in ['present', 'locked']:
+            try:
+                changed = present(user_facts, cursor, user, profile, resource_pool,
+                                  locked, password, expired, ldap, roles)
+            except pyodbc.Error as e:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+    except NotSupportedError as e:
+        module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+    except CannotDropError as e:
+        module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+    except SystemExit:
+        # avoid catching this on python 2.4
+        raise
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/vexata_eg.py b/ansible_collections/community/general/plugins/modules/vexata_eg.py
new file mode 100644
index 000000000..457d1fa9e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vexata_eg.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_eg
+short_description: Manage export groups on Vexata VX100 storage arrays
+description:
+  - Create or delete export groups on a Vexata VX100 array.
+  - An export group is a tuple of a volume group, initiator group and port
+    group that allows a set of volumes to be exposed to one or more hosts
+    through specific array ports.
+author:
+  - Sandeep Kasargod (@vexata)
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Export group name.
+    required: true
+    type: str
+  state:
+    description:
+      - Creates the export group when present, or deletes it when absent.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  vg:
+    description:
+      - Volume group name.
+    type: str
+  ig:
+    description:
+      - Initiator group name.
+    type: str
+  pg:
+    description:
+      - Port group name.
+    type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+- community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Create export group named db_export
+  community.general.vexata_eg:
+    name: db_export
+    vg: dbvols
+    ig: dbhosts
+    pg: pg1
+    state: present
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+
+- name: Delete export group named db_export
+  community.general.vexata_eg:
+    name: db_export
+    state: absent
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+    argument_spec, get_array, required_together)
+
+
+def get_eg(module, array):
+    """Retrieve a named eg if it exists, None if absent."""
+    name = module.params['name']
+    try:
+        egs = array.list_egs()
+        # Build a list rather than a lazy filter object so that len()
+        # also works on Python 3.
+        eg = [e for e in egs if e['name'] == name]
+        if len(eg) == 1:
+            return eg[0]
+        else:
+            return None
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve export groups.')
+
+
+def get_vg_id(module, array):
+    """Retrieve a named vg's id if it exists, error if absent."""
+    name = module.params['vg']
+    try:
+        vgs = array.list_vgs()
+        vg = [v for v in vgs if v['name'] == name]
+        if len(vg) == 1:
+            return vg[0]['id']
+        else:
+            module.fail_json(msg='Volume group {0} was not found.'.format(name))
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve volume groups.')
+
+
+def get_ig_id(module, array):
+    """Retrieve a named ig's id if it exists, error if absent."""
+    name = module.params['ig']
+    try:
+        igs = array.list_igs()
+        ig = [i for i in igs if i['name'] == name]
+        if len(ig) == 1:
+            return ig[0]['id']
+        else:
+            module.fail_json(msg='Initiator group {0} was not found.'.format(name))
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve initiator groups.')
+
+
+def get_pg_id(module, array):
+    """Retrieve a named pg's id if it exists, error if absent."""
+    name = module.params['pg']
+    try:
+        pgs = array.list_pgs()
+        pg = [p for p in pgs if p['name'] == name]
+        if len(pg) == 1:
+            return pg[0]['id']
+        else:
+            module.fail_json(msg='Port group {0} was not found.'.format(name))
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve port groups.')
+
+
+def create_eg(module, array):
+    """Create a new export group."""
+    changed = False
+    eg_name = module.params['name']
+    vg_id = get_vg_id(module, array)
+    ig_id = get_ig_id(module, array)
+    pg_id = get_pg_id(module, array)
+    if module.check_mode:
+        module.exit_json(changed=changed)
+
+    try:
+        eg = array.create_eg(
+            eg_name,
+            'Ansible export group',
+            (vg_id, ig_id, pg_id))
+        if eg:
+            module.log(msg='Created export group {0}'.format(eg_name))
+            changed = True
+        else:
+            raise Exception
+    except Exception:
+        module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
+    module.exit_json(changed=changed)
+
+
+def delete_eg(module, array, eg):
+    changed = False
+    eg_name = eg['name']
+    if module.check_mode:
+        module.exit_json(changed=changed)
+
+    try:
+        ok = array.delete_eg(
+            eg['id'])
+        if ok:
+            module.log(msg='Export group {0} deleted.'.format(eg_name))
+            changed = True
+        else:
+            raise Exception
+    except Exception:
+        module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
+    module.exit_json(changed=changed)
+
+
+def main():
+    arg_spec = argument_spec()
+    arg_spec.update(
+        dict(
+            name=dict(type='str', required=True),
+            state=dict(type='str', default='present', choices=['present', 'absent']),
+            vg=dict(type='str'),
+            ig=dict(type='str'),
+            pg=dict(type='str')
+        )
+    )
+
+    module = AnsibleModule(arg_spec,
+                           supports_check_mode=True,
+                           required_together=required_together())
+
+    state = module.params['state']
+    array = get_array(module)
+    eg = get_eg(module, array)
+
+    if state == 'present' and not eg:
+        create_eg(module, array)
+    elif state == 'absent' and eg:
+        delete_eg(module, array, eg)
+    else:
+        module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/vexata_volume.py b/ansible_collections/community/general/plugins/modules/vexata_volume.py
new file mode 100644
index 000000000..7fdfc7e5f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vexata_volume.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_volume
+short_description: Manage volumes on Vexata VX100 storage arrays
+description:
+  - Create, delete, or extend volumes on a Vexata VX100 array.
+author:
+- Sandeep Kasargod (@vexata)
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - Volume name.
+    required: true
+    type: str
+  state:
+    description:
+      - Creates or modifies the volume when present, or removes it when absent.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  size:
+    description:
+      - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes.
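+      - For example, C(2T) requests a 2 TiB volume.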
+    type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+- community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Create new 2 TiB volume named foo
+  community.general.vexata_volume:
+    name: foo
+    size: 2T
+    state: present
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+
+- name: Expand volume named foo to 4 TiB
+  community.general.vexata_volume:
+    name: foo
+    size: 4T
+    state: present
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
+
+- name: Delete volume named foo
+  community.general.vexata_volume:
+    name: foo
+    state: absent
+    array: vx100_ultra.test.com
+    user: admin
+    password: secret
'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+    argument_spec, get_array, required_together, size_to_MiB)
+
+
+def get_volume(module, array):
+    """Retrieve a named volume if it exists, None if absent."""
+    name = module.params['name']
+    try:
+        vols = array.list_volumes()
+        # Build a list rather than a lazy filter object so that len()
+        # also works on Python 3.
+        vol = [v for v in vols if v['name'] == name]
+        if len(vol) == 1:
+            return vol[0]
+        else:
+            return None
+    except Exception:
+        module.fail_json(msg='Error while attempting to retrieve volumes.')
+
+
+def validate_size(module, err_msg):
+    size = module.params.get('size', False)
+    if not size:
+        module.fail_json(msg=err_msg)
+    size = size_to_MiB(size)
+    if size <= 0:
+        module.fail_json(msg='Invalid volume size, must be a positive number '
+                             'suffixed with M, G or T.')
+    return size
+
+
+def create_volume(module, array):
+    """Create a new volume."""
+    changed = False
+    size = validate_size(module, err_msg='Size is required to create volume.')
+    if module.check_mode:
+        module.exit_json(changed=changed)
+
+    try:
+        vol = array.create_volume(
+            module.params['name'],
+            'Ansible volume',
+            size)
+        if vol:
+            module.log(msg='Created volume {0}'.format(vol['id']))
+            changed = True
+        else:
+            module.fail_json(msg='Volume create failed.')
+    except Exception:
+        pass
+    module.exit_json(changed=changed)
+
+
+def update_volume(module, array, volume):
+    """Expand the volume size."""
+    changed = False
+    size = validate_size(module, err_msg='Size is required to update volume.')
+    prev_size = volume['volSize']
+    if size <= prev_size:
+        module.log(msg='Volume expanded size needs to be larger '
+                       'than current size.')
+    if module.check_mode:
+        module.exit_json(changed=changed)
+
+    try:
+        vol = array.grow_volume(
+            volume['name'],
+            volume['description'],
+            volume['id'],
+            size)
+        if vol:
+            changed = True
+    except Exception:
+        pass
+
+    module.exit_json(changed=changed)
+
+
+def delete_volume(module, array, volume):
+    changed = False
+    vol_name = volume['name']
+    if module.check_mode:
+        module.exit_json(changed=changed)
+
+    try:
+        ok = array.delete_volume(
+            volume['id'])
+        if ok:
+            module.log(msg='Volume {0} deleted.'.format(vol_name))
+            changed = True
+        else:
+            raise Exception
+    except Exception:
+        pass
+    module.exit_json(changed=changed)
+
+
+def main():
+    arg_spec = argument_spec()
+    arg_spec.update(
+        dict(
+            name=dict(type='str', required=True),
+            state=dict(default='present', choices=['present', 'absent']),
+            size=dict(type='str')
+        )
+    )
+
+    module = AnsibleModule(arg_spec,
+                           supports_check_mode=True,
+                           required_together=required_together())
+
+    state = module.params['state']
+    array = get_array(module)
+    volume = get_volume(module, array)
+
+    if state == 'present':
+        if not volume:
+            create_volume(module, array)
+        else:
+            update_volume(module, array, volume)
+    elif state == 'absent' and volume:
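+        # delete_volume() honours check mode and calls module.exit_json()
+        # itself, so there is nothing more to do here.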
delete_volume(module, array, volume) + else: + module.exit_json(changed=False) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/vmadm.py b/ansible_collections/community/general/plugins/modules/vmadm.py new file mode 100644 index 000000000..56ade17e4 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/vmadm.py @@ -0,0 +1,790 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Jasper Lievisse Adriaanse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: vmadm +short_description: Manage SmartOS virtual machines and zones +description: + - Manage SmartOS virtual machines through vmadm(1M). +author: Jasper Lievisse Adriaanse (@jasperla) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + archive_on_delete: + required: false + description: + - When enabled, the zone dataset will be mounted on C(/zones/archive) + upon removal. + type: bool + autoboot: + required: false + description: + - Whether or not a VM is booted when the system is rebooted. + type: bool + brand: + choices: [ joyent, joyent-minimal, lx, kvm, bhyve ] + default: joyent + description: + - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0. + type: str + boot: + required: false + description: + - Set the boot order for KVM VMs. + type: str + cpu_cap: + required: false + description: + - Sets a limit on the amount of CPU time that can be used by a VM. + Use C(0) for no cap. + type: int + cpu_shares: + required: false + description: + - Sets a limit on the number of fair share scheduler (FSS) CPU shares for + a VM. This limit is relative to all other VMs on the system. + type: int + cpu_type: + required: false + choices: [ qemu64, host ] + default: qemu64 + description: + - Control the type of virtual CPU exposed to KVM VMs. + type: str + customer_metadata: + required: false + description: + - Metadata to be set and associated with this VM, this contain customer + modifiable keys. + type: dict + delegate_dataset: + required: false + description: + - Whether to delegate a ZFS dataset to an OS VM. + type: bool + disk_driver: + required: false + description: + - Default value for a virtual disk model for KVM guests. + type: str + disks: + required: false + description: + - A list of disks to add, valid properties are documented in vmadm(1M). + type: list + elements: dict + dns_domain: + required: false + description: + - Domain value for C(/etc/hosts). + type: str + docker: + required: false + description: + - Docker images need this flag enabled along with the I(brand) set to C(lx). + type: bool + filesystems: + required: false + description: + - Mount additional filesystems into an OS VM. + type: list + elements: dict + firewall_enabled: + required: false + description: + - Enables the firewall, allowing fwadm(1M) rules to be applied. + type: bool + force: + required: false + description: + - Force a particular action (i.e. stop or delete a VM). + type: bool + fs_allowed: + required: false + description: + - Comma separated list of filesystem types this zone is allowed to mount. + type: str + hostname: + required: false + description: + - Zone/VM hostname. 
+ type: str + image_uuid: + required: false + description: + - Image UUID. + type: str + indestructible_delegated: + required: false + description: + - Adds an C(@indestructible) snapshot to delegated datasets. + type: bool + indestructible_zoneroot: + required: false + description: + - Adds an C(@indestructible) snapshot to zoneroot. + type: bool + internal_metadata: + required: false + description: + - Metadata to be set and associated with this VM, this contains operator + generated keys. + type: dict + internal_metadata_namespace: + required: false + description: + - List of namespaces to be set as I(internal_metadata-only); these namespaces + will come from I(internal_metadata) rather than I(customer_metadata). + type: str + kernel_version: + required: false + description: + - Kernel version to emulate for LX VMs. + type: str + limit_priv: + required: false + description: + - Set (comma separated) list of privileges the zone is allowed to use. + type: str + maintain_resolvers: + required: false + description: + - Resolvers in C(/etc/resolv.conf) will be updated when updating + the I(resolvers) property. + type: bool + max_locked_memory: + required: false + description: + - Total amount of memory (in MiBs) on the host that can be locked by this VM. + type: int + max_lwps: + required: false + description: + - Maximum number of lightweight processes this VM is allowed to have running. + type: int + max_physical_memory: + required: false + description: + - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use. + type: int + max_swap: + required: false + description: + - Maximum amount of virtual memory (in MiBs) the VM is allowed to use. + type: int + mdata_exec_timeout: + required: false + description: + - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service + that runs user-scripts in the zone. + type: int + name: + required: false + aliases: [ alias ] + description: + - Name of the VM. vmadm(1M) uses this as an optional name. + type: str + nic_driver: + required: false + description: + - Default value for a virtual NIC model for KVM guests. + type: str + nics: + required: false + description: + - A list of nics to add, valid properties are documented in vmadm(1M). + type: list + elements: dict + nowait: + required: false + description: + - Consider the provisioning complete when the VM first starts, rather than + when the VM has rebooted. + type: bool + qemu_opts: + required: false + description: + - Additional qemu arguments for KVM guests. This overwrites the default arguments + provided by vmadm(1M) and should only be used for debugging. + type: str + qemu_extra_opts: + required: false + description: + - Additional qemu cmdline arguments for KVM guests. + type: str + quota: + required: false + description: + - Quota on zone filesystems (in MiBs). + type: int + ram: + required: false + description: + - Amount of virtual RAM for a KVM guest (in MiBs). + type: int + resolvers: + required: false + description: + - List of resolvers to be put into C(/etc/resolv.conf). + type: list + elements: str + routes: + required: false + description: + - Dictionary that maps destinations to gateways, these will be set as static + routes in the VM. + type: dict + spice_opts: + required: false + description: + - Addition options for SPICE-enabled KVM VMs. + type: str + spice_password: + required: false + description: + - Password required to connect to SPICE. By default no password is set. + Please note this can be read from the Global Zone. 
+    type: str
+  state:
+    choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ]
+    default: running
+    description:
+      - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
+        operate on a VM that is currently provisioned. C(present) means that the VM will be
+        created if it was absent, and that it will be in a running state. C(absent) will
+        shutdown the zone before removing it.
+        C(stopped) means the zone will be created if it doesn't exist already, before shutting
+        it down.
+    type: str
+  tmpfs:
+    required: false
+    description:
+      - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
+    type: int
+  uuid:
+    required: false
+    description:
+      - UUID of the VM. Can either be a full UUID or C(*) for all VMs.
+    type: str
+  vcpus:
+    required: false
+    description:
+      - Number of virtual CPUs for a KVM guest.
+    type: int
+  vga:
+    required: false
+    description:
+      - Specify VGA emulation used by KVM VMs.
+    type: str
+  virtio_txburst:
+    required: false
+    description:
+      - Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
+    type: int
+  virtio_txtimer:
+    required: false
+    description:
+      - Timeout (in nanoseconds) for the TX timer of virtio NICs.
+    type: int
+  vnc_password:
+    required: false
+    description:
+      - Password required to connect to VNC. By default no password is set.
+        Please note this can be read from the Global Zone.
+    type: str
+  vnc_port:
+    required: false
+    description:
+      - TCP port for the VNC server to listen on. Set C(0) for a random port,
+        or C(-1) to disable.
+    type: int
+  zfs_data_compression:
+    required: false
+    description:
+      - Specifies compression algorithm used for this VM's data dataset. This option
+        only has effect on delegated datasets.
+    type: str
+  zfs_data_recsize:
+    required: false
+    description:
+      - Suggested block size (power of 2) for files in the delegated dataset's filesystem.
+    type: int
+  zfs_filesystem_limit:
+    required: false
+    description:
+      - Maximum number of filesystems the VM can have.
+    type: int
+  zfs_io_priority:
+    required: false
+    description:
+      - IO throttle priority value relative to other VMs.
+    type: int
+  zfs_root_compression:
+    required: false
+    description:
+      - Specifies compression algorithm used for this VM's root dataset. This option
+        only has effect on the zoneroot dataset.
+    type: str
+  zfs_root_recsize:
+    required: false
+    description:
+      - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
+    type: int
+  zfs_snapshot_limit:
+    required: false
+    description:
+      - Number of snapshots the VM can have.
+    type: int
+  zpool:
+    required: false
+    description:
+      - ZFS pool the VM's zone dataset will be created in.
+    type: str
+requirements:
+  - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Create SmartOS zone
+  community.general.vmadm:
+    brand: joyent
+    state: present
+    alias: fw_zone
+    image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
+    firewall_enabled: true
+    indestructible_zoneroot: true
+    nics:
+      - nic_tag: admin
+        ip: dhcp
+        primary: true
+    internal_metadata:
+      root_pw: 'secret'
+    quota: 1
+
+- name: Delete a zone
+  community.general.vmadm:
+    alias: test_zone
+    state: deleted
+
+- name: Stop all zones
+  community.general.vmadm:
+    uuid: '*'
+    state: stopped
+'''
+
+RETURN = '''
+uuid:
+  description: UUID of the managed VM.
+  returned: always
+  type: str
+  sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
+alias:
+  description: Alias of the managed VM.
+  returned: When addressing a VM by alias.
+ type: str + sample: 'dns-zone' +state: + description: State of the target, after execution. + returned: success + type: str + sample: 'running' +''' + +import json +import os +import re +import tempfile +import traceback + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +# While vmadm(1M) supports a -E option to return any errors in JSON, the +# generated JSON does not play well with the JSON parsers of Python. +# The returned message contains '\n' as part of the stacktrace, +# which breaks the parsers. + + +def get_vm_prop(module, uuid, prop): + # Lookup a property for the given VM. + # Returns the property, or None if not found. + cmd = [module.vmadm, 'lookup', '-j', '-o', prop, 'uuid={0}'.format(uuid)] + + (rc, stdout, stderr) = module.run_command(cmd) + + if rc != 0: + module.fail_json( + msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr) + + try: + stdout_json = json.loads(stdout) + except Exception as e: + module.fail_json( + msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop), + details=to_native(e), exception=traceback.format_exc()) + + if stdout_json: + return stdout_json[0].get(prop) + + +def get_vm_uuid(module, alias): + # Lookup the uuid that goes with the given alias. + # Returns the uuid or '' if not found. + cmd = [module.vmadm, 'lookup', '-j', '-o', 'uuid', 'alias={0}'.format(alias)] + + (rc, stdout, stderr) = module.run_command(cmd) + + if rc != 0: + module.fail_json( + msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr) + + # If no VM was found matching the given alias, we get back an empty array. + # That is not an error condition as we might be explicitly checking it's + # absence. + try: + stdout_json = json.loads(stdout) + except Exception as e: + module.fail_json( + msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias), + details=to_native(e), exception=traceback.format_exc()) + + if stdout_json: + return stdout_json[0].get('uuid') + + +def get_all_vm_uuids(module): + # Retrieve the UUIDs for all VMs. + cmd = [module.vmadm, 'lookup', '-j', '-o', 'uuid'] + + (rc, stdout, stderr) = module.run_command(cmd) + + if rc != 0: + module.fail_json(msg='Failed to get VMs list', exception=stderr) + + try: + stdout_json = json.loads(stdout) + return [v['uuid'] for v in stdout_json] + except Exception as e: + module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e), + exception=traceback.format_exc()) + + +def new_vm(module, uuid, vm_state): + payload_file = create_payload(module, uuid) + + (rc, dummy, stderr) = vmadm_create_vm(module, payload_file) + + if rc != 0: + changed = False + module.fail_json(msg='Could not create VM', exception=stderr) + else: + changed = True + # 'vmadm create' returns all output to stderr... + match = re.match('Successfully created VM (.*)', stderr) + if match: + vm_uuid = match.groups()[0] + if not is_valid_uuid(vm_uuid): + module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid)) + else: + module.fail_json(msg='Could not retrieve UUID of newly created(?) 
VM')
+
+        # Now that the VM is created, ensure it is in the desired state (if not 'running')
+        if vm_state != 'running':
+            ret = set_vm_state(module, vm_uuid, vm_state)
+            if not ret:
+                module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
+
+    try:
+        os.unlink(payload_file)
+    except Exception as e:
+        # Since the payload may contain sensitive information, fail hard
+        # if we cannot remove the file so the operator knows about it.
+        module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
+                         exception=traceback.format_exc())
+
+    return changed, vm_uuid
+
+
+def vmadm_create_vm(module, payload_file):
+    # Create a new VM using the provided payload.
+    cmd = [module.vmadm, 'create', '-f', payload_file]
+
+    return module.run_command(cmd)
+
+
+def set_vm_state(module, vm_uuid, vm_state):
+    p = module.params
+
+    # Check if the VM is already in the desired state.
+    state = get_vm_prop(module, vm_uuid, 'state')
+    if state and (state == vm_state):
+        return None
+
+    # Lookup table for the state to be in, and which command to use for that.
+    # vm_state: [vmadm command, forceable?]
+    cmds = {
+        'stopped': ['stop', True],
+        'running': ['start', False],
+        'deleted': ['delete', True],
+        'rebooted': ['reboot', False]
+    }
+
+    command, forceable = cmds[vm_state]
+    force = ['-F'] if p['force'] and forceable else []
+
+    cmd = [module.vmadm, command] + force + [vm_uuid]
+
+    (dummy, dummy, stderr) = module.run_command(cmd)
+
+    match = re.match('^Successfully.*', stderr)
+    return match is not None
+
+
+def create_payload(module, uuid):
+    # Create the JSON payload (vmdef) and return the filename.
+
+    # Filter out the few options that are not valid VM properties.
+    module_options = ['force', 'state']
+    # @TODO make this a simple {} comprehension as soon as py2 is ditched
+    # @TODO {k: v for k, v in p.items() if k not in module_options}
+    vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v])
+
+    try:
+        vmdef_json = json.dumps(vmdef)
+    except Exception:
+        module.fail_json(
+            msg='Could not create valid JSON payload', exception=traceback.format_exc())
+
+    # Create the temporary file that contains our payload, and set tight
+    # permissions, as it may contain sensitive information.
+    try:
+        # XXX: When there's a way to get the current ansible temporary directory
+        # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
+        # the payload (thus removing the `save_payload` option).
+        fname = tempfile.mkstemp()[1]
+        os.chmod(fname, 0o400)
+        with open(fname, 'w') as fh:
+            fh.write(vmdef_json)
+    except Exception as e:
+        module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+
+    return fname
+
+
+def vm_state_transition(module, uuid, vm_state):
+    ret = set_vm_state(module, uuid, vm_state)
+
+    # Whether the VM changed state.
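+    # set_vm_state() returns None when the VM was already in the desired
+    # state, True when the transition succeeded, and False when vmadm did
+    # not report success.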
+ if ret is None: + return False + elif ret: + return True + else: + module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state)) + + +def is_valid_uuid(uuid): + return re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE) is not None + + +def validate_uuids(module): + failed = [ + name + for name, pvalue in [(x, module.params[x]) for x in ['uuid', 'image_uuid']] + if pvalue and pvalue != '*' and not is_valid_uuid(pvalue) + ] + + if failed: + module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed))) + + +def manage_all_vms(module, vm_state): + # Handle operations for all VMs, which can by definition only + # be state transitions. + state = module.params['state'] + + if state == 'created': + module.fail_json(msg='State "created" is only valid for tasks with a single VM') + + # If any of the VMs has a change, the task as a whole has a change. + any_changed = False + + # First get all VM uuids and for each check their state, and adjust it if needed. + for uuid in get_all_vm_uuids(module): + current_vm_state = get_vm_prop(module, uuid, 'state') + if not current_vm_state and vm_state == 'deleted': + any_changed = False + else: + if module.check_mode: + if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state): + any_changed = True + else: + any_changed = vm_state_transition(module, uuid, vm_state) or any_changed + + return any_changed + + +def main(): + # In order to reduce the clutter and boilerplate for trivial options, + # abstract the vmadm properties and build the dict of arguments later. + # Dict of all options that are simple to define based on their type. + # They're not required and have a default of None. + properties = { + 'str': [ + 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname', + 'image_uuid', 'internal_metadata_namespace', 'kernel_version', + 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts', + 'spice_opts', 'uuid', 'vga', 'zfs_data_compression', + 'zfs_root_compression', 'zpool' + ], + 'bool': [ + 'archive_on_delete', 'autoboot', 'delegate_dataset', + 'docker', 'firewall_enabled', 'force', 'indestructible_delegated', + 'indestructible_zoneroot', 'maintain_resolvers', 'nowait' + ], + 'int': [ + 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps', + 'max_physical_memory', 'max_swap', 'mdata_exec_timeout', + 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst', + 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize', + 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize', + 'zfs_snapshot_limit' + ], + 'dict': ['customer_metadata', 'internal_metadata', 'routes'], + } + + # Start with the options that are not as trivial as those above. + options = dict( + state=dict( + default='running', + type='str', + choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted'] + ), + name=dict( + type='str', + aliases=['alias'] + ), + brand=dict( + default='joyent', + type='str', + choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve'] + ), + cpu_type=dict( + default='qemu64', + type='str', + choices=['host', 'qemu64'] + ), + # Regular strings, however these require additional options. + spice_password=dict(type='str', no_log=True), + vnc_password=dict(type='str', no_log=True), + disks=dict(type='list', elements='dict'), + nics=dict(type='list', elements='dict'), + resolvers=dict(type='list', elements='str'), + filesystems=dict(type='list', elements='dict'), + ) + + # Add our 'simple' options to options dict. 
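+    # Each name listed above becomes an optional module argument of the given
+    # type with a default of None; options left unset are filtered out of the
+    # vmadm payload later in create_payload().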
    for opt_type in properties:
+        for p in properties[opt_type]:
+            # Name the loop variable 'opt_type' to avoid shadowing the
+            # built-in type().
+            option = dict(type=opt_type)
+            options[p] = option
+
+    module = AnsibleModule(
+        argument_spec=options,
+        supports_check_mode=True,
+        required_one_of=[['name', 'uuid']]
+    )
+
+    module.vmadm = module.get_bin_path('vmadm', required=True)
+
+    p = module.params
+    uuid = p['uuid']
+    state = p['state']
+
+    # Translate the state parameter into something we can use later on.
+    if state in ['present', 'running']:
+        vm_state = 'running'
+    elif state in ['stopped', 'created']:
+        vm_state = 'stopped'
+    elif state in ['absent', 'deleted']:
+        vm_state = 'deleted'
+    elif state in ['restarted', 'rebooted']:
+        vm_state = 'rebooted'
+
+    result = {'state': state}
+
+    # While it's possible to refer to a given VM by its `alias`, it's easier
+    # to operate on VMs by their UUID. So if we're not given a `uuid`, look
+    # it up.
+    if not uuid:
+        uuid = get_vm_uuid(module, p['name'])
+        # Bit of a chicken and egg problem here for VMs with state == deleted.
+        # If they're going to be removed in this play, we have to lookup the
+        # uuid. If they're already deleted there's nothing to lookup.
+        # So if state == deleted and get_vm_uuid() returned '', the VM is already
+        # deleted and there's nothing else to do.
+        if uuid is None and vm_state == 'deleted':
+            result['name'] = p['name']
+            module.exit_json(**result)
+
+    validate_uuids(module)
+
+    if p['name']:
+        result['name'] = p['name']
+    result['uuid'] = uuid
+
+    if uuid == '*':
+        result['changed'] = manage_all_vms(module, vm_state)
+        module.exit_json(**result)
+
+    # The general flow is as follows:
+    # - First the current state of the VM is obtained by its UUID.
+    # - If the state was not found and the desired state is 'deleted', return.
+    # - If the state was not found, it means the VM has to be created.
+    #   Subsequently the VM will be set to the desired state (i.e. stopped)
+    # - Otherwise, it means the VM exists already and we operate on its
+    #   state (i.e. reboot it).
+    #
+    # In the future it should be possible to query the VM for a particular
+    # property as a valid state (i.e. queried) so the result can be
+    # registered.
+    # Also, VMs should be able to get their properties updated.
+    # Managing VM snapshots should be part of a standalone module.
+
+    # First obtain the VM state to determine what needs to be done with it.
+    current_vm_state = get_vm_prop(module, uuid, 'state')
+
+    # First handle the case where the VM should be deleted and is not present.
+    if not current_vm_state and vm_state == 'deleted':
+        result['changed'] = False
+    elif module.check_mode:
+        # Shortcut for check mode, if there is no VM yet, it will need to be created.
+        # Or, if the VM is not in the desired state yet, it needs to transition.
+        result['changed'] = (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state)
+    elif not current_vm_state:
+        # No VM was found that matched the given ID (alias or uuid), so we create it.
+        result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
+    else:
+        # VM was found, operate on its state directly.
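+        # vm_state_transition() calls fail_json() on error, so a normal
+        # return simply reports whether a transition was needed.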
+ result['changed'] = vm_state_transition(module, uuid, vm_state) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/wakeonlan.py b/ansible_collections/community/general/plugins/modules/wakeonlan.py new file mode 100644 index 000000000..6d7e09452 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/wakeonlan.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: wakeonlan +short_description: Send a magic Wake-on-LAN (WoL) broadcast packet +description: + - The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + mac: + description: + - MAC address to send Wake-on-LAN broadcast packet for. + required: true + type: str + broadcast: + description: + - Network broadcast address to use for broadcasting magic Wake-on-LAN packet. + default: 255.255.255.255 + type: str + port: + description: + - UDP port to use for magic Wake-on-LAN packet. + default: 7 + type: int +todo: + - Add arping support to check whether the system is up (before and after) + - Enable check-mode support (when we have arping support) + - Does not have SecureOn password support +notes: + - This module sends a magic packet, without knowing whether it worked + - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS) + - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first). +seealso: +- module: community.windows.win_wakeonlan +author: +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66 + community.general.wakeonlan: + mac: '00:00:5E:00:53:66' + broadcast: 192.0.2.23 + delegate_to: localhost + +- community.general.wakeonlan: + mac: 00:00:5E:00:53:66 + port: 9 + delegate_to: localhost +''' + +RETURN = r''' +# Default return values +''' +import socket +import struct +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def wakeonlan(module, mac, broadcast, port): + """ Send a magic Wake-on-LAN packet. 
""" + + mac_orig = mac + + # Remove possible separator from MAC address + if len(mac) == 12 + 5: + mac = mac.replace(mac[2], '') + + # If we don't end up with 12 hexadecimal characters, fail + if len(mac) != 12: + module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig) + + # Test if it converts to an integer, otherwise fail + try: + int(mac, 16) + except ValueError: + module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig) + + # Create payload for magic packet + data = b'' + padding = ''.join(['FFFFFFFFFFFF', mac * 20]) + for i in range(0, len(padding), 2): + data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))]) + + # Broadcast payload to network + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + + if not module.check_mode: + + try: + sock.sendto(data, (broadcast, port)) + except socket.error as e: + sock.close() + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + sock.close() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + mac=dict(type='str', required=True), + broadcast=dict(type='str', default='255.255.255.255'), + port=dict(type='int', default=7), + ), + supports_check_mode=True, + ) + + mac = module.params['mac'] + broadcast = module.params['broadcast'] + port = module.params['port'] + + wakeonlan(module, mac, broadcast, port) + + module.exit_json(changed=True) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py b/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py new file mode 100644 index 000000000..a51d454d9 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py @@ -0,0 +1,345 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: wdc_redfish_command +short_description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs +version_added: 5.4.0 +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + perform an action. + - Manages OOB controller firmware. For example, Firmware Activate, Update and Activate. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + description: + - Base URI of OOB controller. Must include this or I(ioms). + type: str + ioms: + description: + - List of IOM FQDNs for the enclosure. Must include this or I(baseuri). + type: list + elements: str + username: + description: + - User for authentication with OOB controller. + type: str + password: + description: + - Password for authentication with OOB controller. + type: str + auth_token: + description: + - Security token for authentication with OOB controller. + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. 
+ default: 10 + type: int + resource_id: + required: false + description: + - ID of the component to modify, such as C(Enclosure), C(IOModuleAFRU), C(PowerSupplyBFRU), C(FanExternalFRU3), or C(FanInternalFRU). + type: str + version_added: 5.4.0 + update_image_uri: + required: false + description: + - The URI of the image for the update. + type: str + update_creds: + required: false + description: + - The credentials for retrieving the update image. + type: dict + suboptions: + username: + required: false + description: + - The username for retrieving the update image. + type: str + password: + required: false + description: + - The password for retrieving the update image. + type: str +notes: + - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. + - ioms is a list of FQDNs for the enclosure's IOMs. + + +author: Mike Moerk (@mikemoerk) +''' + +EXAMPLES = ''' +- name: Firmware Activate (required after SimpleUpdate to apply the new firmware) + community.general.wdc_redfish_command: + category: Update + command: FWActivate + ioms: "{{ ioms }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Firmware Activate with individual IOMs specified + community.general.wdc_redfish_command: + category: Update + command: FWActivate + ioms: + - iom1.wdc.com + - iom2.wdc.com + username: "{{ username }}" + password: "{{ password }}" + +- name: Firmware Activate with baseuri specified + community.general.wdc_redfish_command: + category: Update + command: FWActivate + baseuri: "iom1.wdc.com" + username: "{{ username }}" + password: "{{ password }}" + + +- name: Update and Activate (orchestrates firmware update and activation with a single command) + community.general.wdc_redfish_command: + category: Update + command: UpdateAndActivate + ioms: "{{ ioms }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: "{{ update_image_uri }}" + update_creds: + username: operator + password: supersecretpwd + +- name: Turn on enclosure indicator LED + community.general.wdc_redfish_command: + category: Chassis + resource_id: Enclosure + command: IndicatorLedOn + username: "{{ username }}" + password: "{{ password }}" + +- name: Turn off IOM A indicator LED + community.general.wdc_redfish_command: + category: Chassis + resource_id: IOModuleAFRU + command: IndicatorLedOff + username: "{{ username }}" + password: "{{ password }}" + +- name: Turn on Power Supply B indicator LED + community.general.wdc_redfish_command: + category: Chassis + resource_id: PowerSupplyBFRU + command: IndicatorLedOn + username: "{{ username }}" + password: "{{ password }}" + +- name: Turn on External Fan 3 indicator LED + community.general.wdc_redfish_command: + category: Chassis + resource_id: FanExternalFRU3 + command: IndicatorLedOn + username: "{{ username }}" + password: "{{ password }}" + +- name: Turn on Internal Fan indicator LED + community.general.wdc_redfish_command: + category: Chassis + resource_id: FanInternalFRU + command: IndicatorLedOn + username: "{{ username }}" + password: "{{ password }}" + +- name: Set chassis to Low Power Mode + community.general.wdc_redfish_command: + category: Chassis + resource_id: Enclosure + command: PowerModeLow + +- name: Set chassis to Normal Power Mode + community.general.wdc_redfish_command: + category: Chassis + resource_id: Enclosure + command: PowerModeNormal + +''' + +RETURN = ''' +msg: + description: Message with action result or error description + returned: always + type: str + sample: "Action was successful" +''' + +from 
ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +CATEGORY_COMMANDS_ALL = { + "Update": [ + "FWActivate", + "UpdateAndActivate" + ], + "Chassis": [ + "IndicatorLedOn", + "IndicatorLedOff", + "PowerModeLow", + "PowerModeNormal", + ] +} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + ioms=dict(type='list', elements='str'), + baseuri=dict(), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + update_creds=dict( + type='dict', + options=dict( + username=dict(), + password=dict(no_log=True) + ) + ), + resource_id=dict(), + update_image_uri=dict(), + timeout=dict(type='int', default=10) + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ('baseuri', 'ioms') + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # Resource to modify + resource_id = module.params['resource_id'] + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, sorted(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Build root URI(s) + if module.params.get("baseuri") is not None: + root_uris = ["https://" + module.params['baseuri']] + else: + root_uris = [ + "https://" + iom for iom in module.params['ioms'] + ] + rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module, + resource_id=resource_id, data_modification=True) + + # Organize by Categories / Commands + + if category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + # update options + update_opts = { + 'update_creds': module.params['update_creds'] + } + for command in command_list: + if command == "FWActivate": + if module.check_mode: + result = { + 'ret': True, + 'changed': True, + 'msg': 'FWActivate not performed in check mode.' 
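+                        # Check mode: report the would-be change without
+                        # performing the activation.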
+ } + else: + result = rf_utils.firmware_activate(update_opts) + elif command == "UpdateAndActivate": + update_opts["update_image_uri"] = module.params['update_image_uri'] + result = rf_utils.update_and_activate(update_opts) + + elif category == "Chassis": + result = rf_utils._find_chassis_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + led_commands = ["IndicatorLedOn", "IndicatorLedOff"] + + # Check if more than one led_command is present + num_led_commands = sum([command in led_commands for command in command_list]) + if num_led_commands > 1: + result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."} + else: + for command in command_list: + if command.startswith("IndicatorLed"): + result = rf_utils.manage_chassis_indicator_led(command) + elif command.startswith("PowerMode"): + result = rf_utils.manage_chassis_power_mode(command) + + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + else: + del result['ret'] + changed = result.get('changed', True) + session = result.get('session', dict()) + module.exit_json(changed=changed, + session=session, + msg='Action was successful' if not module.check_mode else result.get( + 'msg', "No action performed in check mode." + )) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py b/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py new file mode 100644 index 000000000..038e1a72d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: wdc_redfish_info +short_description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs +version_added: 5.4.0 +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + get information back. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + description: + - Base URI of OOB controller. Must include this or I(ioms). + type: str + ioms: + description: + - List of IOM FQDNs for the enclosure. Must include this or I(baseuri). + type: list + elements: str + username: + description: + - User for authentication with OOB controller. + type: str + password: + description: + - Password for authentication with OOB controller. + type: str + auth_token: + description: + - Security token for authentication with OOB controller. + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. + default: 10 + type: int + +notes: + - In the inventory, you can specify baseuri or ioms. See the EXAMPLES section. + - ioms is a list of FQDNs for the enclosure's IOMs. 
+ +author: Mike Moerk (@mikemoerk) +''' + +EXAMPLES = ''' +- name: Get Simple Update Status with individual IOMs specified + community.general.wdc_redfish_info: + category: Update + command: SimpleUpdateStatus + ioms: + - iom1.wdc.com + - iom2.wdc.com + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.simple_update_status.entries | to_nice_json }}" + +- name: Get Simple Update Status with baseuri specified + community.general.wdc_redfish_info: + category: Update + command: SimpleUpdateStatus + baseuri: "iom1.wdc.com" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.simple_update_status.entries | to_nice_json }}" +''' + +RETURN = ''' +Description: + description: Firmware update status description. + returned: always + type: str + sample: Ready for FW update +ErrorCode: + description: Numeric error code for firmware update status. Non-zero indicates an error condition. + returned: always + type: int + sample: 0 +EstimatedRemainingMinutes: + description: Estimated number of minutes remaining in firmware update operation. + returned: always + type: int + sample: 20 +StatusCode: + description: Firmware update status code. + returned: always + type: int + sample: 2 +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils + +CATEGORY_COMMANDS_ALL = { + "Update": ["SimpleUpdateStatus"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + ioms=dict(type='list', elements='str'), + baseuri=dict(), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ('baseuri', 'ioms') + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, sorted(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Build root URI(s) + if module.params.get("baseuri") is not None: + root_uris = ["https://" + module.params['baseuri']] + else: + root_uris = [ + "https://" + iom for iom in module.params['ioms'] + ] + rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module, + resource_id=None, + data_modification=False + ) + + # Organize by Categories / Commands + + if category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + for command in command_list: + if command == "SimpleUpdateStatus": + simple_update_status_result = rf_utils.get_simple_update_status() + if simple_update_status_result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + else: + del simple_update_status_result['ret'] + result["simple_update_status"] = simple_update_status_result + module.exit_json(changed=False, redfish_facts=result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/webfaction_app.py b/ansible_collections/community/general/plugins/modules/webfaction_app.py new file mode 100644 index 000000000..7a4702675 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/webfaction_app.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: +# * Andy Baker +# * Federico Tarantini +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Create a Webfaction application using Ansible and the Webfaction API +# +# Valid application types can be found by looking here: +# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: webfaction_app +short_description: Add or remove applications on a Webfaction host +description: + - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction). +author: Quentin Stafford-Fraser (@quentinsf) +notes: + - > + You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. + The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as + your host, you may want to add C(serial: 1) to the plays. + - See `the webfaction API `_ for more info. + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + name: + description: + - The name of the application + required: true + type: str + + state: + description: + - Whether the application should exist + choices: ['present', 'absent'] + default: "present" + type: str + + type: + description: + - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list. 
+    required: true
+    type: str
+
+  autostart:
+    description:
+      - Whether the app should restart with an C(autostart.cgi) script
+    type: bool
+    default: false
+
+  extra_info:
+    description:
+      - Any extra parameters required by the app
+    default: ''
+    type: str
+
+  port_open:
+    description:
+      - If the port should be opened
+    type: bool
+    default: false
+
+  login_name:
+    description:
+      - The webfaction account to use
+    required: true
+    type: str
+
+  login_password:
+    description:
+      - The webfaction password to use
+    required: true
+    type: str
+
+  machine:
+    description:
+      - The machine name to use (optional for accounts with only one machine)
+    type: str
+
+'''
+
+EXAMPLES = '''
+  - name: Create a test app
+    community.general.webfaction_app:
+      name: "my_wsgi_app1"
+      state: present
+      type: mod_wsgi35-python27
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+      machine: "{{webfaction_machine}}"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            state=dict(choices=['present', 'absent'], default='present'),
+            type=dict(required=True),
+            autostart=dict(type='bool', default=False),
+            extra_info=dict(default=""),
+            port_open=dict(type='bool', default=False),
+            login_name=dict(required=True),
+            login_password=dict(required=True, no_log=True),
+            machine=dict(),
+        ),
+        supports_check_mode=True
+    )
+    app_name = module.params['name']
+    app_type = module.params['type']
+    app_state = module.params['state']
+
+    if module.params['machine']:
+        session_id, account = webfaction.login(
+            module.params['login_name'],
+            module.params['login_password'],
+            module.params['machine']
+        )
+    else:
+        session_id, account = webfaction.login(
+            module.params['login_name'],
+            module.params['login_password']
+        )
+
+    app_list = webfaction.list_apps(session_id)
+    app_map = dict([(i['name'], i) for i in app_list])
+    existing_app = app_map.get(app_name)
+
+    result = {}
+
+    # Here's where the real stuff happens
+
+    if app_state == 'present':
+
+        # Does an app with this name already exist?
+        if existing_app:
+            if existing_app['type'] != app_type:
+                module.fail_json(msg="App already exists with different type. Please fix by hand.")
+
+            # If it exists with the right type, we don't change it
+            # Should check other parameters.
+            module.exit_json(
+                changed=False,
+                result=existing_app,
+            )
+
+        if not module.check_mode:
+            # If this isn't a dry run, create the app
+            result.update(
+                webfaction.create_app(
+                    session_id, app_name, app_type,
+                    module.boolean(module.params['autostart']),
+                    module.params['extra_info'],
+                    module.boolean(module.params['port_open'])
+                )
+            )
+
+    elif app_state == 'absent':
+
+        # If the app's already not there, nothing changed.
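+        # Nothing to delete, so exit idempotently without issuing a delete
+        # call against the API.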
+        if not existing_app:
+            module.exit_json(
+                changed=False,
+            )
+
+        if not module.check_mode:
+            # If this isn't a dry run, delete the app
+            result.update(
+                webfaction.delete_app(session_id, app_name)
+            )
+
+    else:
+        module.fail_json(msg="Unknown state specified: {0}".format(app_state))
+
+    module.exit_json(
+        changed=True,
+        result=result
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_db.py b/ansible_collections/community/general/plugins/modules/webfaction_db.py
new file mode 100644
index 000000000..c4742cb21
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_db.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+#     * Andy Baker
+#     * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create a webfaction database using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_db
+short_description: Add or remove a database on Webfaction
+description:
+    - Add or remove a database on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
+    - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+options:
+
+    name:
+        description:
+            - The name of the database
+        required: true
+        type: str
+
+    state:
+        description:
+            - Whether the database should exist
+        choices: ['present', 'absent']
+        default: "present"
+        type: str
+
+    type:
+        description:
+            - The type of database to create.
+        required: true
+        choices: ['mysql', 'postgresql']
+        type: str
+
+    password:
+        description:
+            - The password for the new database user.
+        type: str
+
+    login_name:
+        description:
+            - The webfaction account to use
+        required: true
+        type: str
+
+    login_password:
+        description:
+            - The webfaction password to use
+        required: true
+        type: str
+
+    machine:
+        description:
+            - The machine name to use (optional for accounts with only one machine)
+        type: str
+'''
+
+EXAMPLES = '''
+  # This will also create a default DB user with the same
+  # name as the database, and the specified password.
+
+  - name: Create a database
+    community.general.webfaction_db:
+      name: "{{webfaction_user}}_db1"
+      password: mytestsql
+      type: mysql
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+      machine: "{{webfaction_machine}}"
+
+  # Note that, for symmetry's sake, deleting a database using
+  # 'state: absent' will also delete the matching user.
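+
+  # For completeness, the matching deletion might look like this
+  # (an untested sketch; it assumes the same webfaction_* variables
+  # as above and that the database was created with type mysql):
+  - name: Delete the database and its default user
+    community.general.webfaction_db:
+      name: "{{webfaction_user}}_db1"
+      state: absent
+      type: mysql
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"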
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            state=dict(choices=['present', 'absent'], default='present'),
+            type=dict(required=True, choices=['mysql', 'postgresql']),
+            password=dict(no_log=True),
+            login_name=dict(required=True),
+            login_password=dict(required=True, no_log=True),
+            machine=dict(),
+        ),
+        supports_check_mode=True
+    )
+    db_name = module.params['name']
+    db_state = module.params['state']
+    db_type = module.params['type']
+    db_passwd = module.params['password']
+
+    if module.params['machine']:
+        session_id, account = webfaction.login(
+            module.params['login_name'],
+            module.params['login_password'],
+            module.params['machine']
+        )
+    else:
+        session_id, account = webfaction.login(
+            module.params['login_name'],
+            module.params['login_password']
+        )
+
+    db_list = webfaction.list_dbs(session_id)
+    db_map = dict([(i['name'], i) for i in db_list])
+    existing_db = db_map.get(db_name)
+
+    user_list = webfaction.list_db_users(session_id)
+    user_map = dict([(i['username'], i) for i in user_list])
+    existing_user = user_map.get(db_name)
+
+    result = {}
+
+    # Here's where the real stuff happens
+
+    if db_state == 'present':
+
+        # Does a database with this name already exist?
+        if existing_db:
+            # Yes, but of a different type - fail
+            if existing_db['db_type'] != db_type:
+                module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
+
+            # If it exists with the right type, we don't change anything.
+            module.exit_json(
+                changed=False,
+            )
+
+        if not module.check_mode:
+            # If this isn't a dry run, create the db
+            # and default user.
+            result.update(
+                webfaction.create_db(
+                    session_id, db_name, db_type, db_passwd
+                )
+            )
+
+    elif db_state == 'absent':
+
+        # If this isn't a dry run...
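+        # For symmetry with creation, both the database and its matching
+        # default user are removed below.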
+        if not module.check_mode:
+
+            if not (existing_db or existing_user):
+                module.exit_json(changed=False,)
+
+            if existing_db:
+                # Delete the db if it exists
+                result.update(
+                    webfaction.delete_db(session_id, db_name, db_type)
+                )
+
+            if existing_user:
+                # Delete the default db user if it exists
+                result.update(
+                    webfaction.delete_db_user(session_id, db_name, db_type)
+                )
+
+    else:
+        module.fail_json(msg="Unknown state specified: {0}".format(db_state))
+
+    module.exit_json(
+        changed=True,
+        result=result
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_domain.py b/ansible_collections/community/general/plugins/modules/webfaction_domain.py
new file mode 100644
index 000000000..9bffec3cd
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_domain.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create Webfaction domains and subdomains using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_domain
+short_description: Add or remove domains and subdomains on Webfaction
+description:
+    - Add or remove domains or subdomains on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+    - If you are I(deleting) domains by using I(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
+      If you don't specify subdomains, the domain will be deleted.
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
+    - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+extends_documentation_fragment:
+    - community.general.attributes
+
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+
+options:
+
+    name:
+        description:
+            - The name of the domain
+        required: true
+        type: str
+
+    state:
+        description:
+            - Whether the domain should exist
+        choices: ['present', 'absent']
+        default: "present"
+        type: str
+
+    subdomains:
+        description:
+            - Any subdomains to create.
+        default: []
+        type: list
+        elements: str
+
+    login_name:
+        description:
+            - The webfaction account to use
+        required: true
+        type: str
+
+    login_password:
+        description:
+            - The webfaction password to use
+        required: true
+        type: str
+'''
+
+EXAMPLES = '''
+  - name: Create a test domain
+    community.general.webfaction_domain:
+      name: mydomain.com
+      state: present
+      subdomains:
+        - www
+        - blog
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+
+  - name: Delete test domain and any subdomains
+    community.general.webfaction_domain:
+      name: mydomain.com
+      state: absent
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            state=dict(choices=['present', 'absent'], default='present'),
+            subdomains=dict(default=[], type='list', elements='str'),
+            login_name=dict(required=True),
+            login_password=dict(required=True, no_log=True),
+        ),
+        supports_check_mode=True
+    )
+    domain_name = module.params['name']
+    domain_state = module.params['state']
+    domain_subdomains = module.params['subdomains']
+
+    session_id, account = webfaction.login(
+        module.params['login_name'],
+        module.params['login_password']
+    )
+
+    domain_list = webfaction.list_domains(session_id)
+    domain_map = dict([(i['domain'], i) for i in domain_list])
+    existing_domain = domain_map.get(domain_name)
+
+    result = {}
+
+    # Here's where the real stuff happens
+
+    if domain_state == 'present':
+
+        # Does a domain with this name already exist?
+        if existing_domain:
+
+            if set(existing_domain['subdomains']) >= set(domain_subdomains):
+                # If it exists with the right subdomains, we don't change anything.
+                module.exit_json(
+                    changed=False,
+                )
+
+        positional_args = [session_id, domain_name] + domain_subdomains
+
+        if not module.check_mode:
+            # If this isn't a dry run, create the domain
+            result.update(
+                webfaction.create_domain(
+                    *positional_args
+                )
+            )
+
+    elif domain_state == 'absent':
+
+        # If the domain's already not there, nothing changed.
+        if not existing_domain:
+            module.exit_json(
+                changed=False,
+            )
+
+        positional_args = [session_id, domain_name] + domain_subdomains
+
+        if not module.check_mode:
+            # If this isn't a dry run, delete the domain
+            result.update(
+                webfaction.delete_domain(*positional_args)
+            )
+
+    else:
+        module.fail_json(msg="Unknown state specified: {0}".format(domain_state))
+
+    module.exit_json(
+        changed=True,
+        result=result
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py b/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
new file mode 100644
index 000000000..2b543c5b1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser and Andy Baker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create webfaction mailbox using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_mailbox
+short_description: Add or remove mailboxes on Webfaction
+description:
+    - Add or remove mailboxes on a Webfaction account. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
+    - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+extends_documentation_fragment:
+    - community.general.attributes
+
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+
+options:
+
+    mailbox_name:
+        description:
+            - The name of the mailbox
+        required: true
+        type: str
+
+    mailbox_password:
+        description:
+            - The password for the mailbox
+        required: true
+        type: str
+
+    state:
+        description:
+            - Whether the mailbox should exist
+        choices: ['present', 'absent']
+        default: "present"
+        type: str
+
+    login_name:
+        description:
+            - The webfaction account to use
+        required: true
+        type: str
+
+    login_password:
+        description:
+            - The webfaction password to use
+        required: true
+        type: str
+'''
+
+EXAMPLES = '''
+  - name: Create a mailbox
+    community.general.webfaction_mailbox:
+      mailbox_name: mybox
+      mailbox_password: myboxpw
+      state: present
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            mailbox_name=dict(required=True),
+            mailbox_password=dict(required=True, no_log=True),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            login_name=dict(required=True),
+            login_password=dict(required=True, no_log=True),
+        ),
+        supports_check_mode=True
+    )
+
+    mailbox_name = module.params['mailbox_name']
+    site_state = module.params['state']
+
+    session_id, account = webfaction.login(
+        module.params['login_name'],
+        module.params['login_password']
+    )
+
+    mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
+    existing_mailbox = mailbox_name in mailbox_list
+
+    result = {}
+
+    # Here's where the real stuff happens
+
+    if site_state == 'present':
+
+        # Does a mailbox with this name already exist?
+        if existing_mailbox:
+            module.exit_json(changed=False,)
+
+        positional_args = [session_id, mailbox_name]
+
+        if not module.check_mode:
+            # If this isn't a dry run, create the mailbox
+            result.update(webfaction.create_mailbox(*positional_args))
+
+    elif site_state == 'absent':
+
+        # If the mailbox is already not there, nothing changed.
+        if not existing_mailbox:
+            module.exit_json(changed=False)
+
+        if not module.check_mode:
+            # If this isn't a dry run, delete the mailbox
+            result.update(webfaction.delete_mailbox(session_id, mailbox_name))
+
+    else:
+        module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+    module.exit_json(changed=True, result=result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_site.py b/ansible_collections/community/general/plugins/modules/webfaction_site.py
new file mode 100644
index 000000000..385f55211
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_site.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create Webfaction website using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_site
+short_description: Add or remove a website on a Webfaction host
+description:
+    - Add or remove a website on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+    - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+      address. You can use a DNS name.
+    - If a site of the same name exists in the account but on a different host, the operation will exit.
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
+    - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+extends_documentation_fragment:
+    - community.general.attributes
+
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: none
+
+options:
+
+    name:
+        description:
+            - The name of the website
+        required: true
+        type: str
+
+    state:
+        description:
+            - Whether the website should exist
+        choices: ['present', 'absent']
+        default: "present"
+        type: str
+
+    host:
+        description:
+            - The webfaction host on which the site should be created.
+        required: true
+        type: str
+
+    https:
+        description:
+            - Whether or not to use HTTPS
+        type: bool
+        default: false
+
+    site_apps:
+        description:
+            - A mapping of URLs to apps
+        default: []
+        type: list
+        elements: list
+
+    subdomains:
+        description:
+            - A list of subdomains associated with this site.
+ default: [] + type: list + elements: str + + login_name: + description: + - The webfaction account to use + required: true + type: str + + login_password: + description: + - The webfaction password to use + required: true + type: str +''' + +EXAMPLES = ''' + - name: Create website + community.general.webfaction_site: + name: testsite1 + state: present + host: myhost.webfaction.com + subdomains: + - 'testsite1.my_domain.org' + site_apps: + - ['testapp1', '/'] + https: false + login_name: "{{webfaction_user}}" + login_password: "{{webfaction_passwd}}" +''' + +import socket + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import xmlrpc_client + + +webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + # You can specify an IP address or hostname. + host=dict(required=True), + https=dict(required=False, type='bool', default=False), + subdomains=dict(type='list', elements='str', default=[]), + site_apps=dict(type='list', elements='list', default=[]), + login_name=dict(required=True), + login_password=dict(required=True, no_log=True), + ), + supports_check_mode=True + ) + site_name = module.params['name'] + site_state = module.params['state'] + site_host = module.params['host'] + site_ip = socket.gethostbyname(site_host) + + session_id, account = webfaction.login( + module.params['login_name'], + module.params['login_password'] + ) + + site_list = webfaction.list_websites(session_id) + site_map = dict([(i['name'], i) for i in site_list]) + existing_site = site_map.get(site_name) + + result = {} + + # Here's where the real stuff happens + + if site_state == 'present': + + # Does a site with this name already exist? + if existing_site: + + # If yes, but it's on a different IP address, then fail. + # If we wanted to allow relocation, we could add a 'relocate=true' option + # which would get the existing IP address, delete the site there, and create it + # at the new address. A bit dangerous, perhaps, so for now we'll require manual + # deletion if it's on another host. + + if existing_site['ip'] != site_ip: + module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.") + + # If it's on this host and the key parameters are the same, nothing needs to be done. + + if (existing_site['https'] == module.boolean(module.params['https'])) and \ + (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ + (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): + module.exit_json( + changed=False + ) + + positional_args = [ + session_id, site_name, site_ip, + module.boolean(module.params['https']), + module.params['subdomains'], + ] + for a in module.params['site_apps']: + positional_args.append((a[0], a[1])) + + if not module.check_mode: + # If this isn't a dry run, create or modify the site + result.update( + webfaction.create_website( + *positional_args + ) if not existing_site else webfaction.update_website( + *positional_args + ) + ) + + elif site_state == 'absent': + + # If the site's already not there, nothing changed. 
+        if not existing_site:
+            module.exit_json(
+                changed=False,
+            )
+
+        if not module.check_mode:
+            # If this isn't a dry run, delete the site
+            result.update(
+                webfaction.delete_website(session_id, site_name, site_ip)
+            )
+
+    else:
+        module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+    module.exit_json(
+        changed=True,
+        result=result
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/xattr.py b/ansible_collections/community/general/plugins/modules/xattr.py
new file mode 100644
index 000000000..0b44fdaad
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xattr.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: xattr
+short_description: Manage user defined extended attributes
+description:
+  - Manages filesystem user defined extended attributes.
+  - Requires that extended attributes are enabled on the target filesystem
+    and that the setfattr/getfattr utilities are present.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  path:
+    description:
+      - The full path of the file/object to operate on.
+      - Before 2.3 this option was only usable as I(name).
+    type: path
+    required: true
+    aliases: [ name ]
+  namespace:
+    description:
+      - Namespace of the named name/key.
+    type: str
+    default: user
+  key:
+    description:
+      - The name of a specific extended attribute key to set/retrieve.
+    type: str
+  value:
+    description:
+      - The value to set the named name/key to. Setting a value automatically sets I(state) to C(present).
+    type: str
+  state:
+    description:
+      - Defines the operation to perform.
+      - C(read) retrieves the current value for a I(key) (default).
+      - C(present) sets I(path) to C(value), and is the default if I(value) is set.
+      - C(all) dumps all data.
+      - C(keys) retrieves all keys.
+      - C(absent) deletes the key.
+    type: str
+    choices: [ absent, all, keys, present, read ]
+    default: read
+  follow:
+    description:
+      - If C(true), dereferences symlinks and sets/gets attributes on symlink target,
+        otherwise acts on symlink itself.
+    type: bool
+    default: true
+notes:
+  - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
+author: + - Brian Coca (@bcoca) +''' + +EXAMPLES = ''' +- name: Obtain the extended attributes of /etc/foo.conf + community.general.xattr: + path: /etc/foo.conf + +- name: Set the key 'user.foo' to value 'bar' + community.general.xattr: + path: /etc/foo.conf + key: foo + value: bar + +- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914' + community.general.xattr: + path: /mnt/bricks/brick1 + namespace: trusted + key: glusterfs.volume-id + value: "0x817b94343f164f199e5b573b4ea1f914" + +- name: Remove the key 'user.foo' + community.general.xattr: + path: /etc/foo.conf + key: foo + state: absent + +- name: Remove the key 'trusted.glusterfs.volume-id' + community.general.xattr: + path: /mnt/bricks/brick1 + namespace: trusted + key: glusterfs.volume-id + state: absent +''' + +import os + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def get_xattr_keys(module, path, follow): + cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] + + if not follow: + cmd.append('-h') + cmd.append(path) + + return _run_xattr(module, cmd) + + +def get_xattr(module, path, key, follow): + cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] + + if not follow: + cmd.append('-h') + if key is None: + cmd.append('-d') + else: + cmd.append('-n') + cmd.append(key) + cmd.append(path) + + return _run_xattr(module, cmd, False) + + +def set_xattr(module, path, key, value, follow): + + cmd = [module.get_bin_path('setfattr', True)] + if not follow: + cmd.append('-h') + cmd.append('-n') + cmd.append(key) + cmd.append('-v') + cmd.append(value) + cmd.append(path) + + return _run_xattr(module, cmd) + + +def rm_xattr(module, path, key, follow): + + cmd = [module.get_bin_path('setfattr', True)] + if not follow: + cmd.append('-h') + cmd.append('-x') + cmd.append(key) + cmd.append(path) + + return _run_xattr(module, cmd, False) + + +def _run_xattr(module, cmd, check_rc=True): + + try: + (rc, out, err) = module.run_command(cmd, check_rc=check_rc) + except Exception as e: + module.fail_json(msg="%s!" 
% to_native(e)) + + # result = {'raw': out} + result = {} + for line in out.splitlines(): + if line.startswith('#') or line == '': + pass + elif '=' in line: + (key, val) = line.split('=', 1) + result[key] = val.strip('"') + else: + result[line] = '' + return result + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True, aliases=['name']), + namespace=dict(type='str', default='user'), + key=dict(type='str', no_log=False), + value=dict(type='str'), + state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']), + follow=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + path = module.params.get('path') + namespace = module.params.get('namespace') + key = module.params.get('key') + value = module.params.get('value') + state = module.params.get('state') + follow = module.params.get('follow') + + if not os.path.exists(path): + module.fail_json(msg="path not found or not accessible!") + + changed = False + msg = "" + res = {} + + if key is None and state in ['absent', 'present']: + module.fail_json(msg="%s needs a key parameter" % state) + + # Prepend the key with the namespace if defined + if ( + key is not None and + namespace is not None and + len(namespace) > 0 and + not (namespace == 'user' and key.startswith('user.'))): + key = '%s.%s' % (namespace, key) + + if (state == 'present' or value is not None): + current = get_xattr(module, path, key, follow) + if current is None or key not in current or value != current[key]: + if not module.check_mode: + res = set_xattr(module, path, key, value, follow) + changed = True + res = current + msg = "%s set to %s" % (key, value) + elif state == 'absent': + current = get_xattr(module, path, key, follow) + if current is not None and key in current: + if not module.check_mode: + res = rm_xattr(module, path, key, follow) + changed = True + res = current + msg = "%s removed" % (key) + elif state == 'keys': + res = get_xattr_keys(module, path, follow) + msg = "returning all keys" + elif state == 'all': + res = get_xattr(module, path, None, follow) + msg = "dumping all" + else: + res = get_xattr(module, path, key, follow) + msg = "returning %s" % key + + module.exit_json(changed=changed, msg=msg, xattr=res) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/xbps.py b/ansible_collections/community/general/plugins/modules/xbps.py new file mode 100644 index 000000000..1fea5b384 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xbps.py @@ -0,0 +1,351 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2016 Dino Occhialini +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: xbps +short_description: Manage packages with XBPS +description: + - Manage packages with the XBPS package manager. +author: + - "Dino Occhialini (@dinoocch)" + - "Michael Aldridge (@the-maldridge)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the package to install, upgrade, or remove. + aliases: [pkg,package] + type: list + elements: str + state: + description: + - Desired state of the package. 
+ default: "present" + choices: ["present", "absent", "latest", "installed", "removed"] + type: str + recurse: + description: + - When removing a package, also remove its dependencies, provided + that they are not required by other packages and were not + explicitly installed by a user. + type: bool + default: false + update_cache: + description: + - Whether or not to refresh the master package lists. This can be + run as part of a package installation or as a separate step. + type: bool + default: true + upgrade: + description: + - Whether or not to upgrade whole system + type: bool + default: false + upgrade_xbps: + description: + - Whether or not to upgrade the xbps package when necessary. + Before installing new packages, + xbps requires the user to update the xbps package itself. + Thus when this option is set to C(false), + upgrades and installations will fail when xbps is not up to date. + type: bool + default: true + version_added: '0.2.0' +''' + +EXAMPLES = ''' +- name: Install package foo (automatically updating the xbps package if needed) + community.general.xbps: + name: foo + state: present + +- name: Upgrade package foo + community.general.xbps: + name: foo + state: latest + update_cache: true + +- name: Remove packages foo and bar + community.general.xbps: + name: + - foo + - bar + state: absent + +- name: Recursively remove package foo + community.general.xbps: + name: foo + state: absent + recurse: true + +- name: Update package cache + community.general.xbps: + update_cache: true + +- name: Upgrade packages + community.general.xbps: + upgrade: true + +- name: Install a package, failing if the xbps package is out of date + community.general.xbps: + name: foo + state: present + upgrade_xbps: false +''' + +RETURN = ''' +msg: + description: Message about results + returned: success + type: str + sample: "System Upgraded" +packages: + description: Packages that are affected/would be affected + type: list + sample: ["ansible"] + returned: success +''' + + +import os + +from ansible.module_utils.basic import AnsibleModule + + +def is_installed(xbps_output): + """Returns package install state""" + return bool(len(xbps_output)) + + +def query_package(module, xbps_path, name, state="present"): + """Returns Package info""" + if state == "present": + lcmd = "%s %s" % (xbps_path['query'], name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if not is_installed(lstdout): + # package is not installed locally + return False, False + + rcmd = "%s -Sun" % (xbps_path['install']) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + if rrc == 0 or rrc == 17: + """Return True to indicate that the package is installed locally, + and the result of the version number comparison to determine if the + package is up-to-date""" + return True, name not in rstdout + + return False, False + + +def update_package_db(module, xbps_path): + """Returns True if update_package_db changed""" + cmd = "%s -S" % (xbps_path['install']) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="Could not update package db") + if "avg rate" in stdout: + return True + else: + return False + + +def upgrade_xbps(module, xbps_path, exit_on_success=False): + cmdupgradexbps = "%s -uy xbps" % (xbps_path['install']) + rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False) + if rc != 0: + module.fail_json(msg='Could not upgrade xbps itself') + + +def upgrade(module, xbps_path): + """Returns true is full upgrade succeeds""" + 
+    cmdupgrade = "%s -uy" % (xbps_path['install'])
+    cmdneedupgrade = "%s -un" % (xbps_path['install'])
+
+    rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False)
+    if rc == 0:
+        if len(stdout.splitlines()) == 0:
+            module.exit_json(changed=False, msg='Nothing to upgrade')
+        elif module.check_mode:
+            module.exit_json(changed=True, msg='Would have performed upgrade')
+        else:
+            rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+            if rc == 0:
+                module.exit_json(changed=True, msg='System upgraded')
+            elif rc == 16 and module.params['upgrade_xbps']:
+                upgrade_xbps(module, xbps_path)
+                # avoid loops by not trying self-upgrade again
+                module.params['upgrade_xbps'] = False
+                upgrade(module, xbps_path)
+            else:
+                module.fail_json(msg="Could not upgrade")
+    else:
+        module.fail_json(msg="Could not upgrade")
+
+
+def remove_packages(module, xbps_path, packages):
+    """Returns true if package removal succeeds"""
+    changed_packages = []
+    # Using a for loop in case of error, we can report the package that failed
+    for package in packages:
+        # Query the package first, to see if we even need to remove
+        installed, updated = query_package(module, xbps_path, package)
+        if not installed:
+            continue
+
+        cmd = "%s -y %s" % (xbps_path['remove'], package)
+        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+        if rc != 0:
+            module.fail_json(msg="failed to remove %s" % (package))
+
+        changed_packages.append(package)
+
+    if len(changed_packages) > 0:
+
+        module.exit_json(changed=True, msg="removed %s package(s)" %
+                         len(changed_packages), packages=changed_packages)
+
+    module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, xbps_path, state, packages):
+    """Returns true if package install succeeds."""
+    toInstall = []
+    for i, package in enumerate(packages):
+        """If the package is installed and state == present or state == latest
+        and is up-to-date then skip"""
+        installed, updated = query_package(module, xbps_path, package)
+        if installed and (state == 'present' or
+                          (state == 'latest' and updated)):
+            continue
+
+        toInstall.append(package)
+
+    if len(toInstall) == 0:
+        module.exit_json(changed=False, msg="Nothing to Install")
+
+    cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall))
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+    if rc == 16 and module.params['upgrade_xbps']:
+        upgrade_xbps(module, xbps_path)
+        # avoid loops by not trying self-update again
+        module.params['upgrade_xbps'] = False
+        install_packages(module, xbps_path, state, packages)
+    elif rc != 0 and not (state == 'latest' and rc == 17):
+        module.fail_json(msg="failed to install %s package(s)"
+                         % (len(toInstall)),
+                         packages=toInstall)
+
+    module.exit_json(changed=True, msg="installed %s package(s)"
+                     % (len(toInstall)),
+                     packages=toInstall)
+
+
+def check_packages(module, xbps_path, packages, state):
+    """Returns change status of command"""
+    would_be_changed = []
+    for package in packages:
+        installed, updated = query_package(module, xbps_path, package)
+        if ((state in ["present", "latest"] and not installed) or
+                (state == "absent" and installed) or
+                (state == "latest" and not updated)):
+            would_be_changed.append(package)
+    if would_be_changed:
+        if state == "absent":
+            state = "removed"
+        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+            len(would_be_changed), state),
+            packages=would_be_changed)
+    else:
+        module.exit_json(changed=False, msg="package(s) already %s" % state,
+                         packages=[])
+
+
+def update_cache(module, xbps_path, upgrade_planned):
+    """Update package cache"""
+    if module.check_mode:
+        if upgrade_planned:
+            return
+        module.exit_json(
+            changed=True, msg='Would have updated the package cache'
+        )
+    changed = update_package_db(module, xbps_path)
+    if not upgrade_planned:
+        module.exit_json(changed=changed, msg=(
+            'Updated the package master lists' if changed
+            else 'Package list already up to date'
+        ))
+
+
+def main():
+    """Dispatch to the appropriate command"""
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'),
+            state=dict(default='present', choices=['present', 'installed',
+                                                   'latest', 'absent',
+                                                   'removed']),
+            recurse=dict(default=False, type='bool'),
+            upgrade=dict(default=False, type='bool'),
+            update_cache=dict(default=True, type='bool'),
+            upgrade_xbps=dict(default=True, type='bool'),
+        ),
+        required_one_of=[['name', 'update_cache', 'upgrade']],
+        supports_check_mode=True)
+
+    xbps_path = dict()
+    xbps_path['install'] = module.get_bin_path('xbps-install', True)
+    xbps_path['query'] = module.get_bin_path('xbps-query', True)
+    xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
+
+    if not os.path.exists(xbps_path['install']):
+        module.fail_json(msg="cannot find xbps in path %s"
+                         % (xbps_path['install']))
+
+    p = module.params
+
+    # normalize the state parameter
+    if p['state'] in ['present', 'installed']:
+        p['state'] = 'present'
+    elif p['state'] in ['absent', 'removed']:
+        p['state'] = 'absent'
+
+    if p['update_cache']:
+        update_cache(module, xbps_path, (p['name'] or p['upgrade']))
+
+    if p['upgrade']:
+        upgrade(module, xbps_path)
+
+    if p['name']:
+        pkgs = p['name']
+
+        if module.check_mode:
+            check_packages(module, xbps_path, pkgs, p['state'])
+
+        if p['state'] in ['present', 'latest']:
+            install_packages(module, xbps_path, p['state'], pkgs)
+        elif p['state'] == 'absent':
+            remove_packages(module, xbps_path, pkgs)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py b/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py
new file mode 100644
index 000000000..494ea061e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py
@@ -0,0 +1,795 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: xcc_redfish_command
+short_description: Manages Lenovo Out-Of-Band controllers using Redfish APIs
+version_added: 2.4.0
+description:
+  - Builds Redfish URIs locally and sends them to remote OOB controllers to
+    perform an action or get information back or update a configuration attribute.
+  - Manages virtual media.
+  - Supports getting information back via GET method.
+  - Supports updating a configuration attribute via PATCH method.
+  - Supports performing an action via POST method.
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  category:
+    required: true
+    description:
+      - Category to execute on OOB controller.
+    type: str
+  command:
+    required: true
+    description:
+      - List of commands to execute on OOB controller.
+ type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authentication with OOB controller. + type: str + password: + description: + - Password for authentication with OOB controller. + type: str + auth_token: + description: + - Security token for authentication with OOB controller + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. + default: 10 + type: int + resource_id: + required: false + description: + - The ID of the System, Manager or Chassis to modify. + type: str + virtual_media: + required: false + description: + - The options for VirtualMedia commands. + type: dict + suboptions: + media_types: + description: + - The list of media types appropriate for the image. + type: list + elements: str + default: [] + image_url: + description: + - The URL of the image to insert or eject. + type: str + inserted: + description: + - Indicates if the image is treated as inserted on command completion. + type: bool + default: true + write_protected: + description: + - Indicates if the media is treated as write-protected. + type: bool + default: true + username: + description: + - The username for accessing the image URL. + type: str + password: + description: + - The password for accessing the image URL. + type: str + transfer_protocol_type: + description: + - The network protocol to use with the image. + type: str + transfer_method: + description: + - The transfer method to use with the image. + type: str + resource_uri: + required: false + description: + - The resource uri to get or patch or post. + type: str + request_body: + required: false + description: + - The request body to patch or post. + type: dict + +author: "Yuyan Pan (@panyy3)" +''' + +EXAMPLES = ''' + - name: Insert Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: "http://example.com/images/SomeLinux-current.iso" + media_types: + - CD + - DVD + resource_id: "1" + + - name: Eject Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: "http://example.com/images/SomeLinux-current.iso" + resource_id: "1" + + - name: Eject all Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_id: "1" + + - name: Get ComputeSystem Oem property SystemStatus via GetResource command + community.general.xcc_redfish_command: + category: Raw + command: GetResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1" + register: result + - ansible.builtin.debug: + msg: "{{ result.redfish_facts.data.Oem.Lenovo.SystemStatus }}" + + - name: Get Oem DNS setting via GetResource command + community.general.xcc_redfish_command: + category: Raw + command: GetResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS" + register: result + + - name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.data }}" + + - name: Get Lenovo FoD key 
collection resource via GetCollectionResource command
+    community.general.xcc_redfish_command:
+      category: Raw
+      command: GetCollectionResource
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys"
+    register: result
+
+  - name: Print fetched information
+    ansible.builtin.debug:
+      msg: "{{ result.redfish_facts.data_list }}"
+
+  - name: Update ComputeSystem property AssetTag via PatchResource command
+    community.general.xcc_redfish_command:
+      category: Raw
+      command: PatchResource
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      resource_uri: "/redfish/v1/Systems/1"
+      request_body:
+        AssetTag: "new_asset_tag"
+
+  - name: Perform BootToBIOSSetup action via PostResource command
+    community.general.xcc_redfish_command:
+      category: Raw
+      command: PostResource
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      resource_uri: "/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.BootToBIOSSetup"
+      request_body: {}
+
+  - name: Perform SecureBoot.ResetKeys action via PostResource command
+    community.general.xcc_redfish_command:
+      category: Raw
+      command: PostResource
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      resource_uri: "/redfish/v1/Systems/1/SecureBoot/Actions/SecureBoot.ResetKeys"
+      request_body:
+        ResetKeysType: DeleteAllKeys
+
+  - name: Create session
+    community.general.redfish_command:
+      category: Sessions
+      command: CreateSession
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+    register: result
+
+  - name: Update Manager DateTimeLocalOffset property using security token for auth
+    community.general.xcc_redfish_command:
+      category: Raw
+      command: PatchResource
+      baseuri: "{{ baseuri }}"
+      auth_token: "{{ result.session.token }}"
+      resource_uri: "/redfish/v1/Managers/1"
+      request_body:
+        DateTimeLocalOffset: "+08:00"
+
+  - name: Delete session using security token created by CreateSession above
+    community.general.redfish_command:
+      category: Sessions
+      command: DeleteSession
+      baseuri: "{{ baseuri }}"
+      auth_token: "{{ result.session.token }}"
+      session_uri: "{{ result.session.uri }}"
+'''
+
+RETURN = '''
+msg:
+    description: A message related to the performed action(s).
+    returned: when failure or action/update success
+    type: str
+    sample: "Action was successful"
+redfish_facts:
+    description: Resource content.
+ returned: when command == GetResource or command == GetCollectionResource + type: dict + sample: '{ + "redfish_facts": { + "data": { + "@odata.etag": "\"3179bf00d69f25a8b3c\"", + "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS", + "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS", + "DDNS": [ + { + "DDNSEnable": true, + "DomainName": "", + "DomainNameSource": "DHCP" + } + ], + "DNSEnable": true, + "Description": "This resource is used to represent a DNS resource for a Redfish implementation.", + "IPv4Address1": "10.103.62.178", + "IPv4Address2": "0.0.0.0", + "IPv4Address3": "0.0.0.0", + "IPv6Address1": "::", + "IPv6Address2": "::", + "IPv6Address3": "::", + "Id": "LenovoDNS", + "PreferredAddresstype": "IPv4" + }, + "ret": true + } + }' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils + + +class XCCRedfishUtils(RedfishUtils): + @staticmethod + def _find_empty_virt_media_slot(resources, media_types, + media_match_strict=True): + for uri, data in resources.items(): + # check MediaTypes + if 'MediaTypes' in data and media_types: + if not set(media_types).intersection(set(data['MediaTypes'])): + continue + else: + if media_match_strict: + continue + if 'RDOC' in uri: + continue + if 'Remote' in uri: + continue + # if ejected, 'Inserted' should be False and 'ImageName' cleared + if (not data.get('Inserted', False) and + not data.get('ImageName')): + return uri, data + return None, None + + def virtual_media_eject_one(self, image_url): + # read the VirtualMedia resources from systems + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'VirtualMedia' not in data: + # read the VirtualMedia resources from manager + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'VirtualMedia' not in data: + return {'ret': False, 'msg': "VirtualMedia resource not found"} + virt_media_uri = data["VirtualMedia"]["@odata.id"] + response = self.get_request(self.root_uri + virt_media_uri) + if response['ret'] is False: + return response + data = response['data'] + virt_media_list = [] + for member in data[u'Members']: + virt_media_list.append(member[u'@odata.id']) + resources, headers = self._read_virt_media_resources(virt_media_list) + + # find the VirtualMedia resource to eject + uri, data, eject = self._find_virt_media_to_eject(resources, image_url) + if uri and eject: + if ('Actions' not in data or + '#VirtualMedia.EjectMedia' not in data['Actions']): + # try to eject via PATCH if no EjectMedia action found + h = headers[uri] + if 'allow' in h: + methods = [m.strip() for m in h.get('allow').split(',')] + if 'PATCH' not in methods: + # if Allow header present and PATCH missing, return error + return {'ret': False, + 'msg': "%s action not found and PATCH not allowed" + % '#VirtualMedia.EjectMedia'} + return self.virtual_media_eject_via_patch(uri) + else: + # POST to the EjectMedia Action + action = data['Actions']['#VirtualMedia.EjectMedia'] + if 'target' not in action: + return {'ret': False, + 'msg': "target URI property missing from Action " + "#VirtualMedia.EjectMedia"} + action_uri = action['target'] + # empty payload for Eject action + payload = {} + # POST to action + response = self.post_request(self.root_uri + action_uri, + payload) + 
if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "VirtualMedia ejected"} + elif uri and not eject: + # already ejected: return success but changed=False + return {'ret': True, 'changed': False, + 'msg': "VirtualMedia image '%s' already ejected" % + image_url} + else: + # return failure (no resources matching image_url found) + return {'ret': False, 'changed': False, + 'msg': "No VirtualMedia resource found with image '%s' " + "inserted" % image_url} + + def virtual_media_eject(self, options): + if options: + image_url = options.get('image_url') + if image_url: # eject specified one media + return self.virtual_media_eject_one(image_url) + + # eject all inserted media when no image_url specified + # read the VirtualMedia resources from systems + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'VirtualMedia' not in data: + # read the VirtualMedia resources from manager + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'VirtualMedia' not in data: + return {'ret': False, 'msg': "VirtualMedia resource not found"} + # read all the VirtualMedia resources + virt_media_uri = data["VirtualMedia"]["@odata.id"] + response = self.get_request(self.root_uri + virt_media_uri) + if response['ret'] is False: + return response + data = response['data'] + virt_media_list = [] + for member in data[u'Members']: + virt_media_list.append(member[u'@odata.id']) + resources, headers = self._read_virt_media_resources(virt_media_list) + + # eject all inserted media one by one + ejected_media_list = [] + for uri, data in resources.items(): + if data.get('Image') and data.get('Inserted', True): + returndict = self.virtual_media_eject_one(data.get('Image')) + if not returndict['ret']: + return returndict + ejected_media_list.append(data.get('Image')) + + if len(ejected_media_list) == 0: + # no media inserted: return success but changed=False + return {'ret': True, 'changed': False, + 'msg': "No VirtualMedia image inserted"} + else: + return {'ret': True, 'changed': True, + 'msg': "VirtualMedia %s ejected" % str(ejected_media_list)} + + def virtual_media_insert(self, options): + param_map = { + 'Inserted': 'inserted', + 'WriteProtected': 'write_protected', + 'UserName': 'username', + 'Password': 'password', + 'TransferProtocolType': 'transfer_protocol_type', + 'TransferMethod': 'transfer_method' + } + image_url = options.get('image_url') + if not image_url: + return {'ret': False, + 'msg': "image_url option required for VirtualMediaInsert"} + media_types = options.get('media_types') + + # read the VirtualMedia resources from systems + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'VirtualMedia' not in data: + # read the VirtualMedia resources from manager + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'VirtualMedia' not in data: + return {'ret': False, 'msg': "VirtualMedia resource not found"} + virt_media_uri = data["VirtualMedia"]["@odata.id"] + response = self.get_request(self.root_uri + virt_media_uri) + if response['ret'] is False: + return response + data = response['data'] + virt_media_list = [] + for member in data[u'Members']: + virt_media_list.append(member[u'@odata.id']) + resources, headers 
= self._read_virt_media_resources(virt_media_list) + + # see if image already inserted; if so, nothing to do + if self._virt_media_image_inserted(resources, image_url): + return {'ret': True, 'changed': False, + 'msg': "VirtualMedia '%s' already inserted" % image_url} + + # find an empty slot to insert the media + # try first with strict media_type matching + uri, data = self._find_empty_virt_media_slot( + resources, media_types, media_match_strict=True) + if not uri: + # if not found, try without strict media_type matching + uri, data = self._find_empty_virt_media_slot( + resources, media_types, media_match_strict=False) + if not uri: + return {'ret': False, + 'msg': "Unable to find an available VirtualMedia resource " + "%s" % ('supporting ' + str(media_types) + if media_types else '')} + + # confirm InsertMedia action found + if ('Actions' not in data or + '#VirtualMedia.InsertMedia' not in data['Actions']): + # try to insert via PATCH if no InsertMedia action found + h = headers[uri] + if 'allow' in h: + methods = [m.strip() for m in h.get('allow').split(',')] + if 'PATCH' not in methods: + # if Allow header present and PATCH missing, return error + return {'ret': False, + 'msg': "%s action not found and PATCH not allowed" + % '#VirtualMedia.InsertMedia'} + return self.virtual_media_insert_via_patch(options, param_map, + uri, data) + + # get the action property + action = data['Actions']['#VirtualMedia.InsertMedia'] + if 'target' not in action: + return {'ret': False, + 'msg': "target URI missing from Action " + "#VirtualMedia.InsertMedia"} + action_uri = action['target'] + # get ActionInfo or AllowableValues + ai = self._get_all_action_info_values(action) + # construct payload + payload = self._insert_virt_media_payload(options, param_map, data, ai) + # POST to action + response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} + + def raw_get_resource(self, resource_uri): + if resource_uri is None: + return {'ret': False, 'msg': "resource_uri is missing"} + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + data = response['data'] + return {'ret': True, 'data': data} + + def raw_get_collection_resource(self, resource_uri): + if resource_uri is None: + return {'ret': False, 'msg': "resource_uri is missing"} + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + if 'Members' not in response['data']: + return {'ret': False, 'msg': "Specified resource_uri doesn't have Members property"} + member_list = [i['@odata.id'] for i in response['data'].get('Members', [])] + + # get member resource one by one + data_list = [] + for member_uri in member_list: + uri = self.root_uri + member_uri + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + data_list.append(data) + + return {'ret': True, 'data_list': data_list} + + def raw_patch_resource(self, resource_uri, request_body): + if resource_uri is None: + return {'ret': False, 'msg': "resource_uri is missing"} + if request_body is None: + return {'ret': False, 'msg': "request_body is missing"} + # check whether resource_uri existing or not + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + original_etag = response['data']['@odata.etag'] + + # check validity of keys in request_body + data = 
response['data']
+        for key in request_body.keys():
+            if key not in data:
+                return {'ret': False, 'msg': "Key %s not found. Supported key list: %s" % (key, str(data.keys()))}
+
+        # perform patch
+        response = self.patch_request(self.root_uri + resource_uri, request_body)
+        if response['ret'] is False:
+            return response
+
+        # check whether changed or not
+        current_etag = ''
+        if 'data' in response and '@odata.etag' in response['data']:
+            current_etag = response['data']['@odata.etag']
+        if current_etag != original_etag:
+            return {'ret': True, 'changed': True}
+        else:
+            return {'ret': True, 'changed': False}
+
+    def raw_post_resource(self, resource_uri, request_body):
+        if resource_uri is None:
+            return {'ret': False, 'msg': "resource_uri is missing"}
+        if '/Actions/' not in resource_uri:
+            return {'ret': False, 'msg': "Bad uri %s. Keyword /Actions/ should be included in uri" % resource_uri}
+        if request_body is None:
+            return {'ret': False, 'msg': "request_body is missing"}
+        # get action base uri data for further checking
+        action_base_uri = resource_uri.split('/Actions/')[0]
+        response = self.get_request(self.root_uri + action_base_uri)
+        if response['ret'] is False:
+            return response
+        if 'Actions' not in response['data']:
+            return {'ret': False, 'msg': "Actions property not found in %s" % action_base_uri}
+
+        # check resource_uri against the target URIs found in the action base URI data
+        action_found = False
+        action_info_uri = None
+        action_target_uri_list = []
+        for key in response['data']['Actions'].keys():
+            if action_found:
+                break
+            if not key.startswith('#'):
+                continue
+            if 'target' in response['data']['Actions'][key]:
+                if resource_uri == response['data']['Actions'][key]['target']:
+                    action_found = True
+                    if '@Redfish.ActionInfo' in response['data']['Actions'][key]:
+                        action_info_uri = response['data']['Actions'][key]['@Redfish.ActionInfo']
+                else:
+                    action_target_uri_list.append(response['data']['Actions'][key]['target'])
+        if not action_found and 'Oem' in response['data']['Actions']:
+            for key in response['data']['Actions']['Oem'].keys():
+                if action_found:
+                    break
+                if not key.startswith('#'):
+                    continue
+                if 'target' in response['data']['Actions']['Oem'][key]:
+                    if resource_uri == response['data']['Actions']['Oem'][key]['target']:
+                        action_found = True
+                        if '@Redfish.ActionInfo' in response['data']['Actions']['Oem'][key]:
+                            action_info_uri = response['data']['Actions']['Oem'][key]['@Redfish.ActionInfo']
+                    else:
+                        action_target_uri_list.append(response['data']['Actions']['Oem'][key]['target'])
+
+        if not action_found:
+            return {'ret': False,
+                    'msg': 'Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. Supported uri: %s'
+                           % (str(action_target_uri_list))}
+
+        # check request_body with parameter name defined by @Redfish.ActionInfo
+        if action_info_uri is not None:
+            response = self.get_request(self.root_uri + action_info_uri)
+            if response['ret'] is False:
+                return response
+            for key in request_body.keys():
+                key_found = False
+                for para in response['data']['Parameters']:
+                    if key == para['Name']:
+                        key_found = True
+                        break
+                if not key_found:
+                    return {'ret': False,
+                            'msg': 'Invalid property %s found in request_body.
Please refer to @Redfish.ActionInfo Parameters: %s' + % (key, str(response['data']['Parameters']))} + + # perform post + response = self.post_request(self.root_uri + resource_uri, request_body) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True} + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Manager": ["VirtualMediaInsert", + "VirtualMediaEject"], + "Raw": ["GetResource", + "GetCollectionResource", + "PatchResource", + "PostResource"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict(), + virtual_media=dict( + type='dict', + options=dict( + media_types=dict(type='list', elements='str', default=[]), + image_url=dict(), + inserted=dict(type='bool', default=True), + write_protected=dict(type='bool', default=True), + username=dict(), + password=dict(no_log=True), + transfer_protocol_type=dict(), + transfer_method=dict(), + ) + ), + resource_uri=dict(), + request_body=dict( + type='dict', + ), + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # VirtualMedia options + virtual_media = module.params['virtual_media'] + + # resource_uri + resource_uri = module.params['resource_uri'] + + # request_body + request_body = module.params['request_body'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = XCCRedfishUtils(creds, root_uri, timeout, module, resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Manager": + # For virtual media resources located on the Systems service + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + # For virtual media resources located on the Managers service + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == 'VirtualMediaInsert': + result = rf_utils.virtual_media_insert(virtual_media) + elif command == 'VirtualMediaEject': + result = rf_utils.virtual_media_eject(virtual_media) + elif category == "Raw": + for command in command_list: + if command == 'GetResource': + result = rf_utils.raw_get_resource(resource_uri) + elif command == 'GetCollectionResource': + result = rf_utils.raw_get_collection_resource(resource_uri) + elif command == 'PatchResource': + result = rf_utils.raw_patch_resource(resource_uri, request_body) + elif command == 'PostResource': + result = rf_utils.raw_post_resource(resource_uri, request_body) + + # Return data back or fail with proper message + if result['ret'] is True: + if command == 'GetResource' or command == 'GetCollectionResource': + module.exit_json(redfish_facts=result) + else: + changed = result.get('changed', True) + msg = result.get('msg', 'Action was successful') + module.exit_json(changed=changed, msg=msg) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/xenserver_facts.py b/ansible_collections/community/general/plugins/modules/xenserver_facts.py new file mode 100644 index 000000000..9924c4a9e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xenserver_facts.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: xenserver_facts +short_description: Get facts reported on XenServer +description: + - Reads data out of XenAPI; can be used instead of multiple xe commands.
+author: + - Andy Hill (@andyhky) + - Tim Rupp (@caphrim007) + - Robin Lee (@cheese) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: {} +''' + +EXAMPLES = ''' +- name: Gather facts from xenserver + community.general.xenserver_facts: + +- name: Print running VMs + ansible.builtin.debug: + msg: "{{ item }}" + with_items: "{{ xs_vms.keys() }}" + when: xs_vms[item]['power_state'] == "Running" + +# Which will print: +# +# TASK: [Print running VMs] *********************************************************** +# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit)) +# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => { +# "item": "Control domain on host: 10.0.13.22", +# "msg": "Control domain on host: 10.0.13.22" +# } +''' + + +HAVE_XENAPI = False +try: + import XenAPI + HAVE_XENAPI = True +except ImportError: + pass + +from ansible.module_utils import distro +from ansible.module_utils.basic import AnsibleModule + + +class XenServerFacts: + def __init__(self): + self.codes = { + '5.5.0': 'george', + '5.6.100': 'oxford', + '6.0.0': 'boston', + '6.1.0': 'tampa', + '6.2.0': 'clearwater' + } + + @property + def version(self): + result = distro.linux_distribution()[1] + return result + + @property + def codename(self): + if self.version in self.codes: + result = self.codes[self.version] + else: + result = None + + return result + + +def get_xenapi_session(): + session = XenAPI.xapi_local() + session.xenapi.login_with_password('', '') + return session + + +def get_networks(session): + recs = session.xenapi.network.get_all_records() + networks = change_keys(recs, key='name_label') + return networks + + +def get_pifs(session): + recs = session.xenapi.PIF.get_all_records() + pifs = change_keys(recs, key='uuid') + xs_pifs = {} + devicenums = range(0, 7) + for pif in pifs.values(): + for eth in devicenums: + interface_name = "eth%s" % (eth) + bond_name = interface_name.replace('eth', 'bond') + if pif['device'] == interface_name: + xs_pifs[interface_name] = pif + elif pif['device'] == bond_name: + xs_pifs[bond_name] = pif + return xs_pifs + + +def get_vlans(session): + recs = session.xenapi.VLAN.get_all_records() + return change_keys(recs, key='tag') + + +def change_keys(recs, key='uuid', filter_func=None): + """ + Take a xapi dict, and make the keys the value of recs[ref][key]. + + Preserves the ref in rec['ref'] + + """ + new_recs = {} + + for ref, rec in recs.items(): + if filter_func is not None and not filter_func(rec): + continue + + for param_name, param_value in rec.items(): + # param_value may be of type xmlrpc.client.DateTime, + # which is not simply convertible to str.
+ # Use 'value' attr to get the str value, + # following an example in xmlrpc.client.DateTime document + if hasattr(param_value, "value"): + rec[param_name] = param_value.value + new_recs[rec[key]] = rec + new_recs[rec[key]]['ref'] = ref + + return new_recs + + +def get_host(session): + """Get the host""" + host_recs = session.xenapi.host.get_all() + # We only have one host, so just return its entry + return session.xenapi.host.get_record(host_recs[0]) + + +def get_vms(session): + recs = session.xenapi.VM.get_all_records() + if not recs: + return None + vms = change_keys(recs, key='name_label') + return vms + + +def get_srs(session): + recs = session.xenapi.SR.get_all_records() + if not recs: + return None + srs = change_keys(recs, key='name_label') + return srs + + +def main(): + module = AnsibleModule({}, supports_check_mode=True) + + if not HAVE_XENAPI: + module.fail_json(changed=False, msg="python xen api required for this module") + + obj = XenServerFacts() + try: + session = get_xenapi_session() + except XenAPI.Failure as e: + module.fail_json(msg='%s' % e) + + data = { + 'xenserver_version': obj.version, + 'xenserver_codename': obj.codename + } + + xs_networks = get_networks(session) + xs_pifs = get_pifs(session) + xs_vlans = get_vlans(session) + xs_vms = get_vms(session) + xs_srs = get_srs(session) + + if xs_vlans: + data['xs_vlans'] = xs_vlans + if xs_pifs: + data['xs_pifs'] = xs_pifs + if xs_networks: + data['xs_networks'] = xs_networks + + if xs_vms: + data['xs_vms'] = xs_vms + + if xs_srs: + data['xs_srs'] = xs_srs + + module.exit_json(ansible_facts=data) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest.py b/ansible_collections/community/general/plugins/modules/xenserver_guest.py new file mode 100644 index 000000000..7659ee2ae --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xenserver_guest.py @@ -0,0 +1,2033 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: xenserver_guest +short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool +description: > + This module can be used to create new virtual machines from templates or other virtual machines, + modify various virtual machine components like network and disk, rename a virtual machine and + remove a virtual machine with associated components. +author: +- Bojan Vitnik (@bvitnik) +notes: +- Minimal supported version of XenServer is 5.6. +- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. +- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside + Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your + Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' +- 'If no scheme is specified in I(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. 
Make sure you are + accessing the XenServer host in a trusted environment or use C(https://) scheme explicitly.' + - 'To use C(https://) scheme for I(hostname) you have to either import the host certificate into your OS certificate store or use I(validate_certs): C(false) + which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' + - 'Network configuration inside a guest OS, by using I(networks.type), I(networks.ip), I(networks.gateway) etc. parameters, is supported on + XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to + detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest + agent only supports None and Static types of network configuration, where None means a DHCP-configured interface, I(networks.type) and I(networks.type6) + values C(none) and C(dhcp) have the same effect. More info here: + U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)' + - 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore + C(vm-data/networks/) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through the + WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user + to implement boot-time scripts or a custom agent that will read the parameters from xenstore and configure the network with the given parameters. + Take note that for xenstore data to become available inside a guest, a VM restart is needed, hence the module will require a VM restart if any + parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most + useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here: + U(https://support.citrix.com/article/CTX226713)' +requirements: +- python >= 2.6 +- XenAPI +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Specify the state VM should be in. + - If I(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters. + - If I(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters. + - If I(state) is set to C(absent) and VM exists, then VM is removed with its associated components. + - If I(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically. + type: str + default: present + choices: [ present, absent, poweredon ] + name: + description: + - Name of the VM to work with. + - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - In case of multiple VMs with same name, use I(uuid) to uniquely specify VM to manage. + - This parameter is case sensitive. + type: str + aliases: [ name_label ] + name_desc: + description: + - VM description. + type: str + uuid: + description: + - UUID of the VM to manage if known. This is XenServer's unique identifier. + - It is required if name is not unique. + - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
+ type: str + template: + description: + - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM. + - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found. + - In case of multiple templates/VMs/snapshots with same name, use I(template_uuid) to uniquely specify source template. + - If VM already exists, this setting will be ignored. + - This parameter is case sensitive. + type: str + aliases: [ template_src ] + template_uuid: + description: + - UUID of a template, an existing VM or a snapshot that should be used to create VM. + - It is required if template name is not unique. + type: str + is_template: + description: + - Convert VM to template. + type: bool + default: false + folder: + description: + - Destination folder for VM. + - This parameter is case sensitive. + - 'Example:' + - ' folder: /folder1/folder2' + type: str + hardware: + description: + - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters. + type: dict + suboptions: + num_cpus: + description: + - Number of CPUs. + type: int + num_cpu_cores_per_socket: + description: + - Number of Cores Per Socket. I(num_cpus) has to be a multiple of I(num_cpu_cores_per_socket). + type: int + memory_mb: + description: + - Amount of memory in MB. + type: int + disks: + description: + - A list of disks to add to VM. + - All parameters are case sensitive. + - Removing or detaching existing disks of VM is not supported. + - New disks are required to have either a I(size) or one of I(size_[tb,gb,mb,kb,b]) parameters specified. + - VM needs to be shut down to reconfigure disk size. + type: list + elements: dict + aliases: [ disk ] + suboptions: + size: + description: + - 'Disk size with unit. Unit must be: C(b), C(kb), C(mb), C(gb), C(tb). VM needs to be shut down to reconfigure this parameter.' + - If no unit is specified, size is assumed to be in bytes. + type: str + size_b: + description: + - Disk size in bytes. + type: str + size_kb: + description: + - Disk size in kilobytes. + type: str + size_mb: + description: + - Disk size in megabytes. + type: str + size_gb: + description: + - Disk size in gigabytes. + type: str + size_tb: + description: + - Disk size in terabytes. + type: str + name: + description: + - Disk name. + type: str + aliases: [ name_label ] + name_desc: + description: + - Disk description. + type: str + sr: + description: + - Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR. + type: str + sr_uuid: + description: + - UUID of a SR to create disk on. Use if SR name is not unique. + type: str + cdrom: + description: + - A CD-ROM configuration for the VM. + - All parameters are case sensitive. + type: dict + suboptions: + type: + description: + - The type of CD-ROM. With C(none) the CD-ROM device will be present but empty. + type: str + choices: [ none, iso ] + iso_name: + description: + - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies I(type): C(iso)).' + - Required if I(type) is set to C(iso). + type: str + networks: + description: + - A list of networks (in the order of the NICs). + - All parameters are case sensitive. + - Name is required for new NICs. Other parameters are optional in all cases. + type: list + elements: dict + aliases: [ network ] + suboptions: + name: + description: + - Name of a XenServer network to attach the network interface to. 
+ type: str + aliases: [ name_label ] + mac: + description: + - Customize MAC address of the interface. + type: str + type: + description: + - Type of IPv4 assignment. Value C(none) means whatever is default for OS. + - On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux). + type: str + choices: [ none, dhcp, static ] + ip: + description: + - 'Static IPv4 address (implies I(type): C(static)). Can include prefix in format C(/) instead of using C(netmask).' + type: str + netmask: + description: + - Static IPv4 netmask required for I(ip) if prefix is not specified. + type: str + gateway: + description: + - Static IPv4 gateway. + type: str + type6: + description: + - Type of IPv6 assignment. Value C(none) means whatever is default for OS. + type: str + choices: [ none, dhcp, static ] + ip6: + description: + - 'Static IPv6 address (implies I(type6): C(static)) with prefix in format C(/).' + type: str + gateway6: + description: + - Static IPv6 gateway. + type: str + home_server: + description: + - Name of a XenServer host that will be a Home Server for the VM. + - This parameter is case sensitive. + type: str + custom_params: + description: + - Define a list of custom VM params to set on VM. + - Useful for advanced users familiar with managing VM params through the xe CLI. + - A custom value object takes two fields I(key) and I(value) (see example below). + type: list + elements: dict + suboptions: + key: + description: + - VM param name. + type: str + required: true + value: + description: + - VM param value. + type: raw + required: true + wait_for_ip_address: + description: + - Wait until XenServer detects an IP address for the VM. If I(state) is set to C(absent), this parameter is ignored. + - This requires XenServer Tools to be preinstalled on the VM to work properly. + type: bool + default: false + state_change_timeout: + description: + - 'By default, the module will wait indefinitely for the VM to acquire an IP address if I(wait_for_ip_address): C(true).' + - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change. + - In case of timeout, the module will generate an error message. + type: int + default: 0 + linked_clone: + description: + - Whether to create a Linked Clone from the template, existing VM or snapshot. If not, a full copy will be created. + - This is equivalent to the C(Use storage-level fast disk clone) option in XenCenter. + type: bool + default: false + force: + description: + - Ignore warnings and complete the actions. + - This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down.
+ type: bool + default: false +extends_documentation_fragment: +- community.general.xenserver.documentation +- community.general.attributes + +''' + +EXAMPLES = r''' +- name: Create a VM from a template + community.general.xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + validate_certs: false + folder: /testvms + name: testvm_2 + state: poweredon + template: CentOS 7 + disks: + - size_gb: 10 + sr: my_sr + hardware: + num_cpus: 6 + num_cpu_cores_per_socket: 3 + memory_mb: 512 + cdrom: + type: iso + iso_name: guest-tools.iso + networks: + - name: VM Network + mac: aa:bb:dd:aa:00:14 + wait_for_ip_address: true + delegate_to: localhost + register: deploy + +- name: Create a VM template + community.general.xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + validate_certs: false + folder: /testvms + name: testvm_6 + is_template: true + disk: + - size_gb: 10 + sr: my_sr + hardware: + memory_mb: 512 + num_cpus: 1 + delegate_to: localhost + register: deploy + +- name: Rename a VM (requires the VM's UUID) + community.general.xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + uuid: 421e4592-c069-924d-ce20-7e7533fab926 + name: new_name + state: present + delegate_to: localhost + +- name: Remove a VM by UUID + community.general.xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + uuid: 421e4592-c069-924d-ce20-7e7533fab926 + state: absent + delegate_to: localhost + +- name: Modify custom params (boot order) + community.general.xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + name: testvm_8 + state: present + custom_params: + - key: HVM_boot_params + value: { "order": "ndc" } + delegate_to: localhost + +- name: Customize network parameters + community.general.xenserver_guest: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + name: testvm_10 + networks: + - name: VM Network + ip: 192.168.1.100/24 + gateway: 192.168.1.1 + - type: dhcp + delegate_to: localhost +''' + +RETURN = r''' +instance: + description: Metadata about the VM + returned: always + type: dict + sample: { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "testvm_11-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" + }, + { + "name": "testvm_11-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" + } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "testvm_11", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": 
"64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" + } + } +changes: + description: Detected or made changes to VM + returned: always + type: list + sample: [ + { + "hardware": [ + "num_cpus" + ] + }, + { + "disks_changed": [ + [], + [ + "size" + ] + ] + }, + { + "disks_new": [ + { + "name": "new-disk", + "name_desc": "", + "position": 2, + "size_gb": "4", + "vbd_userdevice": "2" + } + ] + }, + { + "cdrom": [ + "type", + "iso_name" + ] + }, + { + "networks_changed": [ + [ + "mac" + ], + ] + }, + { + "networks_new": [ + { + "name": "Pool-wide network associated with eth2", + "position": 1, + "vif_device": "1" + } + ] + }, + "need_poweredoff" + ] +''' + +import re + +HAS_XENAPI = False +try: + import XenAPI + HAS_XENAPI = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.network import is_mac +from ansible.module_utils import six +from ansible_collections.community.general.plugins.module_utils.xenserver import ( + xenserver_common_argument_spec, XenServerObject, get_object_ref, + gather_vm_params, gather_vm_facts, set_vm_power_state, + wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask, + is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix, + is_valid_ip6_addr, is_valid_ip6_prefix) + + +class XenServerVM(XenServerObject): + """Class for managing XenServer VM. + + Attributes: + vm_ref (str): XAPI reference to VM. + vm_params (dict): A dictionary with VM parameters as returned + by gather_vm_params() function. + """ + + def __init__(self, module): + """Inits XenServerVM using module parameters. + + Args: + module: Reference to Ansible module object. + """ + super(XenServerVM, self).__init__(module) + + self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ") + self.gather_params() + + def exists(self): + """Returns True if VM exists, else False.""" + return True if self.vm_ref is not None else False + + def gather_params(self): + """Gathers all VM parameters available in XAPI database.""" + self.vm_params = gather_vm_params(self.module, self.vm_ref) + + def gather_facts(self): + """Gathers and returns VM facts.""" + return gather_vm_facts(self.module, self.vm_params) + + def set_power_state(self, power_state): + """Controls VM power state.""" + state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) + + # If state has changed, update vm_params. 
+ if state_changed: + self.vm_params['power_state'] = current_state.capitalize() + + return state_changed + + def wait_for_ip_address(self): + """Waits for VM to acquire an IP address.""" + self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) + + def deploy(self): + """Deploys new VM from template.""" + # Safety check. + if self.exists(): + self.module.fail_json(msg="Called deploy on existing VM!") + + try: + templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True, + msg_prefix="VM deploy: ") + + # Is this an existing running VM? + if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted': + self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!") + + # Find a SR we can use for VM.copy(). We use SR of the first disk + # if specified or default SR if not specified. + disk_params_list = self.module.params['disks'] + + sr_ref = None + + if disk_params_list: + disk_params = disk_params_list[0] + + disk_sr_uuid = disk_params.get('sr_uuid') + disk_sr = disk_params.get('sr') + + if disk_sr_uuid is not None or disk_sr is not None: + sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, + msg_prefix="VM deploy disks[0]: ") + + if not sr_ref: + if self.default_sr_ref != "OpaqueRef:NULL": + sr_ref = self.default_sr_ref + else: + self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.") + + # VM name could be an empty string which is bad. + if self.module.params['name'] is not None and not self.module.params['name']: + self.module.fail_json(msg="VM deploy: VM name must not be an empty string!") + + # Support for Ansible check mode. + if self.module.check_mode: + return + + # Now we can instantiate VM. We use VM.clone for linked_clone and + # VM.copy for non linked_clone. + if self.module.params['linked_clone']: + self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name']) + else: + self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref) + + # Description is copied over from template so we reset it. + self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "") + + # If template is one of built-in XenServer templates, we have to + # do some additional steps. + # Note: VM.get_is_default_template() is supported from XenServer 7.2 + # onward so we use an alternative way. + templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref) + + if "default_template" in templ_other_config and templ_other_config['default_template']: + # other_config of built-in XenServer templates have a key called + # 'disks' with the following content: + # disks: + # This value of other_data is copied to cloned or copied VM and + # it prevents provisioning of VM because sr is not specified and + # XAPI returns an error. To get around this, we remove the + # 'disks' key and add disks to VM later ourselves. + vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref) + + if "disks" in vm_other_config: + del vm_other_config['disks'] + + self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config) + + # At this point we have VM ready for provisioning. + self.xapi_session.xenapi.VM.provision(self.vm_ref) + + # After provisioning we can prepare vm_params for reconfigure(). + self.gather_params() + + # VM is almost ready. 
We just need to reconfigure it... + self.reconfigure() + + # Power on VM if needed. + if self.module.params['state'] == "poweredon": + self.set_power_state("poweredon") + + except XenAPI.Failure as f: + self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + + def reconfigure(self): + """Reconfigures an existing VM. + + Returns: + list: parameters that were reconfigured. + """ + # Safety check. + if not self.exists(): + self.module.fail_json(msg="Called reconfigure on non existing VM!") + + config_changes = self.get_changes() + + vm_power_state_save = self.vm_params['power_state'].lower() + + if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']: + self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!") + + # Support for Ansible check mode. + if self.module.check_mode: + return config_changes + + if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']: + self.set_power_state("shutdownguest") + + try: + for change in config_changes: + if isinstance(change, six.string_types): + if change == "name": + self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name']) + elif change == "name_desc": + self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc']) + elif change == "folder": + self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder') + + if self.module.params['folder']: + self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder']) + elif change == "home_server": + if self.module.params['home_server']: + host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0] + else: + host_ref = "OpaqueRef:NULL" + + self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref) + elif isinstance(change, dict): + if change.get('hardware'): + for hardware_change in change['hardware']: + if hardware_change == "num_cpus": + num_cpus = int(self.module.params['hardware']['num_cpus']) + + if num_cpus < int(self.vm_params['VCPUs_at_startup']): + self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus)) + self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus)) + else: + self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus)) + self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus)) + elif hardware_change == "num_cpu_cores_per_socket": + self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket') + num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket']) + + if num_cpu_cores_per_socket > 1: + self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket)) + elif hardware_change == "memory_mb": + memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576) + vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min']))) + + self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b) + elif change.get('disks_changed'): + vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] + position = 0 + + for disk_change_list in change['disks_changed']: + for disk_change in disk_change_list: + vdi_ref = 
self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid']) + + if disk_change == "name": + self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name']) + elif disk_change == "name_desc": + self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc']) + elif disk_change == "size": + self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position], + "VM reconfigure disks[%s]: " % position))) + + position += 1 + elif change.get('disks_new'): + for position, disk_userdevice in change['disks_new']: + disk_params = self.module.params['disks'][position] + + disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position) + disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else "" + + if disk_params.get('sr_uuid'): + sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid']) + elif disk_params.get('sr'): + sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0] + else: + sr_ref = self.default_sr_ref + + disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position)) + + new_disk_vdi = { + "name_label": disk_name, + "name_description": disk_name_desc, + "SR": sr_ref, + "virtual_size": disk_size, + "type": "user", + "sharable": False, + "read_only": False, + "other_config": {}, + } + + new_disk_vbd = { + "VM": self.vm_ref, + "VDI": None, + "userdevice": disk_userdevice, + "bootable": False, + "mode": "RW", + "type": "Disk", + "empty": False, + "other_config": {}, + "qos_algorithm_type": "", + "qos_algorithm_params": {}, + } + + new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi) + vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd) + + if self.vm_params['power_state'].lower() == "running": + self.xapi_session.xenapi.VBD.plug(vbd_ref_new) + + elif change.get('cdrom'): + vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"] + + # If there is no CD present, we have to create one. + if not vm_cdrom_params_list: + # We will try to place cdrom at userdevice position + # 3 (which is default) if it is not already occupied + # else we will place it at first allowed position. + cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref) + + if "3" in cdrom_userdevices_allowed: + cdrom_userdevice = "3" + else: + cdrom_userdevice = cdrom_userdevices_allowed[0] + + cdrom_vbd = { + "VM": self.vm_ref, + "VDI": "OpaqueRef:NULL", + "userdevice": cdrom_userdevice, + "bootable": False, + "mode": "RO", + "type": "CD", + "empty": True, + "other_config": {}, + "qos_algorithm_type": "", + "qos_algorithm_params": {}, + } + + cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd) + else: + cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid']) + + cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref) + + for cdrom_change in change['cdrom']: + if cdrom_change == "type": + cdrom_type = self.module.params['cdrom']['type'] + + if cdrom_type == "none" and not cdrom_is_empty: + self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) + elif cdrom_type == "host": + # Unimplemented! 
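+ # Note: C(host) is not among the documented I(cdrom.type) choices (C(none) and C(iso)), so this branch is currently unreachable; it appears to be a placeholder for host CD-ROM passthrough.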
+ pass + + elif cdrom_change == "iso_name": + if not cdrom_is_empty: + self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref) + + cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0] + self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref) + elif change.get('networks_changed'): + position = 0 + + for network_change_list in change['networks_changed']: + if network_change_list: + vm_vif_params = self.vm_params['VIFs'][position] + network_params = self.module.params['networks'][position] + + vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid']) + network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid']) + + vif_recreated = False + + if "name" in network_change_list or "mac" in network_change_list: + # To change network or MAC, we destroy old + # VIF and then create a new one with changed + # parameters. That's how XenCenter does it. + + # Copy all old parameters to new VIF record. + vif = { + "device": vm_vif_params['device'], + "network": network_ref, + "VM": vm_vif_params['VM'], + "MAC": vm_vif_params['MAC'], + "MTU": vm_vif_params['MTU'], + "other_config": vm_vif_params['other_config'], + "qos_algorithm_type": vm_vif_params['qos_algorithm_type'], + "qos_algorithm_params": vm_vif_params['qos_algorithm_params'], + "locking_mode": vm_vif_params['locking_mode'], + "ipv4_allowed": vm_vif_params['ipv4_allowed'], + "ipv6_allowed": vm_vif_params['ipv6_allowed'], + } + + if "name" in network_change_list: + network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0] + vif['network'] = network_ref_new + vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new) + + if "mac" in network_change_list: + vif['MAC'] = network_params['mac'].lower() + + if self.vm_params['power_state'].lower() == "running": + self.xapi_session.xenapi.VIF.unplug(vif_ref) + + self.xapi_session.xenapi.VIF.destroy(vif_ref) + vif_ref_new = self.xapi_session.xenapi.VIF.create(vif) + + if self.vm_params['power_state'].lower() == "running": + self.xapi_session.xenapi.VIF.plug(vif_ref_new) + + vif_ref = vif_ref_new + vif_recreated = True + + if self.vm_params['customization_agent'] == "native": + vif_reconfigure_needed = False + + if "type" in network_change_list: + network_type = network_params['type'].capitalize() + vif_reconfigure_needed = True + else: + network_type = vm_vif_params['ipv4_configuration_mode'] + + if "ip" in network_change_list: + network_ip = network_params['ip'] + vif_reconfigure_needed = True + elif vm_vif_params['ipv4_addresses']: + network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0] + else: + network_ip = "" + + if "prefix" in network_change_list: + network_prefix = "/%s" % network_params['prefix'] + vif_reconfigure_needed = True + elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]: + network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1] + else: + network_prefix = "" + + if "gateway" in network_change_list: + network_gateway = network_params['gateway'] + vif_reconfigure_needed = True + else: + network_gateway = vm_vif_params['ipv4_gateway'] + + if vif_recreated or vif_reconfigure_needed: + self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type, + "%s%s" % (network_ip, network_prefix), network_gateway) + + vif_reconfigure_needed = False + + if "type6" in network_change_list: + network_type6 = network_params['type6'].capitalize() + vif_reconfigure_needed = True + else: + network_type6 = 
vm_vif_params['ipv6_configuration_mode'] + + if "ip6" in network_change_list: + network_ip6 = network_params['ip6'] + vif_reconfigure_needed = True + elif vm_vif_params['ipv6_addresses']: + network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0] + else: + network_ip6 = "" + + if "prefix6" in network_change_list: + network_prefix6 = "/%s" % network_params['prefix6'] + vif_reconfigure_needed = True + elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]: + network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1] + else: + network_prefix6 = "" + + if "gateway6" in network_change_list: + network_gateway6 = network_params['gateway6'] + vif_reconfigure_needed = True + else: + network_gateway6 = vm_vif_params['ipv6_gateway'] + + if vif_recreated or vif_reconfigure_needed: + self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6, + "%s%s" % (network_ip6, network_prefix6), network_gateway6) + + elif self.vm_params['customization_agent'] == "custom": + vif_device = vm_vif_params['device'] + + # A user could have manually changed network + # or mac e.g. through XenCenter and then also + # made those changes in the playbook manually. + # In that case, the module will not detect any + # changes and info in xenstore_data will + # become stale. For that reason we always + # update name and mac in xenstore_data. + + # Since we handle name and mac differently, + # we have to remove them from + # network_change_list. + network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']] + + for network_change in network_change_list_tmp + ['name', 'mac']: + self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, + "vm-data/networks/%s/%s" % (vif_device, network_change)) + + if network_params.get('name'): + network_name = network_params['name'] + else: + network_name = vm_vif_params['network']['name_label'] + + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name) + + if network_params.get('mac'): + network_mac = network_params['mac'].lower() + else: + network_mac = vm_vif_params['MAC'].lower() + + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac) + + for network_change in network_change_list_tmp: + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/%s" % (vif_device, network_change), + network_params[network_change]) + + position += 1 + elif change.get('networks_new'): + for position, vif_device in change['networks_new']: + network_params = self.module.params['networks'][position] + + network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0] + + network_name = network_params['name'] + network_mac = network_params['mac'] if network_params.get('mac') else "" + network_type = network_params.get('type') + network_ip = network_params['ip'] if network_params.get('ip') else "" + network_prefix = network_params['prefix'] if network_params.get('prefix') else "" + network_netmask = network_params['netmask'] if network_params.get('netmask') else "" + network_gateway = network_params['gateway'] if network_params.get('gateway') else "" + network_type6 = network_params.get('type6') + network_ip6 = network_params['ip6'] if network_params.get('ip6') else "" + network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else "" + network_gateway6 = network_params['gateway6'] if
network_params.get('gateway6') else "" + + vif = { + "device": vif_device, + "network": network_ref, + "VM": self.vm_ref, + "MAC": network_mac, + "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref), + "other_config": {}, + "qos_algorithm_type": "", + "qos_algorithm_params": {}, + } + + vif_ref_new = self.xapi_session.xenapi.VIF.create(vif) + + if self.vm_params['power_state'].lower() == "running": + self.xapi_session.xenapi.VIF.plug(vif_ref_new) + + if self.vm_params['customization_agent'] == "native": + if network_type and network_type == "static": + self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static", + "%s/%s" % (network_ip, network_prefix), network_gateway) + + if network_type6 and network_type6 == "static": + self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static", + "%s/%s" % (network_ip6, network_prefix6), network_gateway6) + elif self.vm_params['customization_agent'] == "custom": + # We first have to remove any existing data + # from xenstore_data because there could be + # some old leftover data from some interface + # that once occupied same device location as + # our new interface. + for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']: + self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param)) + + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name) + + # We get MAC from VIF itself instead of + # networks.mac because it could be + # autogenerated. + vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac) + + if network_type: + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type) + + if network_type == "static": + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/ip" % vif_device, network_ip) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/prefix" % vif_device, network_prefix) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/netmask" % vif_device, network_netmask) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/gateway" % vif_device, network_gateway) + + if network_type6: + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6) + + if network_type6 == "static": + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/ip6" % vif_device, network_ip6) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/prefix6" % vif_device, network_prefix6) + self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, + "vm-data/networks/%s/gateway6" % vif_device, network_gateway6) + + elif change.get('custom_params'): + for position in change['custom_params']: + custom_param_key = self.module.params['custom_params'][position]['key'] + custom_param_value = self.module.params['custom_params'][position]['value'] + self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value)) + + if self.module.params['is_template']: + self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True) + elif "need_poweredoff" in config_changes and self.module.params['force'] and 
vm_power_state_save != 'halted': + self.set_power_state("poweredon") + + # Gather new params after reconfiguration. + self.gather_params() + + except XenAPI.Failure as f: + self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + + return config_changes + + def destroy(self): + """Removes an existing VM with associated disks""" + # Safety check. + if not self.exists(): + self.module.fail_json(msg="Called destroy on non existing VM!") + + if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']: + self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!") + + # Support for Ansible check mode. + if self.module.check_mode: + return + + # Make sure that VM is poweredoff before we can destroy it. + self.set_power_state("poweredoff") + + try: + # Destroy VM! + self.xapi_session.xenapi.VM.destroy(self.vm_ref) + + vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] + + # Destroy all VDIs associated with VM! + for vm_disk_params in vm_disk_params_list: + vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid']) + + self.xapi_session.xenapi.VDI.destroy(vdi_ref) + + except XenAPI.Failure as f: + self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + + def get_changes(self): + """Finds VM parameters that differ from specified ones. + + This method builds a dictionary with hierarchy of VM parameters + that differ from those specified in module parameters. + + Returns: + list: VM parameters that differ from those specified in + module parameters. + """ + # Safety check. + if not self.exists(): + self.module.fail_json(msg="Called get_changes on non existing VM!") + + need_poweredoff = False + + if self.module.params['is_template']: + need_poweredoff = True + + try: + # This VM could be a template or a snapshot. In that case we fail + # because we can't reconfigure them or it would just be too + # dangerous. + if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']: + self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.") + + if self.vm_params['is_a_snapshot']: + self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.") + + # Let's build a list of parameters that changed. + config_changes = [] + + # Name could only differ if we found an existing VM by uuid. + if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']: + if self.module.params['name']: + config_changes.append('name') + else: + self.module.fail_json(msg="VM check name: VM name cannot be an empty string!") + + if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']: + config_changes.append('name_desc') + + # Folder parameter is found in other_config. + vm_other_config = self.vm_params['other_config'] + vm_folder = vm_other_config.get('folder', '') + + if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder: + config_changes.append('folder') + + if self.module.params['home_server'] is not None: + if (self.module.params['home_server'] and + (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])): + + # Check existence only. Ignore return value. 
+ get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True, + msg_prefix="VM check home_server: ") + + config_changes.append('home_server') + elif not self.module.params['home_server'] and self.vm_params['affinity']: + config_changes.append('home_server') + + config_changes_hardware = [] + + if self.module.params['hardware']: + num_cpus = self.module.params['hardware'].get('num_cpus') + + if num_cpus is not None: + # Kept for compatibility with older Ansible versions that + # do not support subargument specs. + try: + num_cpus = int(num_cpus) + except ValueError as e: + self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!") + + if num_cpus < 1: + self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!") + + # We can use VCPUs_at_startup or VCPUs_max parameter. I'd + # say the former is the way to go but this needs + # confirmation and testing. + if num_cpus != int(self.vm_params['VCPUs_at_startup']): + config_changes_hardware.append('num_cpus') + # For now, we don't support hotplugging so VM has to be in + # poweredoff state to reconfigure. + need_poweredoff = True + + num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket') + + if num_cpu_cores_per_socket is not None: + # Kept for compatibility with older Ansible versions that + # do not support subargument specs. + try: + num_cpu_cores_per_socket = int(num_cpu_cores_per_socket) + except ValueError as e: + self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!") + + if num_cpu_cores_per_socket < 1: + self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!") + + if num_cpus and num_cpus % num_cpu_cores_per_socket != 0: + self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!") + + vm_platform = self.vm_params['platform'] + vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1)) + + if num_cpu_cores_per_socket != vm_cores_per_socket: + config_changes_hardware.append('num_cpu_cores_per_socket') + # For now, we don't support hotplugging so VM has to be + # in poweredoff state to reconfigure. + need_poweredoff = True + + memory_mb = self.module.params['hardware'].get('memory_mb') + + if memory_mb is not None: + # Kept for compatibility with older Ansible versions that + # do not support subargument specs. + try: + memory_mb = int(memory_mb) + except ValueError as e: + self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!") + + if memory_mb < 1: + self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!") + + # There are multiple memory parameters: + # - memory_dynamic_max + # - memory_dynamic_min + # - memory_static_max + # - memory_static_min + # - memory_target + # + # memory_target seems like a good candidate but it returns 0 for + # halted VMs so we can't use it. + # + # I decided to use memory_dynamic_max and memory_static_max + # and use whichever is larger. This strategy needs validation + # and testing. + # + # XenServer stores memory size in bytes so we need to divide + # it by 1024*1024 = 1048576.
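+ # Worked example: a VM whose memory_static_max is 8589934592 bytes compares equal to hardware.memory_mb of 8192, since 8589934592 / 1048576 == 8192, so no 'memory_mb' change would be detected.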
+ if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576): + config_changes_hardware.append('memory_mb') + # For now, we don't support hotplugging so VM has to be in + # poweredoff state to reconfigure. + need_poweredoff = True + + if config_changes_hardware: + config_changes.append({"hardware": config_changes_hardware}) + + config_changes_disks = [] + config_new_disks = [] + + # Find allowed userdevices. + vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref) + + if self.module.params['disks']: + # Get the list of all disks. Filter out any CDs found. + vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"] + + # The number of disks defined in module params has to be the same + # as or higher than the number of existing disks attached to the VM. + # We don't support removal or detachment of disks. + if len(self.module.params['disks']) < len(vm_disk_params_list): + self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" % + (len(self.module.params['disks']), len(vm_disk_params_list))) + + # Find the highest userdevice occupied by a disk. + if not vm_disk_params_list: + vm_disk_userdevice_highest = "-1" + else: + vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice'] + + for position in range(len(self.module.params['disks'])): + if position < len(vm_disk_params_list): + vm_disk_params = vm_disk_params_list[position] + else: + vm_disk_params = None + + disk_params = self.module.params['disks'][position] + + disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position) + + disk_name = disk_params.get('name') + + if disk_name is not None and not disk_name: + self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position) + + # If this is an existing disk. + if vm_disk_params and vm_disk_params['VDI']: + disk_changes = [] + + if disk_name and disk_name != vm_disk_params['VDI']['name_label']: + disk_changes.append('name') + + disk_name_desc = disk_params.get('name_desc') + + if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']: + disk_changes.append('name_desc') + + if disk_size: + if disk_size > int(vm_disk_params['VDI']['virtual_size']): + disk_changes.append('size') + need_poweredoff = True + elif disk_size < int(vm_disk_params['VDI']['virtual_size']): + self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). " + "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size'])) + + config_changes_disks.append(disk_changes) + # If this is a new disk. + else: + if not disk_size: + self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position) + + disk_sr_uuid = disk_params.get('sr_uuid') + disk_sr = disk_params.get('sr') + + if disk_sr_uuid is not None or disk_sr is not None: + # Check existence only. Ignore return value. + get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True, + msg_prefix="VM check disks[%s]: " % position) + elif self.default_sr_ref == 'OpaqueRef:NULL': + self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position) + + if not vbd_userdevices_allowed: + self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!"
% position) + + disk_userdevice = None + + # We need to place a new disk right above the highest + # placed existing disk to maintain relative disk + # positions pairable with disk specifications in + # module params. That place must not be occupied by + # some other device like CD-ROM. + for userdevice in vbd_userdevices_allowed: + if int(userdevice) > int(vm_disk_userdevice_highest): + disk_userdevice = userdevice + vbd_userdevices_allowed.remove(userdevice) + vm_disk_userdevice_highest = userdevice + break + + # If no place was found. + if disk_userdevice is None: + # Highest occupied place could be a CD-ROM device + # so we have to include all devices regardless of + # type when calculating out-of-bound position. + disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1) + self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice)) + + # For new disks we only track their position. + config_new_disks.append((position, disk_userdevice)) + + # We should append config_changes_disks to config_changes only + # if there is at least one changed disk, else skip. + for disk_change in config_changes_disks: + if disk_change: + config_changes.append({"disks_changed": config_changes_disks}) + break + + if config_new_disks: + config_changes.append({"disks_new": config_new_disks}) + + config_changes_cdrom = [] + + if self.module.params['cdrom']: + # Get the list of all CD-ROMs. Filter out any regular disks + # found. If we found no existing CD-ROM, we will create it + # later else take the first one found. + vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"] + + # If no existing CD-ROM is found, we will need to add one. + # We need to check if there is any userdevice allowed. + if not vm_cdrom_params_list and not vbd_userdevices_allowed: + self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!") + + cdrom_type = self.module.params['cdrom'].get('type') + cdrom_iso_name = self.module.params['cdrom'].get('iso_name') + + # If cdrom.iso_name is specified but cdrom.type is not, + # then set cdrom.type to 'iso', unless cdrom.iso_name is + # an empty string, in that case set cdrom.type to 'none'. + if not cdrom_type: + if cdrom_iso_name: + cdrom_type = "iso" + elif cdrom_iso_name is not None: + cdrom_type = "none" + + self.module.params['cdrom']['type'] = cdrom_type + + # If type changed. + if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])): + config_changes_cdrom.append('type') + + if cdrom_type == "iso": + # Check if ISO exists. + # Check existence only. Ignore return value. + get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True, + msg_prefix="VM check cdrom.iso_name: ") + + # Is ISO image changed? + if (cdrom_iso_name and + (not vm_cdrom_params_list or + not vm_cdrom_params_list[0]['VDI'] or + cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])): + config_changes_cdrom.append('iso_name') + + if config_changes_cdrom: + config_changes.append({"cdrom": config_changes_cdrom}) + + config_changes_networks = [] + config_new_networks = [] + + # Find allowed devices. + vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref) + + if self.module.params['networks']: + # Number of VIFs defined in module params have to be same or + # higher than a number of existing VIFs attached to the VM. + # We don't support removal of VIFs. 
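+                # Illustration with hypothetical counts: with 2 existing VIFs,
+                # a networks list of length 1 fails the check below, a list of
+                # length 2 only reconfigures the existing VIFs, and a list of
+                # length 3 also adds one new VIF.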
+ if len(self.module.params['networks']) < len(self.vm_params['VIFs']): + self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" % + (len(self.module.params['networks']), len(self.vm_params['VIFs']))) + + # Find the highest occupied device. + if not self.vm_params['VIFs']: + vif_device_highest = "-1" + else: + vif_device_highest = self.vm_params['VIFs'][-1]['device'] + + for position in range(len(self.module.params['networks'])): + if position < len(self.vm_params['VIFs']): + vm_vif_params = self.vm_params['VIFs'][position] + else: + vm_vif_params = None + + network_params = self.module.params['networks'][position] + + network_name = network_params.get('name') + + if network_name is not None and not network_name: + self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position) + + if network_name: + # Check existence only. Ignore return value. + get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True, + msg_prefix="VM check networks[%s]: " % position) + + network_mac = network_params.get('mac') + + if network_mac is not None: + network_mac = network_mac.lower() + + if not is_mac(network_mac): + self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac)) + + # IPv4 reconfiguration. + network_type = network_params.get('type') + network_ip = network_params.get('ip') + network_netmask = network_params.get('netmask') + network_prefix = None + + # If networks.ip is specified and networks.type is not, + # then set networks.type to 'static'. + if not network_type and network_ip: + network_type = "static" + + # XenServer natively supports only 'none' and 'static' + # type with 'none' being the same as 'dhcp'. + if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp": + network_type = "none" + + if network_type and network_type == "static": + if network_ip is not None: + network_ip_split = network_ip.split('/') + network_ip = network_ip_split[0] + + if network_ip and not is_valid_ip_addr(network_ip): + self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip)) + + if len(network_ip_split) > 1: + network_prefix = network_ip_split[1] + + if not is_valid_ip_prefix(network_prefix): + self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix)) + + if network_netmask is not None: + if not is_valid_ip_netmask(network_netmask): + self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask)) + + network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True) + elif network_prefix is not None: + network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True) + + # If any parameter is overridden at this point, update it. + if network_type: + network_params['type'] = network_type + + if network_ip: + network_params['ip'] = network_ip + + if network_netmask: + network_params['netmask'] = network_netmask + + if network_prefix: + network_params['prefix'] = network_prefix + + network_gateway = network_params.get('gateway') + + # Gateway can be an empty string (when removing gateway + # configuration) but if it is not, it should be validated. 
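+                    # Note for the truthiness test below: gateway='' (a request
+                    # to remove the gateway) and an omitted gateway (None) both
+                    # skip validation; only a non-empty string is checked as an
+                    # IPv4 address.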
+                    if network_gateway and not is_valid_ip_addr(network_gateway):
+                        self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
+
+                    # IPv6 reconfiguration.
+                    network_type6 = network_params.get('type6')
+                    network_ip6 = network_params.get('ip6')
+                    network_prefix6 = None
+
+                    # If networks.ip6 is specified and networks.type6 is not,
+                    # then set networks.type6 to 'static'.
+                    if not network_type6 and network_ip6:
+                        network_type6 = "static"
+
+                    # XenServer natively supports only 'none' and 'static'
+                    # type with 'none' being the same as 'dhcp'.
+                    if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
+                        network_type6 = "none"
+
+                    if network_type6 and network_type6 == "static":
+                        if network_ip6 is not None:
+                            network_ip6_split = network_ip6.split('/')
+                            network_ip6 = network_ip6_split[0]
+
+                            if network_ip6 and not is_valid_ip6_addr(network_ip6):
+                                self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
+
+                            if len(network_ip6_split) > 1:
+                                network_prefix6 = network_ip6_split[1]
+
+                                if not is_valid_ip6_prefix(network_prefix6):
+                                    self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
+
+                    # If any parameter is overridden at this point, update it.
+                    if network_type6:
+                        network_params['type6'] = network_type6
+
+                    if network_ip6:
+                        network_params['ip6'] = network_ip6
+
+                    if network_prefix6:
+                        network_params['prefix6'] = network_prefix6
+
+                    network_gateway6 = network_params.get('gateway6')
+
+                    # Gateway can be an empty string (when removing gateway
+                    # configuration) but if it is not, it should be validated.
+                    if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
+                        self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
+
+                    # If this is an existing VIF.
+ if vm_vif_params and vm_vif_params['network']: + network_changes = [] + + if network_name and network_name != vm_vif_params['network']['name_label']: + network_changes.append('name') + + if network_mac and network_mac != vm_vif_params['MAC'].lower(): + network_changes.append('mac') + + if self.vm_params['customization_agent'] == "native": + if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower(): + network_changes.append('type') + + if network_type and network_type == "static": + if network_ip and (not vm_vif_params['ipv4_addresses'] or + not vm_vif_params['ipv4_addresses'][0] or + network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]): + network_changes.append('ip') + + if network_prefix and (not vm_vif_params['ipv4_addresses'] or + not vm_vif_params['ipv4_addresses'][0] or + network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]): + network_changes.append('prefix') + network_changes.append('netmask') + + if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']: + network_changes.append('gateway') + + if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower(): + network_changes.append('type6') + + if network_type6 and network_type6 == "static": + if network_ip6 and (not vm_vif_params['ipv6_addresses'] or + not vm_vif_params['ipv6_addresses'][0] or + network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]): + network_changes.append('ip6') + + if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or + not vm_vif_params['ipv6_addresses'][0] or + network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]): + network_changes.append('prefix6') + + if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']: + network_changes.append('gateway6') + + elif self.vm_params['customization_agent'] == "custom": + vm_xenstore_data = self.vm_params['xenstore_data'] + + if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"): + network_changes.append('type') + need_poweredoff = True + + if network_type and network_type == "static": + if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""): + network_changes.append('ip') + need_poweredoff = True + + if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""): + network_changes.append('prefix') + network_changes.append('netmask') + need_poweredoff = True + + if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' % + vm_vif_params['device'], ""): + network_changes.append('gateway') + need_poweredoff = True + + if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"): + network_changes.append('type6') + need_poweredoff = True + + if network_type6 and network_type6 == "static": + if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""): + network_changes.append('ip6') + need_poweredoff = True + + if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""): + network_changes.append('prefix6') + need_poweredoff = True + + if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' % + vm_vif_params['device'], ""): + network_changes.append('gateway6') + 
need_poweredoff = True
+
+                        config_changes_networks.append(network_changes)
+                    # If this is a new VIF.
+                    else:
+                        if not network_name:
+                            self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
+
+                        if network_type and network_type == "static" and network_ip and not network_netmask:
+                            self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
+
+                        if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
+                            self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
+
+                        # A restart is needed if we are adding a new network
+                        # interface with IP/gateway parameters specified and
+                        # the custom agent is used.
+                        if self.vm_params['customization_agent'] == "custom":
+                            for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+                                if network_params.get(parameter):
+                                    need_poweredoff = True
+                                    break
+
+                        if not vif_devices_allowed:
+                            self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
+
+                        # We need to place a new network interface right above the
+                        # highest placed existing interface to maintain relative
+                        # positions pairable with network interface specifications
+                        # in module params.
+                        vif_device = str(int(vif_device_highest) + 1)
+
+                        if vif_device not in vif_devices_allowed:
+                            self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
+
+                        vif_devices_allowed.remove(vif_device)
+                        vif_device_highest = vif_device
+
+                        # For new VIFs we only track their position.
+                        config_new_networks.append((position, vif_device))
+
+            # We should append config_changes_networks to config_changes only
+            # if there is at least one changed network, else skip.
+            for network_change in config_changes_networks:
+                if network_change:
+                    config_changes.append({"networks_changed": config_changes_networks})
+                    break
+
+            if config_new_networks:
+                config_changes.append({"networks_new": config_new_networks})
+
+            config_changes_custom_params = []
+
+            if self.module.params['custom_params']:
+                for position in range(len(self.module.params['custom_params'])):
+                    custom_param = self.module.params['custom_params'][position]
+
+                    custom_param_key = custom_param['key']
+                    custom_param_value = custom_param['value']
+
+                    if custom_param_key not in self.vm_params:
+                        self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
+
+                    if custom_param_value != self.vm_params[custom_param_key]:
+                        # We only need to track custom param position.
+                        config_changes_custom_params.append(position)
+
+            if config_changes_custom_params:
+                config_changes.append({"custom_params": config_changes_custom_params})
+
+            if need_poweredoff:
+                config_changes.append('need_poweredoff')
+
+            return config_changes
+
+        except XenAPI.Failure as f:
+            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+    def get_normalized_disk_size(self, disk_params, msg_prefix=""):
+        """Parses disk size parameters and returns disk size in bytes.
+
+        This method tries to parse disk size module parameters. It fails
+        with an error message if size cannot be parsed.
+
+        Args:
+            disk_params (dict): A dictionary with disk parameters.
+            msg_prefix (str): A string that error messages should be prefixed
+                with (default: "").
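+
+        Example (illustrative, not part of the module's public interface):
+            get_normalized_disk_size({'size': '10 gb'}) returns 10737418240,
+            and get_normalized_disk_size({'size_mb': '512'}) returns 536870912.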
+
+        Returns:
+            int: disk size in bytes if disk size is successfully parsed or
+                None if no disk size parameters were found.
+        """
+        # There should be only a single size spec but we make a list of all
+        # size specs just in case. Priority is given to 'size' but if it is
+        # not found, we check for 'size_tb', 'size_gb', 'size_mb' etc. and
+        # use the first one found.
+        disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
+
+        if disk_size_spec:
+            try:
+                # size
+                if "size" in disk_size_spec:
+                    size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
+                    disk_size_m = size_regex.match(disk_params['size'])
+
+                    if disk_size_m:
+                        size = disk_size_m.group(1)
+                        unit = disk_size_m.group(2)
+                    else:
+                        raise ValueError
+                # size_tb, size_gb, size_mb, size_kb, size_b
+                else:
+                    size = disk_params[disk_size_spec[0]]
+                    unit = disk_size_spec[0].split('_')[-1]
+
+                if not unit:
+                    unit = "b"
+                else:
+                    unit = unit.lower()
+
+                if re.match(r'\d+\.\d+', size):
+                    # We found a float value in the string, let's typecast it.
+                    if unit == "b":
+                        # If we found a float but the unit is bytes, we take the integer part only.
+                        size = int(float(size))
+                    else:
+                        size = float(size)
+                else:
+                    # We found an int value in the string, let's typecast it.
+                    size = int(size)
+
+                if not size or size < 0:
+                    raise ValueError
+
+            except (TypeError, ValueError, NameError):
+                # Common failure path for all parsing errors.
+                self.module.fail_json(msg="%sfailed to parse disk size! Please review the value provided using the documentation." % msg_prefix)
+
+            disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
+
+            if unit in disk_units:
+                return int(size * (1024 ** disk_units[unit]))
+            else:
+                self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
+                                      (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
+        else:
+            return None
+
+    @staticmethod
+    def get_cdrom_type(vm_cdrom_params):
+        """Returns VM CD-ROM type."""
+        # TODO: implement support for detecting type 'host'. No server to test
+        # this on at the moment.
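+        #
+        # As implemented below, an empty virtual CD drive maps to type
+        # "none" and a drive with any media inserted maps to "iso".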
+ if vm_cdrom_params['empty']: + return "none" + else: + return "iso" + + +def main(): + argument_spec = xenserver_common_argument_spec() + argument_spec.update( + state=dict(type='str', default='present', + choices=['present', 'absent', 'poweredon']), + name=dict(type='str', aliases=['name_label']), + name_desc=dict(type='str'), + uuid=dict(type='str'), + template=dict(type='str', aliases=['template_src']), + template_uuid=dict(type='str'), + is_template=dict(type='bool', default=False), + folder=dict(type='str'), + hardware=dict( + type='dict', + options=dict( + num_cpus=dict(type='int'), + num_cpu_cores_per_socket=dict(type='int'), + memory_mb=dict(type='int'), + ), + ), + disks=dict( + type='list', + elements='dict', + options=dict( + size=dict(type='str'), + size_tb=dict(type='str'), + size_gb=dict(type='str'), + size_mb=dict(type='str'), + size_kb=dict(type='str'), + size_b=dict(type='str'), + name=dict(type='str', aliases=['name_label']), + name_desc=dict(type='str'), + sr=dict(type='str'), + sr_uuid=dict(type='str'), + ), + aliases=['disk'], + mutually_exclusive=[ + ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'], + ['sr', 'sr_uuid'], + ], + ), + cdrom=dict( + type='dict', + options=dict( + type=dict(type='str', choices=['none', 'iso']), + iso_name=dict(type='str'), + ), + required_if=[ + ['type', 'iso', ['iso_name']], + ], + ), + networks=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', aliases=['name_label']), + mac=dict(type='str'), + type=dict(type='str', choices=['none', 'dhcp', 'static']), + ip=dict(type='str'), + netmask=dict(type='str'), + gateway=dict(type='str'), + type6=dict(type='str', choices=['none', 'dhcp', 'static']), + ip6=dict(type='str'), + gateway6=dict(type='str'), + ), + aliases=['network'], + required_if=[ + ['type', 'static', ['ip']], + ['type6', 'static', ['ip6']], + ], + ), + home_server=dict(type='str'), + custom_params=dict( + type='list', + elements='dict', + options=dict( + key=dict(type='str', required=True, no_log=False), + value=dict(type='raw', required=True), + ), + ), + wait_for_ip_address=dict(type='bool', default=False), + state_change_timeout=dict(type='int', default=0), + linked_clone=dict(type='bool', default=False), + force=dict(type='bool', default=False), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[ + ['name', 'uuid'], + ], + mutually_exclusive=[ + ['template', 'template_uuid'], + ], + ) + + result = {'failed': False, 'changed': False} + + vm = XenServerVM(module) + + # Find existing VM + if vm.exists(): + if module.params['state'] == "absent": + vm.destroy() + result['changed'] = True + elif module.params['state'] == "present": + config_changes = vm.reconfigure() + + if config_changes: + result['changed'] = True + + # Make new disk and network changes more user friendly + # and informative. 
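+            # For example (hypothetical values), a raw entry such as
+            # {"disks_new": [(1, "2")]} is expanded below into
+            # {"disks_new": [{"position": 1, "vbd_userdevice": "2", "size": "10gb"}]},
+            # merging in the disk parameters the user supplied for that position.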
+ for change in config_changes: + if isinstance(change, dict): + if change.get('disks_new'): + disks_new = [] + + for position, userdevice in change['disks_new']: + disk_new_params = {"position": position, "vbd_userdevice": userdevice} + disk_params = module.params['disks'][position] + + for k in disk_params.keys(): + if disk_params[k] is not None: + disk_new_params[k] = disk_params[k] + + disks_new.append(disk_new_params) + + if disks_new: + change['disks_new'] = disks_new + + elif change.get('networks_new'): + networks_new = [] + + for position, device in change['networks_new']: + network_new_params = {"position": position, "vif_device": device} + network_params = module.params['networks'][position] + + for k in network_params.keys(): + if network_params[k] is not None: + network_new_params[k] = network_params[k] + + networks_new.append(network_new_params) + + if networks_new: + change['networks_new'] = networks_new + + result['changes'] = config_changes + + elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]: + result['changed'] = vm.set_power_state(module.params['state']) + elif module.params['state'] != "absent": + vm.deploy() + result['changed'] = True + + if module.params['wait_for_ip_address'] and module.params['state'] != "absent": + vm.wait_for_ip_address() + + result['instance'] = vm.gather_facts() + + if result['failed']: + module.fail_json(**result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py b/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py new file mode 100644 index 000000000..dd28cf7d0 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: xenserver_guest_info +short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool +description: > + This module can be used to gather essential VM facts. +author: +- Bojan Vitnik (@bvitnik) +notes: +- Minimal supported version of XenServer is 5.6. +- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. +- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside + Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your + Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' +- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are + accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' 
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) + which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' +- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change. +requirements: +- python >= 2.6 +- XenAPI +options: + name: + description: + - Name of the VM to gather facts from. + - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. + - This parameter is case sensitive. + type: str + aliases: [ name_label ] + uuid: + description: + - UUID of the VM to gather fact of. This is XenServer's unique identifier. + - It is required if name is not unique. + type: str +extends_documentation_fragment: +- community.general.xenserver.documentation +- community.general.attributes +- community.general.attributes.info_module +''' + +EXAMPLES = r''' +- name: Gather facts + community.general.xenserver_guest_info: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + name: testvm_11 + delegate_to: localhost + register: facts +''' + +RETURN = r''' +instance: + description: Metadata about the VM + returned: always + type: dict + sample: { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "testvm_11-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" + }, + { + "name": "testvm_11-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" + } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "testvm_11", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" + } + } +''' + +HAS_XENAPI = False +try: + import XenAPI # noqa: F401, pylint: disable=unused-import + HAS_XENAPI = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XenServerObject, 
get_object_ref, + gather_vm_params, gather_vm_facts) + + +class XenServerVM(XenServerObject): + """Class for managing XenServer VM. + + Attributes: + vm_ref (str): XAPI reference to VM. + vm_params (dict): A dictionary with VM parameters as returned + by gather_vm_params() function. + """ + + def __init__(self, module): + """Inits XenServerVM using module parameters. + + Args: + module: Reference to AnsibleModule object. + """ + super(XenServerVM, self).__init__(module) + + self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") + self.gather_params() + + def gather_params(self): + """Gathers all VM parameters available in XAPI database.""" + self.vm_params = gather_vm_params(self.module, self.vm_ref) + + def gather_facts(self): + """Gathers and returns VM facts.""" + return gather_vm_facts(self.module, self.vm_params) + + +def main(): + argument_spec = xenserver_common_argument_spec() + argument_spec.update( + name=dict(type='str', aliases=['name_label']), + uuid=dict(type='str'), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[ + ['name', 'uuid'], + ], + ) + + result = {'failed': False, 'changed': False} + + # Module will exit with an error message if no VM is found. + vm = XenServerVM(module) + + # Gather facts. + result['instance'] = vm.gather_facts() + + if result['failed']: + module.fail_json(**result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py b/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py new file mode 100644 index 000000000..ba88bbf1d --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py @@ -0,0 +1,275 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: xenserver_guest_powerstate +short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool +description: > + This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot or shutdown guest OS of virtual machine. +author: +- Bojan Vitnik (@bvitnik) +notes: +- Minimal supported version of XenServer is 5.6. +- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. +- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside + Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your + Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: + U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' +- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are + accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' 
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) + which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' +requirements: +- python >= 2.6 +- XenAPI +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Specify the state VM should be in. + - If C(state) is set to value other than C(present), then VM is transitioned into required state and facts are returned. + - If C(state) is set to C(present), then VM is just checked for existence and facts are returned. + type: str + default: present + choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ] + name: + description: + - Name of the VM to manage. + - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. + - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. + - This parameter is case sensitive. + type: str + aliases: [ name_label ] + uuid: + description: + - UUID of the VM to manage if known. This is XenServer's unique identifier. + - It is required if name is not unique. + type: str + wait_for_ip_address: + description: + - Wait until XenServer detects an IP address for the VM. + - This requires XenServer Tools to be preinstalled on the VM to work properly. + type: bool + default: false + state_change_timeout: + description: + - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: true).' + - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. + - In case of timeout, module will generate an error message. 
+ type: int + default: 0 +extends_documentation_fragment: +- community.general.xenserver.documentation +- community.general.attributes + +''' + +EXAMPLES = r''' +- name: Power on VM + community.general.xenserver_guest_powerstate: + hostname: "{{ xenserver_hostname }}" + username: "{{ xenserver_username }}" + password: "{{ xenserver_password }}" + name: testvm_11 + state: powered-on + delegate_to: localhost + register: facts +''' + +RETURN = r''' +instance: + description: Metadata about the VM + returned: always + type: dict + sample: { + "cdrom": { + "type": "none" + }, + "customization_agent": "native", + "disks": [ + { + "name": "windows-template-testing-0", + "name_desc": "", + "os_device": "xvda", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "0" + }, + { + "name": "windows-template-testing-1", + "name_desc": "", + "os_device": "xvdb", + "size": 42949672960, + "sr": "Local storage", + "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", + "vbd_userdevice": "1" + } + ], + "domid": "56", + "folder": "", + "hardware": { + "memory_mb": 8192, + "num_cpu_cores_per_socket": 2, + "num_cpus": 4 + }, + "home_server": "", + "is_template": false, + "name": "windows-template-testing", + "name_desc": "", + "networks": [ + { + "gateway": "192.168.0.254", + "gateway6": "fc00::fffe", + "ip": "192.168.0.200", + "ip6": [ + "fe80:0000:0000:0000:e9cb:625a:32c5:c291", + "fc00:0000:0000:0000:0000:0000:0000:0001" + ], + "mac": "ba:91:3a:48:20:76", + "mtu": "1500", + "name": "Pool-wide network associated with eth1", + "netmask": "255.255.255.128", + "prefix": "25", + "prefix6": "64", + "vif_device": "0" + } + ], + "other_config": { + "base_template_name": "Windows Server 2016 (64-bit)", + "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", + "install-methods": "cdrom", + "instant": "true", + "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" + }, + "platform": { + "acpi": "1", + "apic": "true", + "cores-per-socket": "2", + "device_id": "0002", + "hpet": "true", + "nx": "true", + "pae": "true", + "timeoffset": "-25200", + "vga": "std", + "videoram": "8", + "viridian": "true", + "viridian_reference_tsc": "true", + "viridian_time_ref_count": "true" + }, + "state": "poweredon", + "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", + "xenstore_data": { + "vm-data": "" + } + } +''' + +HAS_XENAPI = False +try: + import XenAPI # noqa: F401, pylint: disable=unused-import + HAS_XENAPI = True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XenServerObject, get_object_ref, + gather_vm_params, gather_vm_facts, set_vm_power_state, + wait_for_vm_ip_address) + + +class XenServerVM(XenServerObject): + """Class for managing XenServer VM. + + Attributes: + vm_ref (str): XAPI reference to VM. + vm_params (dict): A dictionary with VM parameters as returned + by gather_vm_params() function. + """ + + def __init__(self, module): + """Inits XenServerVM using module parameters. + + Args: + module: Reference to Ansible module object. 
+ """ + super(XenServerVM, self).__init__(module) + + self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") + self.gather_params() + + def gather_params(self): + """Gathers all VM parameters available in XAPI database.""" + self.vm_params = gather_vm_params(self.module, self.vm_ref) + + def gather_facts(self): + """Gathers and returns VM facts.""" + return gather_vm_facts(self.module, self.vm_params) + + def set_power_state(self, power_state): + """Controls VM power state.""" + state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) + + # If state has changed, update vm_params. + if state_changed: + self.vm_params['power_state'] = current_state.capitalize() + + return state_changed + + def wait_for_ip_address(self): + """Waits for VM to acquire an IP address.""" + self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) + + +def main(): + argument_spec = xenserver_common_argument_spec() + argument_spec.update( + state=dict(type='str', default='present', + choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']), + name=dict(type='str', aliases=['name_label']), + uuid=dict(type='str'), + wait_for_ip_address=dict(type='bool', default=False), + state_change_timeout=dict(type='int', default=0), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[ + ['name', 'uuid'], + ], + ) + + result = {'failed': False, 'changed': False} + + # Module will exit with an error message if no VM is found. + vm = XenServerVM(module) + + # Set VM power state. + if module.params['state'] != "present": + result['changed'] = vm.set_power_state(module.params['state']) + + if module.params['wait_for_ip_address']: + vm.wait_for_ip_address() + + result['instance'] = vm.gather_facts() + + if result['failed']: + module.fail_json(**result) + else: + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/xfconf.py b/ansible_collections/community/general/plugins/modules/xfconf.py new file mode 100644 index 000000000..567117d40 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xfconf.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2017, Joseph Benden +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: xfconf +author: + - "Joseph Benden (@jbenden)" + - "Alexei Znamensky (@russoz)" +short_description: Edit XFCE4 Configurations +description: + - This module allows for the manipulation of Xfce 4 Configuration with the help of + xfconf-query. Please see the xfconf-query(1) man page for more details. +seealso: + - name: xfconf-query(1) man page + description: Manual page of the C(xfconf-query) tool at the XFCE documentation site. + link: 'https://docs.xfce.org/xfce/xfconf/xfconf-query' + + - name: xfconf - Configuration Storage System + description: XFCE documentation for the Xfconf configuration system. 
+    link: 'https://docs.xfce.org/xfce/xfconf/start'
+
+extends_documentation_fragment:
+  - community.general.attributes
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+
+options:
+  channel:
+    description:
+      - An Xfconf preference channel is a top-level tree key inside of the
+        Xfconf repository that corresponds to the location for which all
+        application properties/keys are stored. See man xfconf-query(1).
+    required: true
+    type: str
+  property:
+    description:
+      - An Xfce preference key is an element in the Xfconf repository
+        that corresponds to an application preference. See man xfconf-query(1).
+    required: true
+    type: str
+  value:
+    description:
+      - Preference properties typically have simple values such as strings,
+        integers, or lists of strings and integers. See man xfconf-query(1).
+    type: list
+    elements: raw
+  value_type:
+    description:
+      - The type of value being set.
+      - When providing more than one I(value_type), the length of the list must
+        be equal to the length of I(value).
+      - If only one I(value_type) is provided, but I(value) contains more than
+        one element, that I(value_type) will be applied to all elements of I(value).
+      - If the I(property) being set is an array and it can possibly have only one
+        element in the array, then I(force_array=true) must be used to ensure
+        that C(xfconf-query) will interpret the value as an array rather than a
+        scalar.
+      - Support for C(uchar), C(char), C(uint64), and C(int64) has been added in community.general 4.8.0.
+    type: list
+    elements: str
+    choices: [ string, int, double, bool, uint, uchar, char, uint64, int64, float ]
+  state:
+    type: str
+    description:
+      - The action to take upon the property/value.
+      - The state C(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead.
+    choices: [ present, absent ]
+    default: "present"
+  force_array:
+    description:
+      - Force array even if only one element.
+    type: bool
+    default: false
+    aliases: ['array']
+    version_added: 1.0.0
+  disable_facts:
+    description:
+      - The value C(false) is no longer allowed since community.general 4.0.0.
+      - This option is deprecated, and will be removed in community.general 8.0.0.
+    type: bool
+    default: true
+    version_added: 2.1.0
+'''
+
+EXAMPLES = """
+- name: Change the DPI to "192"
+  xfconf:
+    channel: "xsettings"
+    property: "/Xft/DPI"
+    value_type: "int"
+    value: "192"
+
+- name: Set workspace names (4)
+  xfconf:
+    channel: xfwm4
+    property: /general/workspace_names
+    value_type: string
+    value: ['Main', 'Work1', 'Work2', 'Tmp']
+
+- name: Set workspace names (1)
+  xfconf:
+    channel: xfwm4
+    property: /general/workspace_names
+    value_type: string
+    value: ['Main']
+    force_array: true
+"""
+
+RETURN = '''
+  channel:
+    description: The channel specified in the module parameters
+    returned: success
+    type: str
+    sample: "xsettings"
+  property:
+    description: The property specified in the module parameters
+    returned: success
+    type: str
+    sample: "/Xft/DPI"
+  value_type:
+    description:
+      - The type of the value that was changed (C(none) for C(reset)
+        state). Either a single string value or a list of strings for array
+        types.
+      - This is a string or a list of strings.
+    returned: success
+    type: any
+    sample: '"int" or ["str", "str", "str"]'
+  value:
+    description:
+      - The value of the preference key after executing the module. Either a
+        single string value or a list of strings for array types.
+      - This is a string or a list of strings.
+ returned: success + type: any + sample: '"192" or ["orange", "yellow", "violet"]' + previous_value: + description: + - The value of the preference key before executing the module. + Either a single string value or a list of strings for array types. + - This is a string or a list of strings. + returned: success + type: any + sample: '"96" or ["red", "blue", "green"]' + cmd: + description: + - A list with the resulting C(xfconf-query) command executed by the module. + returned: success + type: list + elements: str + version_added: 5.4.0 + sample: + - /usr/bin/xfconf-query + - --channel + - xfce4-panel + - --property + - /plugins/plugin-19/timezone + - --create + - --type + - string + - --set + - Pacific/Auckland +''' + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner + + +class XFConfProperty(StateModuleHelper): + change_params = ('value', ) + diff_params = ('value', ) + output_params = ('property', 'channel', 'value') + facts_params = ('property', 'channel', 'value') + module = dict( + argument_spec=dict( + state=dict(type='str', choices=("present", "absent"), default="present"), + channel=dict(type='str', required=True), + property=dict(type='str', required=True), + value_type=dict(type='list', elements='str', + choices=('string', 'int', 'double', 'bool', 'uint', 'uchar', 'char', 'uint64', 'int64', 'float')), + value=dict(type='list', elements='raw'), + force_array=dict(type='bool', default=False, aliases=['array']), + disable_facts=dict( + type='bool', default=True, + removed_in_version='8.0.0', + removed_from_collection='community.general' + ), + ), + required_if=[('state', 'present', ['value', 'value_type'])], + required_together=[('value', 'value_type')], + supports_check_mode=True, + ) + + default_state = 'present' + + def update_xfconf_output(self, **kwargs): + self.update_vars(meta={"output": True, "fact": True}, **kwargs) + + def __init_module__(self): + self.runner = xfconf_runner(self.module) + self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.vars.property, + self.vars.channel) + self.vars.set('previous_value', self._get(), fact=True) + self.vars.set('type', self.vars.value_type, fact=True) + self.vars.meta('value').set(initial_value=self.vars.previous_value) + + if self.vars.disable_facts is False: + self.do_raise('Returning results as facts has been removed. 
Stop using disable_facts=false.') + + def process_command_output(self, rc, out, err): + if err.rstrip() == self.does_not: + return None + if rc or len(err): + self.do_raise('xfconf-query failed with error (rc={0}): {1}'.format(rc, err)) + + result = out.rstrip() + if "Value is an array with" in result: + result = result.split("\n") + result.pop(0) + result.pop(0) + + return result + + def _get(self): + with self.runner('channel property', output_process=self.process_command_output) as ctx: + return ctx.run() + + def state_absent(self): + with self.runner('channel property reset', check_mode_skip=True) as ctx: + ctx.run(reset=True) + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + self.vars.value = None + + def state_present(self): + # stringify all values - in the CLI they will all be happy strings anyway + # and by doing this here the rest of the code can be agnostic to it + self.vars.value = [str(v) for v in self.vars.value] + value_type = self.vars.value_type + + values_len = len(self.vars.value) + types_len = len(value_type) + + if types_len == 1: + # use one single type for the entire list + value_type = value_type * values_len + elif types_len != values_len: + # or complain if lists' lengths are different + self.do_raise('Number of elements in "value" and "value_type" must be the same') + + # calculates if it is an array + self.vars.is_array = \ + bool(self.vars.force_array) or \ + isinstance(self.vars.previous_value, list) or \ + values_len > 1 + + with self.runner('channel property create force_array values_and_types', check_mode_skip=True) as ctx: + ctx.run(create=True, force_array=self.vars.is_array, values_and_types=(self.vars.value, value_type)) + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + if not self.vars.is_array: + self.vars.value = self.vars.value[0] + self.vars.type = value_type[0] + else: + self.vars.type = value_type + + +def main(): + XFConfProperty.execute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/xfconf_info.py b/ansible_collections/community/general/plugins/modules/xfconf_info.py new file mode 100644 index 000000000..0a99201ef --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xfconf_info.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: xfconf_info +author: + - "Alexei Znamensky (@russoz)" +short_description: Retrieve XFCE4 configurations +version_added: 3.5.0 +description: + - This module allows retrieving Xfce 4 configurations with the help of C(xfconf-query). +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + channel: + description: + - > + A Xfconf preference channel is a top-level tree key, inside of the + Xfconf repository that corresponds to the location for which all + application properties/keys are stored. 
+ - If not provided, the module will list all available channels. + type: str + property: + description: + - > + A Xfce preference key is an element in the Xfconf repository + that corresponds to an application preference. + - If provided, then I(channel) is required. + - If not provided and a I(channel) is provided, then the module will list all available properties in that I(channel). + type: str +notes: + - See man xfconf-query(1) for more details. +''' + +EXAMPLES = """ +- name: Get list of all available channels + community.general.xfconf_info: {} + register: result + +- name: Get list of all properties in a specific channel + community.general.xfconf_info: + channel: xsettings + register: result + +- name: Retrieve the DPI value + community.general.xfconf_info: + channel: xsettings + property: /Xft/DPI + register: result + +- name: Get workspace names (4) + community.general.xfconf_info: + channel: xfwm4 + property: /general/workspace_names + register: result +""" + +RETURN = ''' + channels: + description: + - List of available channels. + - Returned when the module receives no parameter at all. + returned: success + type: list + elements: str + sample: + - xfce4-desktop + - displays + - xsettings + - xfwm4 + properties: + description: + - List of available properties for a specific channel. + - Returned by passing only the I(channel) parameter to the module. + returned: success + type: list + elements: str + sample: + - /Gdk/WindowScalingFactor + - /Gtk/ButtonImages + - /Gtk/CursorThemeSize + - /Gtk/DecorationLayout + - /Gtk/FontName + - /Gtk/MenuImages + - /Gtk/MonospaceFontName + - /Net/DoubleClickTime + - /Net/IconThemeName + - /Net/ThemeName + - /Xft/Antialias + - /Xft/Hinting + - /Xft/HintStyle + - /Xft/RGBA + is_array: + description: + - Flag indicating whether the property is an array or not. + returned: success + type: bool + value: + description: + - The value of the property. Empty if the property is of array type. + returned: success + type: str + sample: Monospace 10 + value_array: + description: + - The array value of the property. Empty if the property is not of array type. 
+ returned: success + type: list + elements: str + sample: + - Main + - Work + - Tmp +''' + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner + + +class XFConfInfo(ModuleHelper): + module = dict( + argument_spec=dict( + channel=dict(type='str'), + property=dict(type='str'), + ), + required_by=dict( + property=['channel'] + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = xfconf_runner(self.module, check_rc=True) + self.vars.set("list_arg", False, output=False) + self.vars.set("is_array", False) + + def process_command_output(self, rc, out, err): + result = out.rstrip() + if "Value is an array with" in result: + result = result.split("\n") + result.pop(0) + result.pop(0) + self.vars.is_array = True + + return result + + def _process_list_properties(self, rc, out, err): + return out.splitlines() + + def _process_list_channels(self, rc, out, err): + lines = out.splitlines() + lines.pop(0) + lines = [s.lstrip() for s in lines] + return lines + + def __run__(self): + self.vars.list_arg = not (bool(self.vars.channel) and bool(self.vars.property)) + output = 'value' + proc = self.process_command_output + if self.vars.channel is None: + output = 'channels' + proc = self._process_list_channels + elif self.vars.property is None: + output = 'properties' + proc = self._process_list_properties + + with self.runner.context('list_arg channel property', output_process=proc) as ctx: + result = ctx.run(**self.vars) + + if not self.vars.list_arg and self.vars.is_array: + output = "value_array" + self.vars.set(output, result) + + +def main(): + XFConfInfo.execute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/xfs_quota.py b/ansible_collections/community/general/plugins/modules/xfs_quota.py new file mode 100644 index 000000000..6d0521990 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xfs_quota.py @@ -0,0 +1,504 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2018, Emmanouil Kampitakis +# Copyright (c) 2018, William Leemans + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: xfs_quota +short_description: Manage quotas on XFS filesystems +description: + - Configure quotas on XFS filesystems. + - Before using this module /etc/projects and /etc/projid need to be configured. +author: + - William Leemans (@bushvin) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + type: + description: + - The XFS quota type. + type: str + required: true + choices: + - user + - group + - project + name: + description: + - The name of the user, group or project to apply the quota to, if other than default. + type: str + mountpoint: + description: + - The mount point on which to apply the quotas. + type: str + required: true + bhard: + description: + - Hard blocks quota limit. + - This argument supports human readable sizes. + type: str + bsoft: + description: + - Soft blocks quota limit. + - This argument supports human readable sizes. + type: str + ihard: + description: + - Hard inodes quota limit. 
+    type: int
+  isoft:
+    description:
+      - Soft inodes quota limit.
+    type: int
+  rtbhard:
+    description:
+      - Hard realtime blocks quota limit.
+      - This argument supports human readable sizes.
+    type: str
+  rtbsoft:
+    description:
+      - Soft realtime blocks quota limit.
+      - This argument supports human readable sizes.
+    type: str
+  state:
+    description:
+      - Whether to apply the limits or remove them.
+      - When removing limits, they are set to 0, and not actually removed.
+    type: str
+    default: present
+    choices:
+      - present
+      - absent
+
+requirements:
+  - xfsprogs
+"""
+
+EXAMPLES = r"""
+- name: Set default project soft and hard limit on /opt of 1g
+  community.general.xfs_quota:
+    type: project
+    mountpoint: /opt
+    bsoft: 1g
+    bhard: 1g
+    state: present
+
+- name: Remove the default limits on /opt
+  community.general.xfs_quota:
+    type: project
+    mountpoint: /opt
+    state: absent
+
+- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048
+  community.general.xfs_quota:
+    type: user
+    mountpoint: /home
+    isoft: 1024
+    ihard: 2048
+
+"""
+
+RETURN = r"""
+bhard:
+  description: the current bhard setting in bytes
+  returned: always
+  type: int
+  sample: 1024
+bsoft:
+  description: the current bsoft setting in bytes
+  returned: always
+  type: int
+  sample: 1024
+ihard:
+  description: the current ihard setting in inodes
+  returned: always
+  type: int
+  sample: 100
+isoft:
+  description: the current isoft setting in inodes
+  returned: always
+  type: int
+  sample: 100
+rtbhard:
+  description: the current rtbhard setting in bytes
+  returned: always
+  type: int
+  sample: 1024
+rtbsoft:
+  description: the current rtbsoft setting in bytes
+  returned: always
+  type: int
+  sample: 1024
+"""
+
+import grp
+import os
+import pwd
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            bhard=dict(type="str"),
+            bsoft=dict(type="str"),
+            ihard=dict(type="int"),
+            isoft=dict(type="int"),
+            mountpoint=dict(type="str", required=True),
+            name=dict(type="str"),
+            rtbhard=dict(type="str"),
+            rtbsoft=dict(type="str"),
+            state=dict(type="str", default="present", choices=["absent", "present"]),
+            type=dict(type="str", required=True, choices=["group", "project", "user"]),
+        ),
+        supports_check_mode=True,
+    )
+
+    quota_type = module.params["type"]
+    name = module.params["name"]
+    mountpoint = module.params["mountpoint"]
+    bhard = module.params["bhard"]
+    bsoft = module.params["bsoft"]
+    ihard = module.params["ihard"]
+    isoft = module.params["isoft"]
+    rtbhard = module.params["rtbhard"]
+    rtbsoft = module.params["rtbsoft"]
+    state = module.params["state"]
+
+    xfs_quota_bin = module.get_bin_path("xfs_quota", True)
+
+    if bhard is not None:
+        bhard = human_to_bytes(bhard)
+
+    if bsoft is not None:
+        bsoft = human_to_bytes(bsoft)
+
+    if rtbhard is not None:
+        rtbhard = human_to_bytes(rtbhard)
+
+    if rtbsoft is not None:
+        rtbsoft = human_to_bytes(rtbsoft)
+
+    result = dict(
+        changed=False,
+    )
+
+    if not os.path.ismount(mountpoint):
+        module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
+
+    mp = get_fs_by_mountpoint(mountpoint)
+    if mp is None:
+        module.fail_json(
+            msg="Path '%s' is not a mount point or not located on an xfs file system."
+ % mountpoint, + **result + ) + + if quota_type == "user": + type_arg = "-u" + quota_default = "root" + if name is None: + name = quota_default + + if ( + "uquota" not in mp["mntopts"] + and "usrquota" not in mp["mntopts"] + and "quota" not in mp["mntopts"] + and "uqnoenforce" not in mp["mntopts"] + and "qnoenforce" not in mp["mntopts"] + ): + module.fail_json( + msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." + % mountpoint, + **result + ) + try: + pwd.getpwnam(name) + except KeyError as e: + module.fail_json(msg="User '%s' does not exist." % name, **result) + + elif quota_type == "group": + type_arg = "-g" + quota_default = "root" + if name is None: + name = quota_default + + if ( + "gquota" not in mp["mntopts"] + and "grpquota" not in mp["mntopts"] + and "gqnoenforce" not in mp["mntopts"] + ): + module.fail_json( + msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" + % (mountpoint, mp["mntopts"]), + **result + ) + try: + grp.getgrnam(name) + except KeyError as e: + module.fail_json(msg="User '%s' does not exist." % name, **result) + + elif quota_type == "project": + type_arg = "-p" + quota_default = "#0" + if name is None: + name = quota_default + + if ( + "pquota" not in mp["mntopts"] + and "prjquota" not in mp["mntopts"] + and "pqnoenforce" not in mp["mntopts"] + ): + module.fail_json( + msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." + % mountpoint, + **result + ) + + if name != quota_default and not os.path.isfile("/etc/projects"): + module.fail_json(msg="Path '/etc/projects' does not exist.", **result) + + if name != quota_default and not os.path.isfile("/etc/projid"): + module.fail_json(msg="Path '/etc/projid' does not exist.", **result) + + if name != quota_default and name is not None and get_project_id(name) is None: + module.fail_json( + msg="Entry '%s' has not been defined in /etc/projid." % name, **result + ) + + prj_set = True + if name != quota_default: + cmd = "project %s" % name + rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint) + if rc != 0: + result["cmd"] = cmd + result["rc"] = rc + result["stdout"] = stdout + result["stderr"] = stderr + module.fail_json(msg="Could not get project state.", **result) + else: + for line in stdout.split("\n"): + if ( + "Project Id '%s' - is not set." 
in line + or "project identifier is not set" in line + ): + prj_set = False + break + + if state == "present" and not prj_set: + if not module.check_mode: + cmd = "project -s %s" % name + rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint) + if rc != 0: + result["cmd"] = cmd + result["rc"] = rc + result["stdout"] = stdout + result["stderr"] = stderr + module.fail_json( + msg="Could not get quota realtime block report.", **result + ) + + result["changed"] = True + + elif state == "absent" and prj_set and name != quota_default: + if not module.check_mode: + cmd = "project -C %s" % name + rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint) + if rc != 0: + result["cmd"] = cmd + result["rc"] = rc + result["stdout"] = stdout + result["stderr"] = stderr + module.fail_json( + msg="Failed to clear managed tree from project quota control.", **result + ) + + result["changed"] = True + + current_bsoft, current_bhard = quota_report( + module, xfs_quota_bin, mountpoint, name, quota_type, "b" + ) + current_isoft, current_ihard = quota_report( + module, xfs_quota_bin, mountpoint, name, quota_type, "i" + ) + current_rtbsoft, current_rtbhard = quota_report( + module, xfs_quota_bin, mountpoint, name, quota_type, "rtb" + ) + + # Set limits + if state == "absent": + bhard = 0 + bsoft = 0 + ihard = 0 + isoft = 0 + rtbhard = 0 + rtbsoft = 0 + + # Ensure that a non-existing quota does not trigger a change + current_bsoft = current_bsoft if current_bsoft is not None else 0 + current_bhard = current_bhard if current_bhard is not None else 0 + current_isoft = current_isoft if current_isoft is not None else 0 + current_ihard = current_ihard if current_ihard is not None else 0 + current_rtbsoft = current_rtbsoft if current_rtbsoft is not None else 0 + current_rtbhard = current_rtbhard if current_rtbhard is not None else 0 + + result["xfs_quota"] = dict( + bsoft=current_bsoft, + bhard=current_bhard, + isoft=current_isoft, + ihard=current_ihard, + rtbsoft=current_rtbsoft, + rtbhard=current_rtbhard, + ) + + limit = [] + if bsoft is not None and int(bsoft) != current_bsoft: + limit.append("bsoft=%s" % bsoft) + result["bsoft"] = int(bsoft) + + if bhard is not None and int(bhard) != current_bhard: + limit.append("bhard=%s" % bhard) + result["bhard"] = int(bhard) + + if isoft is not None and isoft != current_isoft: + limit.append("isoft=%s" % isoft) + result["isoft"] = isoft + + if ihard is not None and ihard != current_ihard: + limit.append("ihard=%s" % ihard) + result["ihard"] = ihard + + if rtbsoft is not None and int(rtbsoft) != current_rtbsoft: + limit.append("rtbsoft=%s" % rtbsoft) + result["rtbsoft"] = int(rtbsoft) + + if rtbhard is not None and int(rtbhard) != current_rtbhard: + limit.append("rtbhard=%s" % rtbhard) + result["rtbhard"] = int(rtbhard) + + if len(limit) > 0: + if not module.check_mode: + if name == quota_default: + cmd = "limit %s -d %s" % (type_arg, " ".join(limit)) + else: + cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name) + + rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint) + if rc != 0: + result["cmd"] = cmd + result["rc"] = rc + result["stdout"] = stdout + result["stderr"] = stderr + module.fail_json(msg="Could not set limits.", **result) + + result["changed"] = True + + module.exit_json(**result) + + +def quota_report(module, xfs_quota_bin, mountpoint, name, quota_type, used_type): + soft = None + hard = None + + if quota_type == "project": + type_arg = "-p" + elif quota_type == "user": + type_arg = "-u" + elif quota_type == 
"group": + type_arg = "-g" + + if used_type == "b": + used_arg = "-b" + used_name = "blocks" + factor = 1024 + elif used_type == "i": + used_arg = "-i" + used_name = "inodes" + factor = 1 + elif used_type == "rtb": + used_arg = "-r" + used_name = "realtime blocks" + factor = 1024 + + rc, stdout, stderr = exec_quota( + module, xfs_quota_bin, "report %s %s" % (type_arg, used_arg), mountpoint + ) + + if rc != 0: + result = dict( + changed=False, + rc=rc, + stdout=stdout, + stderr=stderr, + ) + module.fail_json(msg="Could not get quota report for %s." % used_name, **result) + + for line in stdout.split("\n"): + line = line.strip().split() + if len(line) > 3 and line[0] == name: + soft = int(line[2]) * factor + hard = int(line[3]) * factor + break + + return soft, hard + + +def exec_quota(module, xfs_quota_bin, cmd, mountpoint): + cmd = [xfs_quota_bin, "-x", "-c"] + [cmd, mountpoint] + (rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True) + if ( + "XFS_GETQUOTA: Operation not permitted" in stderr.split("\n") + or rc == 1 + and "xfs_quota: cannot set limits: Operation not permitted" + in stderr.split("\n") + ): + module.fail_json( + msg="You need to be root or have CAP_SYS_ADMIN capability to perform this operation" + ) + + return rc, stdout, stderr + + +def get_fs_by_mountpoint(mountpoint): + mpr = None + with open("/proc/mounts", "r") as s: + for line in s.readlines(): + mp = line.strip().split() + if len(mp) == 6 and mp[1] == mountpoint and mp[2] == "xfs": + mpr = dict( + zip(["spec", "file", "vfstype", "mntopts", "freq", "passno"], mp) + ) + mpr["mntopts"] = mpr["mntopts"].split(",") + break + return mpr + + +def get_project_id(name): + prjid = None + with open("/etc/projid", "r") as s: + for line in s.readlines(): + line = line.strip().partition(":") + if line[0] == name: + prjid = line[2] + break + + return prjid + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/xml.py b/ansible_collections/community/general/plugins/modules/xml.py new file mode 100644 index 000000000..5b9bba355 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/xml.py @@ -0,0 +1,996 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Red Hat, Inc. +# Copyright (c) 2014, Tim Bielawa +# Copyright (c) 2014, Magnus Hedemark +# Copyright (c) 2017, Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: xml +short_description: Manage bits and pieces of XML files or strings +description: + - A CRUD-like interface to managing bits of XML files. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + path: + description: + - Path to the file to operate on. + - This file must exist ahead of time. + - This parameter is required, unless I(xmlstring) is given. + type: path + aliases: [ dest, file ] + xmlstring: + description: + - A string containing XML on which to operate. + - This parameter is required, unless I(path) is given. + type: str + xpath: + description: + - A valid XPath expression describing the item(s) you want to manipulate. + - Operates on the document root, C(/), by default. 
+ type: str + namespaces: + description: + - The namespace C(prefix:uri) mapping for the XPath expression. + - Needs to be a C(dict), not a C(list) of items. + type: dict + default: {} + state: + description: + - Set or remove an xpath selection (node(s), attribute(s)). + type: str + choices: [ absent, present ] + default: present + aliases: [ ensure ] + attribute: + description: + - The attribute to select when using parameter I(value). + - This is a string, not prepended with C(@). + type: raw + value: + description: + - Desired state of the selected attribute. + - Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)). + - Elements default to no value (but present). + - Attributes default to an empty string. + type: raw + add_children: + description: + - Add additional child-element(s) to a selected element for a given I(xpath). + - Child elements must be given in a list and each item may be either a string + (eg. C(children=ansible) to add an empty C() child element), + or a hash where the key is an element name and the value is the element value. + - This parameter requires I(xpath) to be set. + type: list + elements: raw + set_children: + description: + - Set the child-element(s) of a selected element for a given I(xpath). + - Removes any existing children. + - Child elements must be specified as in I(add_children). + - This parameter requires I(xpath) to be set. + type: list + elements: raw + count: + description: + - Search for a given I(xpath) and provide the count of any matches. + - This parameter requires I(xpath) to be set. + type: bool + default: false + print_match: + description: + - Search for a given I(xpath) and print out any matches. + - This parameter requires I(xpath) to be set. + type: bool + default: false + pretty_print: + description: + - Pretty print XML output. + type: bool + default: false + content: + description: + - Search for a given I(xpath) and get content. + - This parameter requires I(xpath) to be set. + type: str + choices: [ attribute, text ] + input_type: + description: + - Type of input for I(add_children) and I(set_children). + type: str + choices: [ xml, yaml ] + default: yaml + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + type: bool + default: false + strip_cdata_tags: + description: + - Remove CDATA tags surrounding text values. + - Note that this might break your XML file if text values contain characters that could be interpreted as XML. + type: bool + default: false + insertbefore: + description: + - Add additional child-element(s) before the first selected element for a given I(xpath). + - Child elements must be given in a list and each item may be either a string + (eg. C(children=ansible) to add an empty C() child element), + or a hash where the key is an element name and the value is the element value. + - This parameter requires I(xpath) to be set. + type: bool + default: false + insertafter: + description: + - Add additional child-element(s) after the last selected element for a given I(xpath). + - Child elements must be given in a list and each item may be either a string + (eg. C(children=ansible) to add an empty C() child element), + or a hash where the key is an element name and the value is the element value. + - This parameter requires I(xpath) to be set. 
+ type: bool + default: false +requirements: +- lxml >= 2.3.0 +notes: +- Use the C(--check) and C(--diff) options when testing your expressions. +- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure. +- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions. +- Beware that in case your XML elements are namespaced, you need to use the I(namespaces) parameter, see the examples. +- Namespaces prefix should be used for all children of an element where namespace is defined, unless another namespace is defined for them. +seealso: +- name: Xml module development community wiki + description: More information related to the development of this xml module. + link: https://github.com/ansible/community/wiki/Module:-xml +- name: Introduction to XPath + description: A brief tutorial on XPath (w3schools.com). + link: https://www.w3schools.com/xml/xpath_intro.asp +- name: XPath Reference document + description: The reference documentation on XSLT/XPath (developer.mozilla.org). + link: https://developer.mozilla.org/en-US/docs/Web/XPath +author: +- Tim Bielawa (@tbielawa) +- Magnus Hedemark (@magnus919) +- Dag Wieers (@dagwieers) +''' + +EXAMPLES = r''' +# Consider the following XML file: +# +# +# Tasty Beverage Co. +# +# Rochefort 10 +# St. Bernardus Abbot 12 +# Schlitz +# +# 10 +# +# +#
<address>http://tastybeverageco.com</address>
+#     </website>
+# </business>
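+
+# A quick illustrative read of the rating text (assumes the bar.xml shown above);
+# 'content: text' returns the matched elements as tag/text pairs in 'matches'.
+- name: Read the current rating value
+  community.general.xml:
+    path: /foo/bar.xml
+    xpath: /business/rating
+    content: text
+  register: rating
+
+- ansible.builtin.debug:
+    var: rating.matches[0].rating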
+ +- name: Remove the 'subjective' attribute of the 'rating' element + community.general.xml: + path: /foo/bar.xml + xpath: /business/rating/@subjective + state: absent + +- name: Set the rating to '11' + community.general.xml: + path: /foo/bar.xml + xpath: /business/rating + value: 11 + +# Retrieve and display the number of nodes +- name: Get count of 'beers' nodes + community.general.xml: + path: /foo/bar.xml + xpath: /business/beers/beer + count: true + register: hits + +- ansible.builtin.debug: + var: hits.count + +# Example where parent XML nodes are created automatically +- name: Add a 'phonenumber' element to the 'business' element + community.general.xml: + path: /foo/bar.xml + xpath: /business/phonenumber + value: 555-555-1234 + +- name: Add several more beers to the 'beers' element + community.general.xml: + path: /foo/bar.xml + xpath: /business/beers + add_children: + - beer: Old Rasputin + - beer: Old Motor Oil + - beer: Old Curmudgeon + +- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element + community.general.xml: + path: /foo/bar.xml + xpath: '/business/beers/beer[text()="Rochefort 10"]' + insertbefore: true + add_children: + - beer: Old Rasputin + - beer: Old Motor Oil + - beer: Old Curmudgeon + +# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements +- name: Add a 'validxhtml' element to the 'website' element + community.general.xml: + path: /foo/bar.xml + xpath: /business/website/validxhtml + +- name: Add an empty 'validatedon' attribute to the 'validxhtml' element + community.general.xml: + path: /foo/bar.xml + xpath: /business/website/validxhtml/@validatedon + +- name: Add or modify an attribute, add element if needed + community.general.xml: + path: /foo/bar.xml + xpath: /business/website/validxhtml + attribute: validatedon + value: 1976-08-05 + +# How to read an attribute value and access it in Ansible +- name: Read an element's attribute values + community.general.xml: + path: /foo/bar.xml + xpath: /business/website/validxhtml + content: attribute + register: xmlresp + +- name: Show an attribute value + ansible.builtin.debug: + var: xmlresp.matches[0].validxhtml.validatedon + +- name: Remove all children from the 'website' element (option 1) + community.general.xml: + path: /foo/bar.xml + xpath: /business/website/* + state: absent + +- name: Remove all children from the 'website' element (option 2) + community.general.xml: + path: /foo/bar.xml + xpath: /business/website + set_children: [] + +# In case of namespaces, like in below XML, they have to be explicitly stated. +# +# +# +# +# +# + +# NOTE: There is the prefix 'x' in front of the 'bar' element, too. 
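+# (Namespace mapping recap for the sample: foo and bar are in http://x.test,
+# baz in http://y.test, and my_namespaced_attribute in http://z.test.)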
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false' + community.general.xml: + path: foo.xml + xpath: /x:foo/x:bar/y:baz + namespaces: + x: http://x.test + y: http://y.test + z: http://z.test + attribute: z:my_namespaced_attribute + value: 'false' + +- name: Adding building nodes with floor subnodes from a YAML variable + community.general.xml: + path: /foo/bar.xml + xpath: /business + add_children: + - building: + # Attributes + name: Scumm bar + location: Monkey island + # Subnodes + _: + - floor: Pirate hall + - floor: Grog storage + - construction_date: "1990" # Only strings are valid + - building: Grog factory + +# Consider this XML for following example - +# +# +# +# part to remove +# +# +# part to keep +# +# + +- name: Delete element node based upon attribute + community.general.xml: + path: bar.xml + xpath: /config/element[@name='test1'] + state: absent +''' + +RETURN = r''' +actions: + description: A dictionary with the original xpath, namespaces and state. + type: dict + returned: success + sample: {xpath: xpath, namespaces: [namespace1, namespace2], state=present} +backup_file: + description: The name of the backup file that was created + type: str + returned: when I(backup=true) + sample: /path/to/file.xml.1942.2017-08-24@14:16:01~ +count: + description: The count of xpath matches. + type: int + returned: when parameter 'count' is set + sample: 2 +matches: + description: The xpath matches found. + type: list + returned: when parameter 'print_match' is set +msg: + description: A message related to the performed action(s). + type: str + returned: always +xmlstring: + description: An XML string of the resulting output. + type: str + returned: when parameter 'xmlstring' is set +''' + +import copy +import json +import os +import re +import traceback + +from io import BytesIO + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +LXML_IMP_ERR = None +try: + from lxml import etree, objectify + HAS_LXML = True +except ImportError: + LXML_IMP_ERR = traceback.format_exc() + HAS_LXML = False + +from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib +from ansible.module_utils.six import iteritems, string_types +from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils.common._collections_compat import MutableMapping + +_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*" +_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT +# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate +# strings wrapped by the other delimiter' XPath trick, especially as simple XPath. 
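+# Illustration: split_xpath_last() below uses these patterns to peel the last
+# step off a simple XPath, for example:
+#   "/business/beers/beer"         -> ("/business/beers", [("beer", None)])
+#   "/business/rating/@subjective" -> ("/business/rating", [("@subjective", None)])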
+_XPSTR = "('(?:.*)'|\"(?:.*)\")" + +_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$") +_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$") +_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$") +_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$") +_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$") +_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$") + + +def has_changed(doc): + orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc))) + obj = etree.tostring(objectify.fromstring(etree.tostring(doc))) + return (orig_obj != obj) + + +def do_print_match(module, tree, xpath, namespaces): + match = tree.xpath(xpath, namespaces=namespaces) + match_xpaths = [] + for m in match: + match_xpaths.append(tree.getpath(m)) + match_str = json.dumps(match_xpaths) + msg = "selector '%s' match: %s" % (xpath, match_str) + finish(module, tree, xpath, namespaces, changed=False, msg=msg) + + +def count_nodes(module, tree, xpath, namespaces): + """ Return the count of nodes matching the xpath """ + hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces) + msg = "found %d nodes" % hits + finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits)) + + +def is_node(tree, xpath, namespaces): + """ Test if a given xpath matches anything and if that match is a node. + + For now we just assume you're only searching for one specific thing.""" + if xpath_matches(tree, xpath, namespaces): + # OK, it found something + match = tree.xpath(xpath, namespaces=namespaces) + if isinstance(match[0], etree._Element): + return True + + return False + + +def is_attribute(tree, xpath, namespaces): + """ Test if a given xpath matches and that match is an attribute + + An xpath attribute search will only match one item""" + if xpath_matches(tree, xpath, namespaces): + match = tree.xpath(xpath, namespaces=namespaces) + if isinstance(match[0], etree._ElementStringResult): + return True + elif isinstance(match[0], etree._ElementUnicodeResult): + return True + return False + + +def xpath_matches(tree, xpath, namespaces): + """ Test if a node exists """ + if tree.xpath(xpath, namespaces=namespaces): + return True + return False + + +def delete_xpath_target(module, tree, xpath, namespaces): + """ Delete an attribute or element from a tree """ + changed = False + try: + for result in tree.xpath(xpath, namespaces=namespaces): + changed = True + # Get the xpath for this result + if is_attribute(tree, xpath, namespaces): + # Delete an attribute + parent = result.getparent() + # Pop this attribute match out of the parent + # node's 'attrib' dict by using this match's + # 'attrname' attribute for the key + parent.attrib.pop(result.attrname) + elif is_node(tree, xpath, namespaces): + # Delete an element + result.getparent().remove(result) + else: + raise Exception("Impossible error") + except Exception as e: + module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e)) + else: + finish(module, tree, xpath, namespaces, changed=changed) + + +def replace_children_of(children, match): + for element in list(match): + match.remove(element) + match.extend(children) + + +def set_target_children_inner(module, tree, xpath, namespaces, children, in_type): + matches = tree.xpath(xpath, namespaces=namespaces) + + # Create a list of our new children + children = children_to_nodes(module, children, in_type) + children_as_string = [etree.tostring(c) for c in 
children] + + changed = False + + # xpaths always return matches as a list, so.... + for match in matches: + # Check if elements differ + if len(list(match)) == len(children): + for idx, element in enumerate(list(match)): + if etree.tostring(element) != children_as_string[idx]: + replace_children_of(children, match) + changed = True + break + else: + replace_children_of(children, match) + changed = True + + return changed + + +def set_target_children(module, tree, xpath, namespaces, children, in_type): + changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type) + # Write it out + finish(module, tree, xpath, namespaces, changed=changed) + + +def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter): + if is_node(tree, xpath, namespaces): + new_kids = children_to_nodes(module, children, in_type) + if insertbefore or insertafter: + insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter) + else: + for node in tree.xpath(xpath, namespaces=namespaces): + node.extend(new_kids) + finish(module, tree, xpath, namespaces, changed=True) + else: + finish(module, tree, xpath, namespaces) + + +def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter): + """ + Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the + first xpath hit, with insertafter, it is inserted after the last xpath hit. + """ + insert_target = tree.xpath(xpath, namespaces=namespaces) + loc_index = 0 if insertbefore else -1 + index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index]) + parent = insert_target[0].getparent() + if insertafter: + index_in_parent += 1 + for child in children: + parent.insert(index_in_parent, child) + index_in_parent += 1 + + +def _extract_xpstr(g): + return g[1:-1] + + +def split_xpath_last(xpath): + """split an XPath of the form /foo/bar/baz into /foo/bar and baz""" + xpath = xpath.strip() + m = _RE_SPLITSIMPLELAST.match(xpath) + if m: + # requesting an element to exist + return (m.group(1), [(m.group(2), None)]) + m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath) + if m: + # requesting an element to exist with an inner text + return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) + + m = _RE_SPLITSIMPLEATTRLAST.match(xpath) + if m: + # requesting an attribute to exist + return (m.group(1), [(m.group(2), None)]) + m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath) + if m: + # requesting an attribute to exist with a value + return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))]) + + m = _RE_SPLITSUBLAST.match(xpath) + if m: + content = [x.strip() for x in m.group(3).split(" and ")] + return (m.group(1), [('/' + m.group(2), content)]) + + m = _RE_SPLITONLYEQVALUE.match(xpath) + if m: + # requesting a change of inner text + return (m.group(1), [("", _extract_xpstr(m.group(2)))]) + return (xpath, []) + + +def nsnameToClark(name, namespaces): + if ":" in name: + (nsname, rawname) = name.split(":") + # return "{{%s}}%s" % (namespaces[nsname], rawname) + return "{{{0}}}{1}".format(namespaces[nsname], rawname) + + # no namespace name here + return name + + +def check_or_make_target(module, tree, xpath, namespaces): + (inner_xpath, changes) = split_xpath_last(xpath) + if (inner_xpath == xpath) or (changes is None): + module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! 
tree is %s" % + (xpath, etree.tostring(tree, pretty_print=True))) + return False + + changed = False + + if not is_node(tree, inner_xpath, namespaces): + changed = check_or_make_target(module, tree, inner_xpath, namespaces) + + # we test again after calling check_or_make_target + if is_node(tree, inner_xpath, namespaces) and changes: + for (eoa, eoa_value) in changes: + if eoa and eoa[0] != '@' and eoa[0] != '/': + # implicitly creating an element + new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml") + if eoa_value: + for nk in new_kids: + nk.text = eoa_value + + for node in tree.xpath(inner_xpath, namespaces=namespaces): + node.extend(new_kids) + changed = True + # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) + elif eoa and eoa[0] == '/': + element = eoa[1:] + new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml") + for node in tree.xpath(inner_xpath, namespaces=namespaces): + node.extend(new_kids) + for nk in new_kids: + for subexpr in eoa_value: + # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" % + # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True)) + check_or_make_target(module, nk, "./" + subexpr, namespaces) + changed = True + + # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True)) + elif eoa == "": + for node in tree.xpath(inner_xpath, namespaces=namespaces): + if (node.text != eoa_value): + node.text = eoa_value + changed = True + + elif eoa and eoa[0] == '@': + attribute = nsnameToClark(eoa[1:], namespaces) + + for element in tree.xpath(inner_xpath, namespaces=namespaces): + changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value) + + if changing: + changed = changed or changing + if eoa_value is None: + value = "" + else: + value = eoa_value + element.attrib[attribute] = value + + # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" % + # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True))) + + else: + module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True)) + + return changed + + +def ensure_xpath_exists(module, tree, xpath, namespaces): + changed = False + + if not is_node(tree, xpath, namespaces): + changed = check_or_make_target(module, tree, xpath, namespaces) + + finish(module, tree, xpath, namespaces, changed) + + +def set_target_inner(module, tree, xpath, namespaces, attribute, value): + changed = False + + try: + if not is_node(tree, xpath, namespaces): + changed = check_or_make_target(module, tree, xpath, namespaces) + except Exception as e: + missing_namespace = "" + # NOTE: This checks only the namespaces defined in root element! + # TODO: Implement a more robust check to check for child namespaces' existence + if tree.getroot().nsmap and ":" not in xpath: + missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n" + module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" % + (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc()) + + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node! 
tree is %s" % + (xpath, etree.tostring(tree, pretty_print=True))) + + for element in tree.xpath(xpath, namespaces=namespaces): + if not attribute: + changed = changed or (element.text != value) + if element.text != value: + element.text = value + else: + changed = changed or (element.get(attribute) != value) + if ":" in attribute: + attr_ns, attr_name = attribute.split(":") + # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name) + attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name) + if element.get(attribute) != value: + element.set(attribute, value) + + return changed + + +def set_target(module, tree, xpath, namespaces, attribute, value): + changed = set_target_inner(module, tree, xpath, namespaces, attribute, value) + finish(module, tree, xpath, namespaces, changed) + + +def get_element_text(module, tree, xpath, namespaces): + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node!" % xpath) + + elements = [] + for element in tree.xpath(xpath, namespaces=namespaces): + elements.append({element.tag: element.text}) + + finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) + + +def get_element_attr(module, tree, xpath, namespaces): + if not is_node(tree, xpath, namespaces): + module.fail_json(msg="Xpath %s does not reference a node!" % xpath) + + elements = [] + for element in tree.xpath(xpath, namespaces=namespaces): + child = {} + for key in element.keys(): + value = element.get(key) + child.update({key: value}) + elements.append({element.tag: child}) + + finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements) + + +def child_to_element(module, child, in_type): + if in_type == 'xml': + infile = BytesIO(to_bytes(child, errors='surrogate_or_strict')) + + try: + parser = etree.XMLParser() + node = etree.parse(infile, parser) + return node.getroot() + except etree.XMLSyntaxError as e: + module.fail_json(msg="Error while parsing child element: %s" % e) + elif in_type == 'yaml': + if isinstance(child, string_types): + return etree.Element(child) + elif isinstance(child, MutableMapping): + if len(child) > 1: + module.fail_json(msg="Can only create children from hashes with one key") + + (key, value) = next(iteritems(child)) + if isinstance(value, MutableMapping): + children = value.pop('_', None) + + node = etree.Element(key, value) + + if children is not None: + if not isinstance(children, list): + module.fail_json(msg="Invalid children type: %s, must be list." % type(children)) + + subnodes = children_to_nodes(module, children) + node.extend(subnodes) + else: + node = etree.Element(key) + node.text = value + return node + else: + module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child)) + else: + module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." 
% in_type) + + +def children_to_nodes(module=None, children=None, type='yaml'): + """turn a str/hash/list of str&hash into a list of elements""" + children = [] if children is None else children + + return [child_to_element(module, child, type) for child in children] + + +def make_pretty(module, tree): + xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + result = dict( + changed=False, + ) + + if module.params['path']: + xml_file = module.params['path'] + with open(xml_file, 'rb') as xml_content: + if xml_string != xml_content.read(): + result['changed'] = True + if not module.check_mode: + if module.params['backup']: + result['backup_file'] = module.backup_local(module.params['path']) + tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + elif module.params['xmlstring']: + result['xmlstring'] = xml_string + # NOTE: Modifying a string is not considered a change ! + if xml_string != module.params['xmlstring']: + result['changed'] = True + + module.exit_json(**result) + + +def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()): + + result = dict( + actions=dict( + xpath=xpath, + namespaces=namespaces, + state=module.params['state'] + ), + changed=has_changed(tree), + ) + + if module.params['count'] or hitcount: + result['count'] = hitcount + + if module.params['print_match'] or matches: + result['matches'] = matches + + if msg: + result['msg'] = msg + + if result['changed']: + if module._diff: + result['diff'] = dict( + before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True), + after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True), + ) + + if module.params['path'] and not module.check_mode: + if module.params['backup']: + result['backup_file'] = module.backup_local(module.params['path']) + tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + if module.params['xmlstring']: + result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print']) + + module.exit_json(**result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', aliases=['dest', 'file']), + xmlstring=dict(type='str'), + xpath=dict(type='str'), + namespaces=dict(type='dict', default={}), + state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']), + value=dict(type='raw'), + attribute=dict(type='raw'), + add_children=dict(type='list', elements='raw'), + set_children=dict(type='list', elements='raw'), + count=dict(type='bool', default=False), + print_match=dict(type='bool', default=False), + pretty_print=dict(type='bool', default=False), + content=dict(type='str', choices=['attribute', 'text']), + input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']), + backup=dict(type='bool', default=False), + strip_cdata_tags=dict(type='bool', default=False), + insertbefore=dict(type='bool', default=False), + insertafter=dict(type='bool', default=False), + ), + supports_check_mode=True, + required_by=dict( + add_children=['xpath'], + attribute=['value'], + content=['xpath'], + set_children=['xpath'], + value=['xpath'], + ), + required_if=[ + ['count', True, ['xpath']], + ['print_match', True, ['xpath']], + ['insertbefore', True, ['xpath']], + ['insertafter', True, ['xpath']], + ], + required_one_of=[ + 
['path', 'xmlstring'], + ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'], + ], + mutually_exclusive=[ + ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'], + ['path', 'xmlstring'], + ['insertbefore', 'insertafter'], + ], + ) + + xml_file = module.params['path'] + xml_string = module.params['xmlstring'] + xpath = module.params['xpath'] + namespaces = module.params['namespaces'] + state = module.params['state'] + value = json_dict_bytes_to_unicode(module.params['value']) + attribute = module.params['attribute'] + set_children = json_dict_bytes_to_unicode(module.params['set_children']) + add_children = json_dict_bytes_to_unicode(module.params['add_children']) + pretty_print = module.params['pretty_print'] + content = module.params['content'] + input_type = module.params['input_type'] + print_match = module.params['print_match'] + count = module.params['count'] + backup = module.params['backup'] + strip_cdata_tags = module.params['strip_cdata_tags'] + insertbefore = module.params['insertbefore'] + insertafter = module.params['insertafter'] + + # Check if we have lxml 2.3.0 or newer installed + if not HAS_LXML: + module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR) + elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'): + module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine') + elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'): + module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.') + + # Check if the file exists + if xml_string: + infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict')) + elif os.path.isfile(xml_file): + infile = open(xml_file, 'rb') + else: + module.fail_json(msg="The target XML source '%s' does not exist." % xml_file) + + # Parse and evaluate xpath expression + if xpath is not None: + try: + etree.XPath(xpath) + except etree.XPathSyntaxError as e: + module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e)) + except etree.XPathEvalError as e: + module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e)) + + # Try to parse in the target XML file + try: + parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags) + doc = etree.parse(infile, parser) + except etree.XMLSyntaxError as e: + module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e)) + + # Ensure we have the original copy to compare + global orig_doc + orig_doc = copy.deepcopy(doc) + + if print_match: + do_print_match(module, doc, xpath, namespaces) + + if count: + count_nodes(module, doc, xpath, namespaces) + + if content == 'attribute': + get_element_attr(module, doc, xpath, namespaces) + elif content == 'text': + get_element_text(module, doc, xpath, namespaces) + + # File exists: + if state == 'absent': + # - absent: delete xpath target + delete_xpath_target(module, doc, xpath, namespaces) + + # - present: carry on + + # children && value both set?: should have already aborted by now + # add_children && set_children both set?: should have already aborted by now + + # set_children set? + if set_children is not None: + set_target_children(module, doc, xpath, namespaces, set_children, input_type) + + # add_children set? 
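+    # (only this path honors insertbefore/insertafter; by default the new
+    # children are appended to every node matched by the xpath)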
+ if add_children: + add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter) + + # No?: Carry on + + # Is the xpath target an attribute selector? + if value is not None: + set_target(module, doc, xpath, namespaces, attribute, value) + + # If an xpath was provided, we need to do something with the data + if xpath is not None: + ensure_xpath_exists(module, doc, xpath, namespaces) + + # Otherwise only reformat the xml data? + if pretty_print: + make_pretty(module, doc) + + module.fail_json(msg="Don't know what to do") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/yarn.py b/ansible_collections/community/general/plugins/modules/yarn.py new file mode 100644 index 000000000..c278951d5 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/yarn.py @@ -0,0 +1,408 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2017 David Gunter +# Copyright (c) 2017 Chris Hoffman +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: yarn +short_description: Manage node.js packages with Yarn +description: + - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/) +author: + - "David Gunter (@verkaufer)" + - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - The name of a node.js library to install + - If omitted all packages in package.json are installed. + - To globally install from local node.js library. Prepend "file:" to the path of the node.js library. + required: false + path: + type: path + description: + - The base path where Node.js libraries will be installed. + - This is where the node_modules folder lives. + required: false + version: + type: str + description: + - The version of the library to be installed. + - Must be in semver format. If "latest" is desired, use "state" arg instead + required: false + global: + description: + - Install the node.js library globally + required: false + default: false + type: bool + executable: + type: path + description: + - The executable location for yarn. + required: false + ignore_scripts: + description: + - Use the --ignore-scripts flag when installing. + required: false + type: bool + default: false + production: + description: + - Install dependencies in production mode. + - Yarn will ignore any dependencies under devDependencies in package.json + required: false + type: bool + default: false + registry: + type: str + description: + - The registry to install modules from. + required: false + state: + type: str + description: + - Installation state of the named node.js library + - If absent is selected, a name option must be provided + required: false + default: present + choices: [ "present", "absent", "latest" ] +requirements: + - Yarn installed in bin path (typically /usr/local/bin) +''' + +EXAMPLES = ''' +- name: Install "imagemin" node.js package. 
+ community.general.yarn: + name: imagemin + path: /app/location + +- name: Install "imagemin" node.js package on version 5.3.1 + community.general.yarn: + name: imagemin + version: '5.3.1' + path: /app/location + +- name: Install "imagemin" node.js package globally. + community.general.yarn: + name: imagemin + global: true + +- name: Remove the globally-installed package "imagemin". + community.general.yarn: + name: imagemin + global: true + state: absent + +- name: Install "imagemin" node.js package from custom registry. + community.general.yarn: + name: imagemin + registry: 'http://registry.mysite.com' + +- name: Install packages based on package.json. + community.general.yarn: + path: /app/location + +- name: Update all packages in package.json to their latest version. + community.general.yarn: + path: /app/location + state: latest +''' + +RETURN = ''' +changed: + description: Whether Yarn changed any package data + returned: always + type: bool + sample: true +msg: + description: Provides an error message if Yarn syntax was incorrect + returned: failure + type: str + sample: "Package must be explicitly named when uninstalling." +invocation: + description: Parameters and values used during execution + returned: success + type: dict + sample: { + "module_args": { + "executable": null, + "globally": false, + "ignore_scripts": false, + "name": null, + "path": "/some/path/folder", + "production": false, + "registry": null, + "state": "present", + "version": null + } + } +out: + description: Output generated from Yarn. + returned: always + type: str + sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4] + Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s." +''' + +import os +import json + +from ansible.module_utils.basic import AnsibleModule + + +class Yarn(object): + + def __init__(self, module, **kwargs): + self.module = module + self.globally = kwargs['globally'] + self.name = kwargs['name'] + self.version = kwargs['version'] + self.path = kwargs['path'] + self.registry = kwargs['registry'] + self.production = kwargs['production'] + self.ignore_scripts = kwargs['ignore_scripts'] + self.executable = kwargs['executable'] + + # Specify a version of package if version arg passed in + self.name_version = None + + if kwargs['version'] and self.name is not None: + self.name_version = self.name + '@' + str(self.version) + elif self.name is not None: + self.name_version = self.name + + def _exec(self, args, run_in_check_mode=False, check_rc=True, unsupported_with_global=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + + with_global_arg = self.globally and not unsupported_with_global + + if with_global_arg: + # Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`) + args.insert(0, 'global') + + cmd = self.executable + args + + if self.production: + cmd.append('--production') + if self.ignore_scripts: + cmd.append('--ignore-scripts') + if self.registry: + cmd.append('--registry') + cmd.append(self.registry) + + # If path is specified, cd into that path and run the command. + cwd = None + if self.path and not with_global_arg: + if not os.path.exists(self.path): + # Module will make directory if not exists. 
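+                    # (if the path exists but is not a directory, the isdir
+                    # check below fails the task)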
+ os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="Path provided %s is not a directory" % self.path) + cwd = self.path + + if not os.path.isfile(os.path.join(self.path, 'package.json')): + self.module.fail_json(msg="Package.json does not exist in provided path.") + + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out, err + + return None, None + + def _process_yarn_error(self, err): + try: + # We need to filter for errors, since Yarn warnings are included in stderr + for line in err.splitlines(): + if json.loads(line)['type'] == 'error': + self.module.fail_json(msg=err) + except Exception: + self.module.fail_json(msg="Unexpected stderr output from Yarn: %s" % err, stderr=err) + + def list(self): + cmd = ['list', '--depth=0', '--json'] + + installed = list() + missing = list() + + if not os.path.isfile(os.path.join(self.path, 'yarn.lock')): + missing.append(self.name) + return installed, missing + + # `yarn global list` should be treated as "unsupported with global" even though it exists, + # because it only only lists binaries, but `yarn global add` can install libraries too. + result, error = self._exec(cmd, run_in_check_mode=True, check_rc=False, unsupported_with_global=True) + + self._process_yarn_error(error) + + for json_line in result.strip().split('\n'): + data = json.loads(json_line) + if data['type'] == 'tree': + dependencies = data['data']['trees'] + + for dep in dependencies: + name, version = dep['name'].rsplit('@', 1) + installed.append(name) + + if self.name not in installed: + missing.append(self.name) + + return installed, missing + + def install(self): + if self.name_version: + # Yarn has a separate command for installing packages by name... + return self._exec(['add', self.name_version]) + # And one for installing all packages in package.json + return self._exec(['install', '--non-interactive']) + + def update(self): + return self._exec(['upgrade', '--latest']) + + def uninstall(self): + return self._exec(['remove', self.name]) + + def list_outdated(self): + outdated = list() + + if not os.path.isfile(os.path.join(self.path, 'yarn.lock')): + return outdated + + cmd_result, err = self._exec(['outdated', '--json'], True, False, unsupported_with_global=True) + + # the package.json in the global dir is missing a license field, so warnings are expected on stderr + self._process_yarn_error(err) + + if not cmd_result: + return outdated + + outdated_packages_data = cmd_result.splitlines()[1] + + data = json.loads(outdated_packages_data) + + try: + outdated_dependencies = data['data']['body'] + except KeyError: + return outdated + + for dep in outdated_dependencies: + # Outdated dependencies returned as a list of lists, where + # item at index 0 is the name of the dependency + outdated.append(dep[0]) + return outdated + + +def main(): + arg_spec = dict( + name=dict(default=None), + path=dict(default=None, type='path'), + version=dict(default=None), + production=dict(default=False, type='bool'), + executable=dict(default=None, type='path'), + registry=dict(default=None), + state=dict(default='present', choices=['present', 'absent', 'latest']), + ignore_scripts=dict(default=False, type='bool'), + ) + arg_spec['global'] = dict(default=False, type='bool') + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + path = module.params['path'] + version = module.params['version'] + globally = module.params['global'] + production = module.params['production'] 
+ registry = module.params['registry'] + state = module.params['state'] + ignore_scripts = module.params['ignore_scripts'] + + # When installing globally, users should not be able to define a path for installation. + # Require a path if global is False, though! + if path is None and globally is False: + module.fail_json(msg='Path must be specified when not using global arg') + elif path and globally is True: + module.fail_json(msg='Cannot specify path if doing global installation') + + if state == 'absent' and not name: + module.fail_json(msg='Package must be explicitly named when uninstalling.') + if state == 'latest': + version = 'latest' + + if module.params['executable']: + executable = module.params['executable'].split(' ') + else: + executable = [module.get_bin_path('yarn', True)] + + # When installing globally, use the defined path for global node_modules + if globally: + _rc, out, _err = module.run_command(executable + ['global', 'dir'], check_rc=True) + path = out.strip() + + yarn = Yarn(module, + name=name, + path=path, + version=version, + globally=globally, + production=production, + executable=executable, + registry=registry, + ignore_scripts=ignore_scripts) + + changed = False + out = '' + err = '' + if state == 'present': + + if not name: + changed = True + out, err = yarn.install() + else: + installed, missing = yarn.list() + if len(missing): + changed = True + out, err = yarn.install() + + elif state == 'latest': + + if not name: + changed = True + out, err = yarn.install() + else: + installed, missing = yarn.list() + outdated = yarn.list_outdated() + if len(missing): + changed = True + out, err = yarn.install() + if len(outdated): + changed = True + out, err = yarn.update() + else: + # state == absent + installed, missing = yarn.list() + if name in installed: + changed = True + out, err = yarn.uninstall() + + module.exit_json(changed=changed, out=out, err=err) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/yum_versionlock.py b/ansible_collections/community/general/plugins/modules/yum_versionlock.py new file mode 100644 index 000000000..e5d32dc77 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/yum_versionlock.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2018, Florian Paul Azim Hoberg +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: yum_versionlock +version_added: 2.0.0 +short_description: Locks / unlocks a installed package(s) from being updated by yum package manager +description: + - This module adds installed packages to yum versionlock to prevent the package(s) from being updated. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Package name or a list of package names with optional wildcards. + type: list + required: true + elements: str + state: + description: + - If state is C(present), package(s) will be added to yum versionlock list. + - If state is C(absent), package(s) will be removed from yum versionlock list. + choices: [ 'absent', 'present' ] + type: str + default: present +notes: + - Requires yum-plugin-versionlock package on the remote node. 
+requirements: +- yum +- yum-versionlock +author: + - Florian Paul Azim Hoberg (@gyptazy) + - Amin Vakil (@aminvakil) +''' + +EXAMPLES = r''' +- name: Prevent Apache / httpd from being updated + community.general.yum_versionlock: + state: present + name: httpd + +- name: Prevent multiple packages from being updated + community.general.yum_versionlock: + state: present + name: + - httpd + - nginx + - haproxy + - curl + +- name: Remove lock from Apache / httpd to be updated again + community.general.yum_versionlock: + state: absent + name: httpd +''' + +RETURN = r''' +packages: + description: A list of package(s) in versionlock list. + returned: success + type: list + elements: str + sample: [ 'httpd' ] +state: + description: State of package(s). + returned: success + type: str + sample: present +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from fnmatch import fnmatch + +# on DNF-based distros, yum is a symlink to dnf, so we try to handle their different entry formats. +NEVRA_RE_YUM = re.compile(r'^(?P!)?(?P\d+):(?P.+)-' + r'(?P.+)-(?P.+)\.(?P.+)$') +NEVRA_RE_DNF = re.compile(r"^(?P!)?(?P.+)-(?P\d+):(?P.+)-" + r"(?P.+)\.(?P.+)$") + + +class YumVersionLock: + def __init__(self, module): + self.module = module + self.params = module.params + self.yum_bin = module.get_bin_path('yum', required=True) + + def get_versionlock_packages(self): + """ Get an overview of all packages on yum versionlock """ + rc, out, err = self.module.run_command([self.yum_bin, "versionlock", "list"]) + if rc == 0: + return out + elif rc == 1 and 'o such command:' in err: + self.module.fail_json(msg="Error: Please install rpm package yum-plugin-versionlock : " + to_native(err) + to_native(out)) + self.module.fail_json(msg="Error: " + to_native(err) + to_native(out)) + + def ensure_state(self, packages, command): + """ Ensure packages state """ + rc, out, err = self.module.run_command([self.yum_bin, "-q", "versionlock", command] + packages) + if rc == 0: + return True + self.module.fail_json(msg="Error: " + to_native(err) + to_native(out)) + + +def match(entry, name): + m = NEVRA_RE_YUM.match(entry) + if not m: + m = NEVRA_RE_DNF.match(entry) + if not m: + return False + return fnmatch(m.group("name"), name) + + +def main(): + """ start main program to add/remove a package to yum versionlock""" + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(required=True, type='list', elements='str'), + ), + supports_check_mode=True + ) + + state = module.params['state'] + packages = module.params['name'] + changed = False + + yum_v = YumVersionLock(module) + + # Get an overview of all packages that have a version lock + versionlock_packages = yum_v.get_versionlock_packages() + + # Ensure versionlock state of packages + packages_list = [] + if state in ('present', ): + command = 'add' + for single_pkg in packages: + if not any(match(pkg, single_pkg) for pkg in versionlock_packages.split()): + packages_list.append(single_pkg) + if packages_list: + if module.check_mode: + changed = True + else: + changed = yum_v.ensure_state(packages_list, command) + elif state in ('absent', ): + command = 'delete' + for single_pkg in packages: + if any(match(pkg, single_pkg) for pkg in versionlock_packages.split()): + packages_list.append(single_pkg) + if packages_list: + if module.check_mode: + changed = True + else: + changed = yum_v.ensure_state(packages_list, command) + + 
module.exit_json( + changed=changed, + meta={ + "packages": packages, + "state": state + } + ) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/zfs.py b/ansible_collections/community/general/plugins/modules/zfs.py new file mode 100644 index 000000000..4cd79c36e --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/zfs.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Johan Wiren +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: zfs +short_description: Manage zfs +description: + - Manages ZFS file systems, volumes, clones and snapshots +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - In certain situations it may report a task as changed that will not be reported + as changed when C(check_mode) is disabled. + - For example, this might occur when the zpool C(altroot) option is set or when + a size is written using human-readable notation, such as C(1M) or C(1024K), + instead of as an unqualified byte count, such as C(1048576). + diff_mode: + support: full +options: + name: + description: + - File system, snapshot or volume name e.g. C(rpool/myfs). + required: true + type: str + state: + description: + - Whether to create (C(present)), or remove (C(absent)) a + file system, snapshot or volume. All parents/children + will be created/destroyed as needed to reach the desired state. + choices: [ absent, present ] + required: true + type: str + origin: + description: + - Snapshot from which to create a clone. + type: str + extra_zfs_properties: + description: + - A dictionary of zfs properties to be set. + - See the zfs(8) man page for more information. + type: dict + default: {} +author: +- Johan Wiren (@johanwiren) +''' + +EXAMPLES = ''' +- name: Create a new file system called myfs in pool rpool with the setuid property turned off + community.general.zfs: + name: rpool/myfs + state: present + extra_zfs_properties: + setuid: 'off' + +- name: Create a new volume called myvol in pool rpool. + community.general.zfs: + name: rpool/myvol + state: present + extra_zfs_properties: + volsize: 10M + +- name: Create a snapshot of rpool/myfs file system. 
+ community.general.zfs: + name: rpool/myfs@mysnapshot + state: present + +- name: Create a new file system called myfs2 with snapdir enabled + community.general.zfs: + name: rpool/myfs2 + state: present + extra_zfs_properties: + snapdir: enabled + +- name: Create a new file system by cloning a snapshot + community.general.zfs: + name: rpool/cloned_fs + state: present + origin: rpool/myfs@mysnapshot + +- name: Destroy a filesystem + community.general.zfs: + name: rpool/myfs + state: absent +''' + +import os + +from ansible.module_utils.basic import AnsibleModule + + +class Zfs(object): + + def __init__(self, module, name, properties): + self.module = module + self.name = name + self.properties = properties + self.changed = False + self.zfs_cmd = module.get_bin_path('zfs', True) + self.zpool_cmd = module.get_bin_path('zpool', True) + self.pool = name.split('/')[0].split('@')[0] + self.is_solaris = os.uname()[0] == 'SunOS' + self.is_openzfs = self.check_openzfs() + self.enhanced_sharing = self.check_enhanced_sharing() + + def check_openzfs(self): + cmd = [self.zpool_cmd] + cmd.extend(['get', 'version']) + cmd.append(self.pool) + (rc, out, err) = self.module.run_command(cmd, check_rc=True) + version = out.splitlines()[-1].split()[2] + if version == '-': + return True + if int(version) == 5000: + return True + return False + + def check_enhanced_sharing(self): + if self.is_solaris and not self.is_openzfs: + cmd = [self.zpool_cmd] + cmd.extend(['get', 'version']) + cmd.append(self.pool) + (rc, out, err) = self.module.run_command(cmd, check_rc=True) + version = out.splitlines()[-1].split()[2] + if int(version) >= 34: + return True + return False + + def exists(self): + cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name] + rc, dummy, dummy = self.module.run_command(cmd) + return rc == 0 + + def create(self): + if self.module.check_mode: + self.changed = True + return + properties = self.properties + origin = self.module.params.get('origin') + cmd = [self.zfs_cmd] + + if "@" in self.name: + action = 'snapshot' + elif origin: + action = 'clone' + else: + action = 'create' + + cmd.append(action) + + if action in ['create', 'clone']: + cmd += ['-p'] + + if properties: + for prop, value in properties.items(): + if prop == 'volsize': + cmd += ['-V', value] + elif prop == 'volblocksize': + cmd += ['-b', value] + else: + cmd += ['-o', '%s=%s' % (prop, value)] + if origin and action == 'clone': + cmd.append(origin) + cmd.append(self.name) + self.module.run_command(cmd, check_rc=True) + self.changed = True + + def destroy(self): + if self.module.check_mode: + self.changed = True + return + cmd = [self.zfs_cmd, 'destroy', '-R', self.name] + self.module.run_command(cmd, check_rc=True) + self.changed = True + + def set_property(self, prop, value): + if self.module.check_mode: + self.changed = True + return + cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name] + self.module.run_command(cmd, check_rc=True) + + def set_properties_if_changed(self): + diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}} + current_properties = self.get_current_properties() + for prop, value in self.properties.items(): + current_value = current_properties.get(prop, None) + if current_value != value: + self.set_property(prop, value) + diff['before']['extra_zfs_properties'][prop] = current_value + diff['after']['extra_zfs_properties'][prop] = value + if self.module.check_mode: + return diff + updated_properties = self.get_current_properties() + for prop in self.properties: + value = 
updated_properties.get(prop, None)
+            if value is None:
+                self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop)
+            if current_properties.get(prop, None) != value:
+                self.changed = True
+            if prop in diff['after']['extra_zfs_properties']:
+                diff['after']['extra_zfs_properties'][prop] = value
+        return diff
+
+    def get_current_properties(self):
+        cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"]
+        if self.enhanced_sharing:
+            cmd += ['-e']
+        cmd += ['all', self.name]
+        rc, out, err = self.module.run_command(cmd)
+        properties = dict()
+        for line in out.splitlines():
+            prop, value, source = line.split('\t')
+            # include source '-' so that creation-only properties are not removed,
+            # which avoids errors when the dataset already exists and the property is not changed
+            # (most likely when the same playbook is run more than once)
+            if source in ('local', 'received', '-'):
+                properties[prop] = value
+        # Add alias for enhanced sharing properties
+        if self.enhanced_sharing:
+            properties['sharenfs'] = properties.get('share.nfs', None)
+            properties['sharesmb'] = properties.get('share.smb', None)
+        return properties
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(type='str', required=True),
+            state=dict(type='str', required=True, choices=['absent', 'present']),
+            origin=dict(type='str'),
+            extra_zfs_properties=dict(type='dict', default={}),
+        ),
+        supports_check_mode=True,
+    )

+    state = module.params.get('state')
+    name = module.params.get('name')
+
+    if module.params.get('origin') and '@' in name:
+        module.fail_json(msg='cannot specify origin when operating on a snapshot')
+
+    # Reverse the boolification of zfs properties
+    for prop, value in module.params['extra_zfs_properties'].items():
+        if isinstance(value, bool):
+            if value is True:
+                module.params['extra_zfs_properties'][prop] = 'on'
+            else:
+                module.params['extra_zfs_properties'][prop] = 'off'
+        else:
+            module.params['extra_zfs_properties'][prop] = value
+
+    result = dict(
+        name=name,
+        state=state,
+    )
+
+    zfs = Zfs(module, name, module.params['extra_zfs_properties'])
+
+    if state == 'present':
+        if zfs.exists():
+            result['diff'] = zfs.set_properties_if_changed()
+        else:
+            zfs.create()
+            result['diff'] = {'before': {'state': 'absent'}, 'after': {'state': state}}
+
+    elif state == 'absent':
+        if zfs.exists():
+            zfs.destroy()
+            result['diff'] = {'before': {'state': 'present'}, 'after': {'state': state}}
+        else:
+            result['diff'] = {}
+
+    result['diff']['before_header'] = name
+    result['diff']['after_header'] = name
+
+    result.update(zfs.properties)
+    result['changed'] = zfs.changed
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py b/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
new file mode 100644
index 000000000..0536f1a28
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Nate Coraor
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: zfs_delegate_admin
+short_description: Manage ZFS delegated administration (user admin privileges)
+description:
+  - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
+    operations normally restricted to the superuser.
+  - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
+  - This module attempts to adhere to the behavior of the command line tool as much as possible.
+requirements:
+  - "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all
+    versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  name:
+    description:
+      - File system or volume name, e.g. C(rpool/myfs).
+    required: true
+    type: str
+  state:
+    description:
+      - Whether to allow (C(present)) or unallow (C(absent)) a permission.
+      - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) is required.
+      - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
+    choices: [ absent, present ]
+    default: present
+    type: str
+  users:
+    description:
+      - List of users to whom permission(s) should be granted.
+    type: list
+    elements: str
+  groups:
+    description:
+      - List of groups to whom permission(s) should be granted.
+    type: list
+    elements: str
+  everyone:
+    description:
+      - Apply permissions to everyone.
+    type: bool
+    default: false
+  permissions:
+    description:
+      - The list of permission(s) to delegate (required if C(state) is C(present)).
+      - Supported permissions depend on the ZFS version in use. See for example
+        U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS.
+    type: list
+    elements: str
+  local:
+    description:
+      - Apply permissions to C(name) locally (C(zfs allow -l)).
+    type: bool
+  descendents:
+    description:
+      - Apply permissions to C(name)'s descendents (C(zfs allow -d)).
+    type: bool
+  recursive:
+    description:
+      - Unallow permissions recursively (ignored when C(state) is C(present)).
+ type: bool + default: false +author: +- Nate Coraor (@natefoo) +''' + +EXAMPLES = r''' +- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope + community.general.zfs_delegate_admin: + name: rpool/myfs + users: adm + permissions: allow,unallow + +- name: Grant `zfs send` to everyone, plus the group `backup` + community.general.zfs_delegate_admin: + name: rpool/myvol + groups: backup + everyone: true + permissions: send + +- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only + community.general.zfs_delegate_admin: + name: rpool/myfs + users: foo,bar + permissions: send,receive + local: true + +- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain) + community.general.zfs_delegate_admin: + name: rpool/myfs + everyone: true + state: absent +''' + +# This module does not return anything other than the standard +# changed/state/msg/stdout +RETURN = ''' +''' + +from itertools import product + +from ansible.module_utils.basic import AnsibleModule + + +class ZfsDelegateAdmin(object): + def __init__(self, module): + self.module = module + self.name = module.params.get('name') + self.state = module.params.get('state') + self.users = module.params.get('users') + self.groups = module.params.get('groups') + self.everyone = module.params.get('everyone') + self.perms = module.params.get('permissions') + self.scope = None + self.changed = False + self.initial_perms = None + self.subcommand = 'allow' + self.recursive_opt = [] + self.run_method = self.update + + self.setup(module) + + def setup(self, module): + """ Validate params and set up for run. + """ + if self.state == 'absent': + self.subcommand = 'unallow' + if module.params.get('recursive'): + self.recursive_opt = ['-r'] + + local = module.params.get('local') + descendents = module.params.get('descendents') + if (local and descendents) or (not local and not descendents): + self.scope = 'ld' + elif local: + self.scope = 'l' + elif descendents: + self.scope = 'd' + else: + self.module.fail_json(msg='Impossible value for local and descendents') + + if not (self.users or self.groups or self.everyone): + if self.state == 'present': + self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set') + elif self.state == 'absent': + self.run_method = self.clear + # ansible ensures the else cannot happen here + + self.zfs_path = module.get_bin_path('zfs', True) + + @property + def current_perms(self): + """ Parse the output of `zfs allow ` to retrieve current permissions. 
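+        The result is a dict keyed by scope ('l' = local, 'd' = descendent,
+        'ld' = local+descendent); each scope maps 'u' (users) and 'g' (groups)
+        to {entity: [permissions]} and 'e' (everyone) to a permission list.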
+ """ + out = self.run_zfs_raw(subcommand='allow') + perms = { + 'l': {'u': {}, 'g': {}, 'e': []}, + 'd': {'u': {}, 'g': {}, 'e': []}, + 'ld': {'u': {}, 'g': {}, 'e': []}, + } + linemap = { + 'Local permissions:': 'l', + 'Descendent permissions:': 'd', + 'Local+Descendent permissions:': 'ld', + } + scope = None + for line in out.splitlines(): + scope = linemap.get(line, scope) + if not scope: + continue + if ' (unknown: ' in line: + line = line.replace('(unknown: ', '', 1) + line = line.replace(')', '', 1) + try: + if line.startswith('\tuser ') or line.startswith('\tgroup '): + ent_type, ent, cur_perms = line.split() + perms[scope][ent_type[0]][ent] = cur_perms.split(',') + elif line.startswith('\teveryone '): + perms[scope]['e'] = line.split()[1].split(',') + except ValueError: + self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line) + return perms + + def run_zfs_raw(self, subcommand=None, args=None): + """ Run a raw zfs command, fail on error. + """ + cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name] + rc, out, err = self.module.run_command(cmd) + if rc: + self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err)) + return out + + def run_zfs(self, args): + """ Run zfs allow/unallow with appropriate options as per module arguments. + """ + args = self.recursive_opt + ['-' + self.scope] + args + if self.perms: + args.append(','.join(self.perms)) + return self.run_zfs_raw(args=args) + + def clear(self): + """ Called by run() to clear all permissions. + """ + changed = False + stdout = '' + for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')): + for ent in self.initial_perms[scope][ent_type].keys(): + stdout += self.run_zfs(['-%s' % ent_type, ent]) + changed = True + for scope in ('ld', 'l', 'd'): + if self.initial_perms[scope]['e']: + stdout += self.run_zfs(['-e']) + changed = True + return (changed, stdout) + + def update(self): + """ Update permissions as per module arguments. + """ + stdout = '' + for ent_type, entities in (('u', self.users), ('g', self.groups)): + if entities: + stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)]) + if self.everyone: + stdout += self.run_zfs(['-e']) + return (self.initial_perms != self.current_perms, stdout) + + def run(self): + """ Run an operation, return results for Ansible. 
+ """ + exit_args = {'state': self.state} + self.initial_perms = self.current_perms + exit_args['changed'], stdout = self.run_method() + if exit_args['changed']: + exit_args['msg'] = 'ZFS delegated admin permissions updated' + exit_args['stdout'] = stdout + self.module.exit_json(**exit_args) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + users=dict(type='list', elements='str'), + groups=dict(type='list', elements='str'), + everyone=dict(type='bool', default=False), + permissions=dict(type='list', elements='str'), + local=dict(type='bool'), + descendents=dict(type='bool'), + recursive=dict(type='bool', default=False), + ), + supports_check_mode=False, + required_if=[('state', 'present', ['permissions'])], + ) + zfs_delegate_admin = ZfsDelegateAdmin(module) + zfs_delegate_admin.run() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/zfs_facts.py b/ansible_collections/community/general/plugins/modules/zfs_facts.py new file mode 100644 index 000000000..bb4530c47 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/zfs_facts.py @@ -0,0 +1,260 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: zfs_facts +short_description: Gather facts about ZFS datasets +description: + - Gather facts from ZFS dataset properties. +author: Adam Števko (@xen0l) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + name: + description: + - ZFS dataset name. + required: true + aliases: [ "ds", "dataset" ] + type: str + recurse: + description: + - Specifies if properties for any children should be recursively + displayed. + type: bool + default: false + parsable: + description: + - Specifies if property values should be displayed in machine + friendly format. + type: bool + default: false + properties: + description: + - Specifies which dataset properties should be queried in comma-separated format. + For more information about dataset properties, check zfs(1M) man page. + default: all + type: str + type: + description: + - Specifies which datasets types to display. Multiple values have to be + provided in comma-separated form. + choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ] + default: all + type: str + depth: + description: + - Specifies recursion depth. + type: int + default: 0 +''' + +EXAMPLES = ''' +- name: Gather facts about ZFS dataset rpool/export/home + community.general.zfs_facts: + dataset: rpool/export/home + +- name: Report space usage on ZFS filesystems under data/home + community.general.zfs_facts: + name: data/home + recurse: true + type: filesystem + +- ansible.builtin.debug: + msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.' + with_items: '{{ ansible_zfs_datasets }}' +''' + +RETURN = ''' +name: + description: ZFS dataset name + returned: always + type: str + sample: rpool/var/spool +parsable: + description: if parsable output should be provided in machine friendly format. 
+ returned: if 'parsable' is set to True + type: bool + sample: true +recurse: + description: if we should recurse over ZFS dataset + returned: if 'recurse' is set to True + type: bool + sample: true +zfs_datasets: + description: ZFS dataset facts + returned: always + type: str + sample: + { + "aclinherit": "restricted", + "aclmode": "discard", + "atime": "on", + "available": "43.8G", + "canmount": "on", + "casesensitivity": "sensitive", + "checksum": "on", + "compression": "off", + "compressratio": "1.00x", + "copies": "1", + "creation": "Thu Jun 16 11:37 2016", + "dedup": "off", + "devices": "on", + "exec": "on", + "filesystem_count": "none", + "filesystem_limit": "none", + "logbias": "latency", + "logicalreferenced": "18.5K", + "logicalused": "3.45G", + "mlslabel": "none", + "mounted": "yes", + "mountpoint": "/rpool", + "name": "rpool", + "nbmand": "off", + "normalization": "none", + "org.openindiana.caiman:install": "ready", + "primarycache": "all", + "quota": "none", + "readonly": "off", + "recordsize": "128K", + "redundant_metadata": "all", + "refcompressratio": "1.00x", + "referenced": "29.5K", + "refquota": "none", + "refreservation": "none", + "reservation": "none", + "secondarycache": "all", + "setuid": "on", + "sharenfs": "off", + "sharesmb": "off", + "snapdir": "hidden", + "snapshot_count": "none", + "snapshot_limit": "none", + "sync": "standard", + "type": "filesystem", + "used": "4.41G", + "usedbychildren": "4.41G", + "usedbydataset": "29.5K", + "usedbyrefreservation": "0", + "usedbysnapshots": "0", + "utf8only": "off", + "version": "5", + "vscan": "off", + "written": "29.5K", + "xattr": "on", + "zoned": "off" + } +''' + +from collections import defaultdict + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + + +SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] + + +class ZFSFacts(object): + def __init__(self, module): + + self.module = module + + self.name = module.params['name'] + self.recurse = module.params['recurse'] + self.parsable = module.params['parsable'] + self.properties = module.params['properties'] + self.type = module.params['type'] + self.depth = module.params['depth'] + + self._datasets = defaultdict(dict) + self.facts = [] + + def dataset_exists(self): + cmd = [self.module.get_bin_path('zfs'), 'list', self.name] + + (rc, out, err) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + return False + + def get_facts(self): + cmd = [self.module.get_bin_path('zfs'), 'get', '-H'] + if self.parsable: + cmd.append('-p') + if self.recurse: + cmd.append('-r') + if int(self.depth) != 0: + cmd.append('-d') + cmd.append('%s' % self.depth) + if self.type: + cmd.append('-t') + cmd.append(self.type) + cmd.extend(['-o', 'name,property,value', self.properties, self.name]) + + (rc, out, err) = self.module.run_command(cmd) + + if rc == 0: + for line in out.splitlines(): + dataset, property, value = line.split('\t') + + self._datasets[dataset].update({property: value}) + + for k, v in iteritems(self._datasets): + v.update({'name': k}) + self.facts.append(v) + + return {'ansible_zfs_datasets': self.facts} + else: + self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name, + stderr=err, + rc=rc) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['ds', 'dataset'], type='str'), + recurse=dict(required=False, default=False, type='bool'), + parsable=dict(required=False, default=False, type='bool'), + 
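+            # 'properties' is passed through to `zfs get` verbatim, so any
+            # comma-separated property list zfs accepts (e.g. 'used,quota') works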
properties=dict(required=False, default='all', type='str'), + type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES), + depth=dict(required=False, default=0, type='int') + ), + supports_check_mode=True + ) + + zfs_facts = ZFSFacts(module) + + result = {} + result['changed'] = False + result['name'] = zfs_facts.name + + if zfs_facts.parsable: + result['parsable'] = zfs_facts.parsable + + if zfs_facts.recurse: + result['recurse'] = zfs_facts.recurse + + if zfs_facts.dataset_exists(): + result['ansible_facts'] = zfs_facts.get_facts() + else: + module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/znode.py b/ansible_collections/community/general/plugins/modules/znode.py new file mode 100644 index 000000000..f5aa54ef8 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/znode.py @@ -0,0 +1,301 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright 2015 WP Engine, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: znode +short_description: Create, delete, retrieve, and update znodes using ZooKeeper +description: + - Create, delete, retrieve, and update znodes using ZooKeeper. +attributes: + check_mode: + support: none + diff_mode: + support: none +extends_documentation_fragment: + - community.general.attributes +options: + hosts: + description: + - A list of ZooKeeper servers (format '[server]:[port]'). + required: true + type: str + name: + description: + - The path of the znode. + required: true + type: str + value: + description: + - The value assigned to the znode. + type: str + op: + description: + - An operation to perform. Mutually exclusive with state. + choices: [ get, wait, list ] + type: str + state: + description: + - The state to enforce. Mutually exclusive with op. + choices: [ present, absent ] + type: str + timeout: + description: + - The amount of time to wait for a node to appear. + default: 300 + type: int + recursive: + description: + - Recursively delete node and all its children. + type: bool + default: false + auth_scheme: + description: + - 'Authentication scheme.' + choices: [ digest, sasl ] + type: str + default: "digest" + required: false + version_added: 5.8.0 + auth_credential: + description: + - The authentication credential value. Depends on I(auth_scheme). + - The format for I(auth_scheme=digest) is C(user:password), + and the format for I(auth_scheme=sasl) is C(user:password). + type: str + required: false + version_added: 5.8.0 + use_tls: + description: + - Using TLS/SSL or not. 
+ type: bool + default: false + required: false + version_added: '6.5.0' +requirements: + - kazoo >= 2.1 + - python >= 2.6 +author: "Trey Perry (@treyperry)" +''' + +EXAMPLES = """ +- name: Creating or updating a znode with a given value + community.general.znode: + hosts: 'localhost:2181' + name: /mypath + value: myvalue + state: present + +- name: Getting the value and stat structure for a znode + community.general.znode: + hosts: 'localhost:2181' + name: /mypath + op: get + +- name: Getting the value and stat structure for a znode using digest authentication + community.general.znode: + hosts: 'localhost:2181' + auth_credential: 'user1:s3cr3t' + name: /secretmypath + op: get + +- name: Listing a particular znode's children + community.general.znode: + hosts: 'localhost:2181' + name: /zookeeper + op: list + +- name: Waiting 20 seconds for a znode to appear at path /mypath + community.general.znode: + hosts: 'localhost:2181' + name: /mypath + op: wait + timeout: 20 + +- name: Deleting a znode at path /mypath + community.general.znode: + hosts: 'localhost:2181' + name: /mypath + state: absent + +- name: Creating or updating a znode with a given value on a remote Zookeeper + community.general.znode: + hosts: 'my-zookeeper-node:2181' + name: /mypath + value: myvalue + state: present + delegate_to: 127.0.0.1 +""" + +import time +import traceback + +KAZOO_IMP_ERR = None +try: + from kazoo.client import KazooClient + from kazoo.handlers.threading import KazooTimeoutError + KAZOO_INSTALLED = True +except ImportError: + KAZOO_IMP_ERR = traceback.format_exc() + KAZOO_INSTALLED = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes + + +def main(): + module = AnsibleModule( + argument_spec=dict( + hosts=dict(required=True, type='str'), + name=dict(required=True, type='str'), + value=dict(type='str'), + op=dict(choices=['get', 'wait', 'list']), + state=dict(choices=['present', 'absent']), + timeout=dict(default=300, type='int'), + recursive=dict(default=False, type='bool'), + auth_scheme=dict(default='digest', choices=['digest', 'sasl']), + auth_credential=dict(type='str', no_log=True), + use_tls=dict(default=False, type='bool'), + ), + supports_check_mode=False + ) + + if not KAZOO_INSTALLED: + module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR) + + check = check_params(module.params) + if not check['success']: + module.fail_json(msg=check['msg']) + + zoo = KazooCommandProxy(module) + try: + zoo.start() + except KazooTimeoutError: + module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.') + + command_dict = { + 'op': { + 'get': zoo.get, + 'list': zoo.list, + 'wait': zoo.wait + }, + 'state': { + 'present': zoo.present, + 'absent': zoo.absent + } + } + + command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state' + method = module.params[command_type] + result, result_dict = command_dict[command_type][method]() + zoo.shutdown() + + if result: + module.exit_json(**result_dict) + else: + module.fail_json(**result_dict) + + +def check_params(params): + if not params['state'] and not params['op']: + return {'success': False, 'msg': 'Please define an operation (op) or a state.'} + + if params['state'] and params['op']: + return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'} + + return {'success': True} + + +class KazooCommandProxy(): + def __init__(self, module): + self.module = module + 
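+        # one KazooClient per task invocation; use_tls selects a TLS connection,
+        # and any auth credential is registered right after the client starts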
self.zk = KazooClient(module.params['hosts'], use_ssl=module.params['use_tls']) + + def absent(self): + return self._absent(self.module.params['name']) + + def exists(self, znode): + return self.zk.exists(znode) + + def list(self): + children = self.zk.get_children(self.module.params['name']) + return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.', + 'znode': self.module.params['name']} + + def present(self): + return self._present(self.module.params['name'], self.module.params['value']) + + def get(self): + return self._get(self.module.params['name']) + + def shutdown(self): + self.zk.stop() + self.zk.close() + + def start(self): + self.zk.start() + if self.module.params['auth_credential']: + self.zk.add_auth(self.module.params['auth_scheme'], self.module.params['auth_credential']) + + def wait(self): + return self._wait(self.module.params['name'], self.module.params['timeout']) + + def _absent(self, znode): + if self.exists(znode): + self.zk.delete(znode, recursive=self.module.params['recursive']) + return True, {'changed': True, 'msg': 'The znode was deleted.'} + else: + return True, {'changed': False, 'msg': 'The znode does not exist.'} + + def _get(self, path): + if self.exists(path): + value, zstat = self.zk.get(path) + stat_dict = {} + for i in dir(zstat): + if not i.startswith('_'): + attr = getattr(zstat, i) + if isinstance(attr, (int, str)): + stat_dict[i] = attr + result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value, + 'stat': stat_dict} + else: + result = False, {'msg': 'The requested node does not exist.'} + + return result + + def _present(self, path, value): + if self.exists(path): + (current_value, zstat) = self.zk.get(path) + if value != current_value: + self.zk.set(path, to_bytes(value)) + return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path, + 'value': value} + else: + return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value} + else: + self.zk.create(path, to_bytes(value), makepath=True) + return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value} + + def _wait(self, path, timeout, interval=5): + lim = time.time() + timeout + + while time.time() < lim: + if self.exists(path): + return True, {'msg': 'The node appeared before the configured timeout.', + 'znode': path, 'timeout': timeout} + else: + time.sleep(interval) + + return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout, + 'znode': path} + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/general/plugins/modules/zpool_facts.py b/ansible_collections/community/general/plugins/modules/zpool_facts.py new file mode 100644 index 000000000..2477a920b --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/zpool_facts.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: zpool_facts +short_description: Gather facts about ZFS pools +description: + - Gather facts from ZFS pool properties. 
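+  - Facts are returned as a list of per-pool property dictionaries under C(ansible_zfs_pools).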
+author: Adam Števko (@xen0l) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + name: + description: + - ZFS pool name. + type: str + aliases: [ "pool", "zpool" ] + required: false + parsable: + description: + - Specifies if property values should be displayed in machine + friendly format. + type: bool + default: false + required: false + properties: + description: + - Specifies which dataset properties should be queried in comma-separated format. + For more information about dataset properties, check zpool(1M) man page. + type: str + default: all + required: false +''' + +EXAMPLES = ''' +- name: Gather facts about ZFS pool rpool + community.general.zpool_facts: pool=rpool + +- name: Gather space usage about all imported ZFS pools + community.general.zpool_facts: properties='free,size' + +- name: Print gathered information + ansible.builtin.debug: + msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.' + with_items: '{{ ansible_zfs_pools }}' +''' + +RETURN = ''' +ansible_facts: + description: Dictionary containing all the detailed information about the ZFS pool facts + returned: always + type: complex + contains: + ansible_zfs_pools: + description: ZFS pool facts + returned: always + type: str + sample: + { + "allocated": "3.46G", + "altroot": "-", + "autoexpand": "off", + "autoreplace": "off", + "bootfs": "rpool/ROOT/openindiana", + "cachefile": "-", + "capacity": "6%", + "comment": "-", + "dedupditto": "0", + "dedupratio": "1.00x", + "delegation": "on", + "expandsize": "-", + "failmode": "wait", + "feature@async_destroy": "enabled", + "feature@bookmarks": "enabled", + "feature@edonr": "enabled", + "feature@embedded_data": "active", + "feature@empty_bpobj": "active", + "feature@enabled_txg": "active", + "feature@extensible_dataset": "enabled", + "feature@filesystem_limits": "enabled", + "feature@hole_birth": "active", + "feature@large_blocks": "enabled", + "feature@lz4_compress": "active", + "feature@multi_vdev_crash_dump": "enabled", + "feature@sha512": "enabled", + "feature@skein": "enabled", + "feature@spacemap_histogram": "active", + "fragmentation": "3%", + "free": "46.3G", + "freeing": "0", + "guid": "15729052870819522408", + "health": "ONLINE", + "leaked": "0", + "listsnapshots": "off", + "name": "rpool", + "readonly": "off", + "size": "49.8G", + "version": "-" + } +name: + description: ZFS pool name + returned: always + type: str + sample: rpool +parsable: + description: if parsable output should be provided in machine friendly format. 
+    returned: if 'parsable' is set to True
+    type: bool
+    sample: true
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZPoolFacts(object):
+    def __init__(self, module):
+
+        self.module = module
+        self.name = module.params['name']
+        self.parsable = module.params['parsable']
+        self.properties = module.params['properties']
+        self._pools = defaultdict(dict)
+        self.facts = []
+
+    def pool_exists(self):
+        cmd = [self.module.get_bin_path('zpool'), 'list', self.name]
+        rc, dummy, dummy = self.module.run_command(cmd)
+        return rc == 0
+
+    def get_facts(self):
+        cmd = [self.module.get_bin_path('zpool'), 'get', '-H']
+        if self.parsable:
+            cmd.append('-p')
+        cmd.append('-o')
+        cmd.append('name,property,value')
+        cmd.append(self.properties)
+        if self.name:
+            cmd.append(self.name)
+
+        rc, out, err = self.module.run_command(cmd, check_rc=True)
+
+        for line in out.splitlines():
+            pool, prop, value = line.split('\t')
+
+            self._pools[pool].update({prop: value})
+
+        for k, v in iteritems(self._pools):
+            v.update({'name': k})
+            self.facts.append(v)
+
+        return {'ansible_zfs_pools': self.facts}
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(aliases=['pool', 'zpool'], type='str'),
+            parsable=dict(default=False, type='bool'),
+            properties=dict(default='all', type='str'),
+        ),
+        supports_check_mode=True
+    )
+
+    zpool_facts = ZPoolFacts(module)
+
+    result = {
+        'changed': False,
+        'name': zpool_facts.name,
+    }
+    if zpool_facts.parsable:
+        result['parsable'] = zpool_facts.parsable
+
+    if zpool_facts.name is not None:
+        if zpool_facts.pool_exists():
+            result['ansible_facts'] = zpool_facts.get_facts()
+        else:
+            module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
+    else:
+        result['ansible_facts'] = zpool_facts.get_facts()
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/zypper.py b/ansible_collections/community/general/plugins/modules/zypper.py
new file mode 100644
index 000000000..b47131d3d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zypper.py
@@ -0,0 +1,607 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Patrick Callahan
+# based on
+#     openbsd_pkg
+#         Copyright (c) 2013
+#         Patrik Lundin
+#
+#     yum
+#         Copyright (c) 2012, Red Hat, Inc
+#         Written by Seth Vidal
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+    - "Patrick Callahan (@dirtyharrycallahan)"
+    - "Alexander Gubin (@alxgu)"
+    - "Thomas O'Donnell (@andytom)"
+    - "Robin Roth (@robinro)"
+    - "Andrii Radyk (@AnderEnder)"
+short_description: Manage packages on SUSE and openSUSE
+description:
+    - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+    - Also supports transactional updates by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run).
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: full
+    diff_mode:
+        support: full
+options:
+    name:
+        description:
+        - Package name C(name) or package specifier or a list of either.
+        - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+          update the package within the version range given.
+        - You can also pass a URL or a local path to an rpm file.
+        - When using I(state=latest), this can be '*', which updates all installed packages.
+        required: true
+        aliases: [ 'pkg' ]
+        type: list
+        elements: str
+    state:
+        description:
+          - C(present) will make sure the package is installed.
+            C(latest) will make sure the latest version of the package is installed.
+            C(absent) will make sure the specified package is not installed.
+            C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+          - When using C(dist-upgrade), I(name) should be C('*').
+        required: false
+        choices: [ present, latest, absent, dist-upgrade, installed, removed ]
+        default: "present"
+        type: str
+    type:
+        description:
+          - The type of package to be operated on.
+        required: false
+        choices: [ package, patch, pattern, product, srcpackage, application ]
+        default: "package"
+        type: str
+    extra_args_precommand:
+        required: false
+        description:
+          - Add additional global target options to C(zypper).
+          - Options should be supplied in a single line as if given in the command line.
+        type: str
+    disable_gpg_check:
+        description:
+          - Whether to disable the GPG signature checking of the package
+            being installed. Has an effect only if state is
+            I(present) or I(latest).
+        required: false
+        default: false
+        type: bool
+    disable_recommends:
+        description:
+          - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(true)) prevents zypper from
+            installing recommended packages; C(false) installs them.
+        required: false
+        default: true
+        type: bool
+    force:
+        description:
+          - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+        required: false
+        default: false
+        type: bool
+    force_resolution:
+        description:
+          - Adds C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver will choose a solution).
+        required: false
+        default: false
+        type: bool
+        version_added: '0.2.0'
+    update_cache:
+        description:
+          - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+        required: false
+        default: false
+        type: bool
+        aliases: [ "refresh" ]
+    oldpackage:
+        description:
+          - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+            version is specified as part of the package name.
+        required: false
+        default: false
+        type: bool
+    extra_args:
+        required: false
+        description:
+          - Add additional options to C(zypper) command.
+          - Options should be supplied in a single line as if given in the command line.
+        type: str
+    allow_vendor_change:
+        type: bool
+        required: false
+        default: false
+        description:
+          - Adds C(--allow-vendor-change) option to I(zypper) dist-upgrade command.
+        version_added: '0.2.0'
+    replacefiles:
+        type: bool
+        required: false
+        default: false
+        description:
+          - Adds C(--replacefiles) option to I(zypper) install/update command.
+        version_added: '0.2.0'
+    clean_deps:
+        type: bool
+        required: false
+        default: false
+        description:
+          - Adds C(--clean-deps) option to I(zypper) remove command.
+        version_added: '4.6.0'
+notes:
+  - When used with a C(loop:), each package will be processed individually; it is much more efficient to pass the list directly to the I(name) option.
+# informational: requirements for nodes
+requirements:
+    - "zypper >= 1.0  # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+    - python-xml
+    - rpm
+'''
+
+EXAMPLES = '''
+- name: Install nmap
+  community.general.zypper:
+    name: nmap
+    state: present
+
+- name: Install apache2 with recommended packages
+  community.general.zypper:
+    name: apache2
+    state: present
+    disable_recommends: false
+
+- name: Apply a given patch
+  community.general.zypper:
+    name: openSUSE-2016-128
+    state: present
+    type: patch
+
+- name: Remove the nmap package
+  community.general.zypper:
+    name: nmap
+    state: absent
+
+- name: Install the nginx rpm from a remote repo
+  community.general.zypper:
+    name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+    state: present
+
+- name: Install local rpm file
+  community.general.zypper:
+    name: /tmp/fancy-software.rpm
+    state: present
+
+- name: Update all packages
+  community.general.zypper:
+    name: '*'
+    state: latest
+
+- name: Apply all available patches
+  community.general.zypper:
+    name: '*'
+    state: latest
+    type: patch
+
+- name: Perform a dist-upgrade with additional arguments
+  community.general.zypper:
+    name: '*'
+    state: dist-upgrade
+    allow_vendor_change: true
+    extra_args: '--allow-arch-change'
+
+- name: Perform an installation of nmap with the install option replacefiles
+  community.general.zypper:
+    name: 'nmap'
+    state: latest
+    replacefiles: true
+
+- name: Refresh repositories and update package openssl
+  community.general.zypper:
+    name: openssl
+    state: present
+    update_cache: true
+
+- name: "Install specific version (possible comparisons: <, >, <=, >=, =)"
+  community.general.zypper:
+    name: 'docker>=1.10'
+    state: present
+
+- name: Wait 20 seconds to acquire the lock before failing
+  community.general.zypper:
+    name: mosh
+    state: present
+  environment:
+    ZYPP_LOCK_TIMEOUT: 20
+'''
+
+import os.path
+import xml
+import re
+from xml.dom.minidom import parseString as parseXML
+from ansible.module_utils.common.text.converters import to_native
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Package:
+    def __init__(self, name, prefix, version):
+        self.name = name
+        self.prefix = prefix
+        self.version = version
+        self.shouldinstall = (prefix == '+')
+
+    def __str__(self):
+        return self.prefix + self.name + self.version
+
+
+def split_name_version(name):
+    """splits off the package name and desired version
+
+    example formats:
+        - docker>=1.10
+        - apache=2.4
+
+    Allowed version specifiers: <, >, <=, >=, =
+    Allowed version format: [0-9.-]*
+
+    Also allows a prefix indicating remove "-", "~" or install "+"
+    """
+
+    prefix = ''
+    if name[0] in ['-', '~', '+']:
+        prefix = name[0]
+        name = name[1:]
+    if prefix == '~':
+        prefix = '-'
+
+    version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+    try:
+        reres = version_check.match(name)
+        name, version = reres.groups()
+        if version is None:
+            version = ''
+        return prefix, name, version
+    except Exception:
+        return prefix, name, ''
+
+
+def get_want_state(names, remove=False):
+    packages = []
+    urls = []
+    for name in names:
+        if '://' in name or name.endswith('.rpm'):
+            urls.append(name)
+        else:
+            prefix, pname, version = split_name_version(name)
+            if prefix not in ['-', '+']:
+                if remove:
+                    prefix = '-'
+                else:
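+                    # no explicit prefix given: default to install ('+')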
+ prefix = '+' + packages.append(Package(pname, prefix, version)) + return packages, urls + + +def get_installed_state(m, packages): + "get installed state of packages" + + cmd = get_cmd(m, 'search') + cmd.extend(['--match-exact', '--details', '--installed-only']) + cmd.extend([p.name for p in packages]) + return parse_zypper_xml(m, cmd, fail_not_found=False)[0] + + +def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): + rc, stdout, stderr = m.run_command(cmd, check_rc=False) + + try: + dom = parseXML(stdout) + except xml.parsers.expat.ExpatError as exc: + m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc), + rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + + if rc == 104: + # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found) + if fail_not_found: + errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data + m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + else: + return {}, rc, stdout, stderr + elif rc in [0, 102, 103, 106]: + # zypper exit codes + # 0: success + # 106: signature verification failed + # 102: ZYPPER_EXIT_INF_REBOOT_NEEDED - Returned after a successful installation of a patch which requires reboot of computer. + # 103: zypper was upgraded, run same command again + if packages is None: + firstrun = True + packages = {} + else: + firstrun = False + solvable_list = dom.getElementsByTagName('solvable') + for solvable in solvable_list: + name = solvable.getAttribute('name') + packages[name] = {} + packages[name]['version'] = solvable.getAttribute('edition') + packages[name]['oldversion'] = solvable.getAttribute('edition-old') + status = solvable.getAttribute('status') + packages[name]['installed'] = status == "installed" + packages[name]['group'] = solvable.parentNode.nodeName + if rc == 103 and firstrun: + # if this was the first run and it failed with 103 + # run zypper again with the same command to complete update + return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) + + return packages, rc, stdout, stderr + m.fail_json(msg='Zypper run command failed with return code %s.' 
% rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + + +def get_cmd(m, subcommand): + "puts together the basic zypper command arguments with those passed to the module" + is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] + is_refresh = subcommand == 'refresh' + cmd = [m.get_bin_path('zypper', required=True), '--quiet', '--non-interactive', '--xmlout'] + if transactional_updates(): + cmd = [m.get_bin_path('transactional-update', required=True), '--continue', '--drop-if-no-change', '--quiet', 'run'] + cmd + if m.params['extra_args_precommand']: + args_list = m.params['extra_args_precommand'].split() + cmd.extend(args_list) + # add global options before zypper command + if (is_install or is_refresh) and m.params['disable_gpg_check']: + cmd.append('--no-gpg-checks') + + if subcommand == 'search': + cmd.append('--disable-repositories') + + cmd.append(subcommand) + if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh: + cmd.extend(['--type', m.params['type']]) + if m.check_mode and subcommand != 'search': + cmd.append('--dry-run') + if is_install: + cmd.append('--auto-agree-with-licenses') + if m.params['disable_recommends']: + cmd.append('--no-recommends') + if m.params['force']: + cmd.append('--force') + if m.params['force_resolution']: + cmd.append('--force-resolution') + if m.params['oldpackage']: + cmd.append('--oldpackage') + if m.params['replacefiles']: + cmd.append('--replacefiles') + if subcommand == 'remove': + if m.params['clean_deps']: + cmd.append('--clean-deps') + if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']: + cmd.append('--allow-vendor-change') + if m.params['extra_args']: + args_list = m.params['extra_args'].split(' ') + cmd.extend(args_list) + + return cmd + + +def set_diff(m, retvals, result): + # TODO: if there is only one package, set before/after to version numbers + packages = {'installed': [], 'removed': [], 'upgraded': []} + if result: + for p in result: + group = result[p]['group'] + if group == 'to-upgrade': + versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')' + packages['upgraded'].append(p + versions) + elif group == 'to-install': + packages['installed'].append(p) + elif group == 'to-remove': + packages['removed'].append(p) + + output = '' + for state in packages: + if packages[state]: + output += state + ': ' + ', '.join(packages[state]) + '\n' + if 'diff' not in retvals: + retvals['diff'] = {} + if 'prepared' not in retvals['diff']: + retvals['diff']['prepared'] = output + else: + retvals['diff']['prepared'] += '\n' + output + + +def package_present(m, name, want_latest): + "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + packages, urls = get_want_state(name) + + # add oldpackage flag when a version is given to allow downgrades + if any(p.version for p in packages): + m.params['oldpackage'] = True + + if not want_latest: + # for state=present: filter out already installed packages + # if a version is given leave the package in to let zypper handle the version + # resolution + packageswithoutversion = [p for p in packages if not p.version] + prerun_state = {} + if packageswithoutversion: + prerun_state = get_installed_state(m, packageswithoutversion) + # generate lists of packages to install or remove + packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)] + + if not packages and not urls: + # nothing to install/remove and nothing to update 
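+            # e.g. name=['nmap', 'docker>=1.10'] with nmap already installed keeps
+            # only 'docker>=1.10' above; returning None tells main() that zypper
+            # does not need to run at all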
+ return None, retvals + + # zypper install also updates packages + cmd = get_cmd(m, 'install') + cmd.append('--') + cmd.extend(urls) + # pass packages to zypper + # allow for + or - prefixes in install/remove lists + # also add version specifier if given + # do this in one zypper run to allow for dependency-resolution + # for example "-exim postfix" runs without removing packages depending on mailserver + cmd.extend([str(p) for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return result, retvals + + +def package_update_all(m): + "run update or patch on all available packages" + + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + if m.params['type'] == 'patch': + cmdname = 'patch' + elif m.params['state'] == 'dist-upgrade': + cmdname = 'dist-upgrade' + else: + cmdname = 'update' + + cmd = get_cmd(m, cmdname) + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def package_absent(m, name): + "remove the packages in name" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + # Get package state + packages, urls = get_want_state(name, remove=True) + if any(p.prefix == '+' for p in packages): + m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") + if urls: + m.fail_json(msg="Can not remove via URL.") + if m.params['type'] == 'patch': + m.fail_json(msg="Can not remove patches.") + prerun_state = get_installed_state(m, packages) + packages = [p for p in packages if p.name in prerun_state] + + if not packages: + return None, retvals + + cmd = get_cmd(m, 'remove') + cmd.extend([p.name + p.version for p in packages]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals + + +def repo_refresh(m): + "update the repositories" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + + cmd = get_cmd(m, 'refresh') + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return retvals + + +def get_fs_type_and_readonly_state(mount_point): + with open('/proc/mounts', 'r') as file: + for line in file.readlines(): + fields = line.split() + path = fields[1] + if path == mount_point: + fs = fields[2] + opts = fields[3] + return fs, 'ro' in opts.split(',') + return None + + +def transactional_updates(): + return os.path.exists('/usr/sbin/transactional-update') and get_fs_type_and_readonly_state('/') == ('btrfs', True) + + +# =========================================== +# Main control flow + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['pkg'], type='list', elements='str'), + state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), + type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), + extra_args_precommand=dict(required=False, default=None), + disable_gpg_check=dict(required=False, default=False, type='bool'), + disable_recommends=dict(required=False, default=True, type='bool'), + force=dict(required=False, default=False, type='bool'), + force_resolution=dict(required=False, default=False, type='bool'), + update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'), + oldpackage=dict(required=False, default=False, type='bool'), + extra_args=dict(required=False, 
default=None), + allow_vendor_change=dict(required=False, default=False, type='bool'), + replacefiles=dict(required=False, default=False, type='bool'), + clean_deps=dict(required=False, default=False, type='bool'), + ), + supports_check_mode=True + ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + + name = module.params['name'] + state = module.params['state'] + update_cache = module.params['update_cache'] + + # remove empty strings from package list + name = list(filter(None, name)) + + # Refresh repositories + if update_cache and not module.check_mode: + retvals = repo_refresh(module) + + if retvals['rc'] != 0: + module.fail_json(msg="Zypper refresh run failed.", **retvals) + + # Perform requested action + if name == ['*'] and state in ['latest', 'dist-upgrade']: + packages_changed, retvals = package_update_all(module) + elif name != ['*'] and state == 'dist-upgrade': + module.fail_json(msg="Can not dist-upgrade specific packages.") + else: + if state in ['absent', 'removed']: + packages_changed, retvals = package_absent(module, name) + elif state in ['installed', 'present', 'latest']: + packages_changed, retvals = package_present(module, name, state == 'latest') + + retvals['changed'] = retvals['rc'] in [0, 102] and bool(packages_changed) + + if module._diff: + set_diff(module, retvals, packages_changed) + + if retvals['rc'] not in [0, 102]: + module.fail_json(msg="Zypper run failed.", **retvals) + + if not retvals['changed']: + del retvals['stdout'] + del retvals['stderr'] + + module.exit_json(name=name, state=state, update_cache=update_cache, **retvals) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/community/general/plugins/modules/zypper_repository.py b/ansible_collections/community/general/plugins/modules/zypper_repository.py new file mode 100644 index 000000000..cccd9c579 --- /dev/null +++ b/ansible_collections/community/general/plugins/modules/zypper_repository.py @@ -0,0 +1,474 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, Matthias Vogelgesang +# Copyright (c) 2014, Justin Lecher +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: zypper_repository +author: "Matthias Vogelgesang (@matze)" +short_description: Add and remove Zypper repositories +description: + - Add or remove Zypper repositories on SUSE and openSUSE +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - A name for the repository. Not required when adding repofiles. + type: str + repo: + description: + - URI of the repository or .repo file. Required when state=present. + type: str + state: + description: + - A source string state. + choices: [ "absent", "present" ] + default: "present" + type: str + description: + description: + - A description of the repository + type: str + disable_gpg_check: + description: + - Whether to disable GPG signature checking of + all packages. Has an effect only if state is + I(present). + - Needs zypper version >= 1.6.2. + type: bool + default: false + autorefresh: + description: + - Enable autorefresh of the repository. + type: bool + default: true + aliases: [ "refresh" ] + priority: + description: + - Set priority of repository. 
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/general/plugins/modules/zypper_repository.py b/ansible_collections/community/general/plugins/modules/zypper_repository.py
new file mode 100644
index 000000000..cccd9c579
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zypper_repository.py
@@ -0,0 +1,474 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Matthias Vogelgesang
+# Copyright (c) 2014, Justin Lecher
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper_repository
+author: "Matthias Vogelgesang (@matze)"
+short_description: Add and remove Zypper repositories
+description:
+    - Add or remove Zypper repositories on SUSE and openSUSE.
+extends_documentation_fragment:
+    - community.general.attributes
+attributes:
+    check_mode:
+        support: none
+    diff_mode:
+        support: none
+options:
+    name:
+        description:
+            - A name for the repository. Not required when adding repofiles.
+        type: str
+    repo:
+        description:
+            - URI of the repository or .repo file. Required when state=present.
+        type: str
+    state:
+        description:
+            - Whether the repository should be present or absent.
+        choices: [ "absent", "present" ]
+        default: "present"
+        type: str
+    description:
+        description:
+            - A description of the repository.
+        type: str
+    disable_gpg_check:
+        description:
+            - Whether to disable GPG signature checking of
+              all packages. Has an effect only if state is
+              I(present).
+            - Needs zypper version >= 1.6.2.
+        type: bool
+        default: false
+    autorefresh:
+        description:
+            - Enable autorefresh of the repository.
+        type: bool
+        default: true
+        aliases: [ "refresh" ]
+    priority:
+        description:
+            - Set priority of repository. Packages will always be installed
+              from the repository with the smallest priority number.
+            - Needs zypper version >= 1.12.25.
+        type: int
+    overwrite_multiple:
+        description:
+            - Overwrite multiple repository entries if repositories with both name and
+              URL already exist.
+        type: bool
+        default: false
+    auto_import_keys:
+        description:
+            - Automatically import the gpg signing key of the new or changed repository.
+            - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+            - Implies I(runrefresh).
+            - Only works with C(.repo) files if I(name) is given explicitly.
+        type: bool
+        default: false
+    runrefresh:
+        description:
+            - Refresh the package list of the given repository.
+            - Can be used with repo=* to refresh all repositories.
+        type: bool
+        default: false
+    enabled:
+        description:
+            - Set repository to enabled (or disabled).
+        type: bool
+        default: true
+
+
+requirements:
+    - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+    - python-xml
+'''
+
+EXAMPLES = '''
+- name: Add NVIDIA repository for graphics drivers
+  community.general.zypper_repository:
+    name: nvidia-repo
+    repo: 'ftp://download.nvidia.com/opensuse/12.2'
+    state: present
+
+- name: Remove NVIDIA repository
+  community.general.zypper_repository:
+    name: nvidia-repo
+    repo: 'ftp://download.nvidia.com/opensuse/12.2'
+    state: absent
+
+- name: Add python development repository
+  community.general.zypper_repository:
+    repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
+
+- name: Refresh all repos
+  community.general.zypper_repository:
+    repo: '*'
+    runrefresh: true
+
+- name: Add a repo and add its gpg key
+  community.general.zypper_repository:
+    repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
+    auto_import_keys: true
+
+- name: Force refresh of a repository
+  community.general.zypper_repository:
+    repo: 'http://my_internal_ci_repo/repo'
+    name: my_ci_repo
+    state: present
+    runrefresh: true
+'''
+
+import traceback
+
+XML_IMP_ERR = None
+try:
+    from xml.dom.minidom import parseString as parseXML
+    HAS_XML = True
+except ImportError:
+    XML_IMP_ERR = traceback.format_exc()
+    HAS_XML = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six.moves import configparser, StringIO
+from io import open
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
+
+
+def _get_cmd(module, *args):
+    """Combines the non-interactive zypper command with arguments/subcommands"""
+    cmd = [module.get_bin_path('zypper', required=True), '--quiet', '--non-interactive']
+    cmd.extend(args)
+
+    return cmd
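+
+
+# Editorial sketch, not part of the upstream module and never called by it:
+# the XML shape that _parse_repos() below walks with xml.dom.minidom. The
+# repository values and URL are hypothetical; only the structure matters.
+def _example_parse_repos_xml():
+    sample = (
+        '<stream><repo-list>'
+        '<repo alias="repo-oss" name="Main Repository" enabled="1"'
+        ' autorefresh="1" gpgcheck="1" priority="99">'
+        '<url>http://download.opensuse.org/distribution/leap/15.5/repo/oss/</url>'
+        '</repo></repo-list></stream>'
+    )
+    repo = parseXML(sample).getElementsByTagName('repo')[0]
+    # attributes map onto REPO_OPTS; the URL lives in a child <url> element
+    return repo.getAttribute('alias'), repo.getElementsByTagName('url')[0].firstChild.data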
+
+
+def _parse_repos(module):
+    """parses the output of zypper --xmlout repos and returns a list of parsed repo dictionaries"""
+    cmd = _get_cmd(module, '--xmlout', 'repos')
+
+    if not HAS_XML:
+        module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR)
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+    if rc == 0:
+        repos = []
+        dom = parseXML(stdout)
+        repo_list = dom.getElementsByTagName('repo')
+        for repo in repo_list:
+            opts = {}
+            for o in REPO_OPTS:
+                opts[o] = repo.getAttribute(o)
+            opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+            # A repo can be uniquely identified by an alias + url
+            repos.append(opts)
+        return repos
+    # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
+    elif rc == 6:
+        return []
+    else:
+        module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
+
+def _repo_changes(module, realrepo, repocmp):
+    "Check whether the two given repos have different settings."
+    for k in repocmp:
+        if repocmp[k] and k not in realrepo:
+            return True
+
+    for k, v in realrepo.items():
+        if k in repocmp and repocmp[k]:
+            valold = str(repocmp[k] or "")
+            valnew = v or ""
+            if k == "url":
+                # expand zypper's repo variables before comparing URLs: querying
+                # the package that owns /etc/os-release yields the distribution
+                # version ($releasever) and architecture ($basearch)
+                if '$releasever' in valold or '$releasever' in valnew:
+                    cmd = ['rpm', '-q', '--qf', '%{version}', '-f', '/etc/os-release']
+                    rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+                    valnew = valnew.replace('$releasever', stdout)
+                    valold = valold.replace('$releasever', stdout)
+                if '$basearch' in valold or '$basearch' in valnew:
+                    cmd = ['rpm', '-q', '--qf', '%{arch}', '-f', '/etc/os-release']
+                    rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+                    valnew = valnew.replace('$basearch', stdout)
+                    valold = valold.replace('$basearch', stdout)
+                valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+            if valold != valnew:
+                return True
+    return False
+
+
+def repo_exists(module, repodata, overwrite_multiple):
+    """Check whether the repository already exists.
+
+        returns (exists, mod, old_repos)
+            exists: whether a matching (name, URL) repo exists
+            mod: whether there are changes compared to the existing repo
+            old_repos: list of matching repos
+    """
+    existing_repos = _parse_repos(module)
+
+    # look for repos that have matching alias or url to the one searched
+    repos = []
+    for kw in ['alias', 'url']:
+        name = repodata[kw]
+        for oldr in existing_repos:
+            if repodata[kw] == oldr[kw] and oldr not in repos:
+                repos.append(oldr)
+
+    if len(repos) == 0:
+        # Repo does not exist yet
+        return (False, False, None)
+    elif len(repos) == 1:
+        # Found an existing repo, look for changes
+        has_changes = _repo_changes(module, repos[0], repodata)
+        return (True, has_changes, repos)
+    elif len(repos) >= 2:
+        if overwrite_multiple:
+            # Found two repos and want to overwrite_multiple
+            return (True, True, repos)
+        else:
+            errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+            errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten.'
+            module.fail_json(msg=errmsg)
+
+
+def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+    "Adds the repo, first removing any old repos that would conflict."
+    repo = repodata['url']
+    cmd = _get_cmd(module, 'addrepo', '--check')
+    if repodata['name']:
+        cmd.extend(['--name', repodata['name']])
+
+    # priority on addrepo available since 1.12.25
+    # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
+    if repodata['priority']:
+        if zypper_version >= LooseVersion('1.12.25'):
+            cmd.extend(['--priority', str(repodata['priority'])])
+        else:
+            warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
+
+    if repodata['enabled'] == '0':
+        cmd.append('--disable')
+
+    # gpgcheck available since 1.6.2
+    # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
+    # the default changed in the past, so don't assume a default here and show warning for old zypper versions
+    if zypper_version >= LooseVersion('1.6.2'):
+        if repodata['gpgcheck'] == '1':
+            cmd.append('--gpgcheck')
+        else:
+            cmd.append('--no-gpgcheck')
+    else:
+        warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
+
+    if repodata['autorefresh'] == '1':
+        cmd.append('--refresh')
+
+    cmd.append(repo)
+
+    if not repo.endswith('.repo'):
+        cmd.append(repodata['alias'])
+
+    if old_repos is not None:
+        for oldrepo in old_repos:
+            remove_repo(module, oldrepo['url'])
+
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+    return rc, stdout, stderr
+
+
+def remove_repo(module, repo):
+    "Removes the repo."
+    cmd = _get_cmd(module, 'removerepo', repo)
+
+    rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+    return rc, stdout, stderr
+
+
+def get_zypper_version(module):
+    rc, stdout, stderr = module.run_command([module.get_bin_path('zypper', required=True), '--version'])
+    if rc != 0 or not stdout.startswith('zypper '):
+        return LooseVersion('1.0')
+    return LooseVersion(stdout.split()[1])
+
+
+def runrefreshrepo(module, auto_import_keys=False, shortname=None):
+    "Forces zypper to refresh repo metadata."
+    if auto_import_keys:
+        cmd = _get_cmd(module, '--gpg-auto-import-keys', 'refresh', '--force')
+    else:
+        cmd = _get_cmd(module, 'refresh', '--force')
+    if shortname is not None:
+        cmd.extend(['-r', shortname])
+
+    rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+    return rc, stdout, stderr
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=False),
+            repo=dict(required=False),
+            state=dict(choices=['present', 'absent'], default='present'),
+            runrefresh=dict(required=False, default=False, type='bool'),
+            description=dict(required=False),
+            disable_gpg_check=dict(required=False, default=False, type='bool'),
+            autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
+            priority=dict(required=False, type='int'),
+            enabled=dict(required=False, default=True, type='bool'),
+            overwrite_multiple=dict(required=False, default=False, type='bool'),
+            auto_import_keys=dict(required=False, default=False, type='bool'),
+        ),
+        supports_check_mode=False,
+        required_one_of=[['state', 'runrefresh']],
+    )
+
+    repo = module.params['repo']
+    alias = module.params['name']
+    state = module.params['state']
+    overwrite_multiple = module.params['overwrite_multiple']
+    auto_import_keys = module.params['auto_import_keys']
+    runrefresh = module.params['runrefresh']
+
+    zypper_version = get_zypper_version(module)
+    warnings = []  # collect warning messages for final output
+
+    repodata = {
+        'url': repo,
+        'alias': alias,
+        'name': module.params['description'],
+        'priority': module.params['priority'],
+    }
+    # rewrite bools into the '0'/'1' strings that zypper lr -x reports, for easier comparison
+    if module.params['enabled']:
+        repodata['enabled'] = '1'
+    else:
+        repodata['enabled'] = '0'
+    if module.params['disable_gpg_check']:
+        repodata['gpgcheck'] = '0'
+    else:
+        repodata['gpgcheck'] = '1'
+    if module.params['autorefresh']:
+        repodata['autorefresh'] = '1'
+    else:
+        repodata['autorefresh'] = '0'
+
+    def exit_unchanged():
+        module.exit_json(changed=False, repodata=repodata, state=state)
+
+    # Check run-time module parameters
+    if repo == '*' or alias == '*':
+        if runrefresh:
+            runrefreshrepo(module, auto_import_keys)
+            module.exit_json(changed=False, runrefresh=True)
+        else:
+            module.fail_json(msg='repo=* can only be used with the runrefresh option.')
+
+    if state == 'present' and not repo:
+        module.fail_json(msg='Module option state=present requires repo')
+    if state == 'absent' and not repo and not alias:
+        module.fail_json(msg='Alias or repo parameter required when state=absent')
+
+    if repo and repo.endswith('.repo'):
+        if alias:
+            module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files.')
+    else:
+        if not alias and state == "present":
+            module.fail_json(msg='Name required when adding non-repo files.')
+
+    # Download / Open and parse .repo file to ensure idempotency
+    if repo and repo.endswith('.repo'):
+        if repo.startswith(('http://', 'https://')):
+            response, info = fetch_url(module=module, url=repo, force=True)
+            if not response or info['status'] != 200:
+                module.fail_json(msg='Error downloading .repo file from provided URL')
+            repofile_text = to_text(response.read(), errors='surrogate_or_strict')
+        else:
+            try:
+                with open(repo, encoding='utf-8') as file:
+                    repofile_text = file.read()
+            except IOError:
+                module.fail_json(msg='Error opening .repo file from provided path')
+
+        repofile = configparser.ConfigParser()
+        try:
+            repofile.readfp(StringIO(repofile_text))
+        except configparser.Error:
+            module.fail_json(msg='Invalid format, .repo file could not be parsed')
+
+        # No support for .repo file with zero or more than one repository
+        if len(repofile.sections()) != 1:
+            err = "Invalid format, .repo file contains %s repositories, expected 1" % len(repofile.sections())
+            module.fail_json(msg=err)
+
+        section = repofile.sections()[0]
+        repofile_items = dict(repofile.items(section))
+        # Only proceed if at least baseurl is available
+        if 'baseurl' not in repofile_items:
+            module.fail_json(msg='No baseurl found in .repo file')
+
+        # Set alias (name) and url based on values from .repo file
+        alias = section
+        repodata['alias'] = section
+        repodata['url'] = repofile_items['baseurl']
+
+        # If gpgkey is part of the .repo file, auto import key
+        if 'gpgkey' in repofile_items:
+            auto_import_keys = True
+
+        # Map additional values, if available
+        if 'name' in repofile_items:
+            repodata['name'] = repofile_items['name']
+        if 'enabled' in repofile_items:
+            repodata['enabled'] = repofile_items['enabled']
+        if 'autorefresh' in repofile_items:
+            repodata['autorefresh'] = repofile_items['autorefresh']
+        if 'gpgcheck' in repofile_items:
+            repodata['gpgcheck'] = repofile_items['gpgcheck']
+
+    exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
+
+    if alias:
+        shortname = alias
+    else:
+        shortname = repo
+
+    if state == 'present':
+        if exists and not mod:
+            if runrefresh:
+                runrefreshrepo(module, auto_import_keys, shortname)
+            exit_unchanged()
+        rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+        if rc == 0 and (runrefresh or auto_import_keys):
+            runrefreshrepo(module, auto_import_keys, shortname)
+    elif state == 'absent':
+        if not exists:
+            exit_unchanged()
+        rc, stdout, stderr = remove_repo(module, shortname)
+
+    if rc == 0:
+        module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+    else:
+        module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
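+
+
+# Editorial sketch, not part of the upstream module and never called by it:
+# how _repo_changes() above expands zypper's repo variables before comparing
+# URLs. rpm's -f flag queries the package that owns /etc/os-release, so that
+# package's %{version} and %{arch} stand in for $releasever and $basearch.
+def _example_expand_repo_vars():
+    url = 'http://download.opensuse.org/distribution/leap/$releasever/repo/oss/'
+    releasever = '15.5'  # hypothetical value; _repo_changes() gets this from rpm
+    return url.replace('$releasever', releasever)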
+
+
+if __name__ == '__main__':
+    main()
-- 
cgit v1.2.3