author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
commit     38b7c80217c4e72b1d8988eb1e60bb6e77334114 (patch)
tree       356e9fd3762877d07cde52d21e77070aeff7e789 /ansible_collections/openstack
parent     Adding upstream version 7.7.0+dfsg. (diff)
download   ansible-38b7c80217c4e72b1d8988eb1e60bb6e77334114.tar.xz
           ansible-38b7c80217c4e72b1d8988eb1e60bb6e77334114.zip
Adding upstream version 9.4.0+dfsg. (upstream/9.4.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/openstack')
-rw-r--r--  ansible_collections/openstack/cloud/CHANGELOG.rst | 211
-rw-r--r--  ansible_collections/openstack/cloud/CONTRIBUTING.rst | 40
-rw-r--r--  ansible_collections/openstack/cloud/FILES.json | 798
-rw-r--r--  ansible_collections/openstack/cloud/MANIFEST.json | 4
-rw-r--r--  ansible_collections/openstack/cloud/README.md | 245
-rw-r--r--  ansible_collections/openstack/cloud/docs/branching.md | 115
-rw-r--r--  ansible_collections/openstack/cloud/docs/contributing.md | 191
-rw-r--r--  ansible_collections/openstack/cloud/docs/devstack.md | 107
-rw-r--r--  ansible_collections/openstack/cloud/docs/openstack_guidelines.rst | 68
-rw-r--r--  ansible_collections/openstack/cloud/docs/releasing.md | 125
-rw-r--r--  ansible_collections/openstack/cloud/docs/reviewing.md | 66
-rw-r--r--  ansible_collections/openstack/cloud/meta/runtime.yml | 547
-rw-r--r--  ansible_collections/openstack/cloud/plugins/doc_fragments/openstack.py | 13
-rw-r--r--  ansible_collections/openstack/cloud/plugins/inventory/openstack.py | 705
-rw-r--r--  ansible_collections/openstack/cloud/plugins/module_utils/ironic.py | 7
-rw-r--r--  ansible_collections/openstack/cloud/plugins/module_utils/openstack.py | 78
-rw-r--r--  ansible_collections/openstack/cloud/plugins/module_utils/resource.py | 237
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/address_scope.py | 101
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/auth.py | 34
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/baremetal_deploy_template.py | 198
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/baremetal_inspect.py | 375
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/baremetal_node.py | 933
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/baremetal_node_action.py | 428
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/baremetal_node_info.py | 450
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/baremetal_port.py | 350
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/baremetal_port_info.py | 170
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/catalog_service.py | 225
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/catalog_service_info.py | 100
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/coe_cluster.py | 586
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/coe_cluster_template.py | 792
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/compute_flavor.py | 485
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/compute_flavor_access.py | 206
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/compute_flavor_info.py | 290
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/compute_service_info.py | 152
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/config.py | 83
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/container.py | 207
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/dns_zone.py | 417
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/dns_zone_info.py | 261
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/endpoint.py | 95
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/federation_idp.py | 247
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/federation_idp_info.py | 90
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/federation_mapping.py | 185
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/federation_mapping_info.py | 87
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/floating_ip.py | 587
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/floating_ip_info.py | 116
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/group_assignment.py | 108
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/host_aggregate.py | 68
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/identity_domain.py | 228
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/identity_domain_info.py | 127
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/identity_group.py | 187
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/identity_group_info.py | 183
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/identity_role.py | 154
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/identity_role_info.py | 76
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/identity_user.py | 368
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/identity_user_info.py | 191
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/image.py | 681
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/image_info.py | 420
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/keypair.py | 82
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/keypair_info.py | 21
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol.py | 218
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol_info.py | 96
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/lb_health_monitor.py | 575
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/lb_listener.py | 572
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/lb_member.py | 510
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/lb_pool.py | 513
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/loadbalancer.py | 1196
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/network.py | 267
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/networks_info.py | 137
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policies_info.py | 259
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policy.py | 417
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/object.py | 378
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/object_container.py | 456
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_auth.py | 62
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_client_config.py | 76
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_coe_cluster.py | 292
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_coe_cluster_template.py | 388
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_flavor_info.py | 247
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_floating_ip.py | 307
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_group.py | 157
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_group_info.py | 150
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_image.py | 270
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_image_info.py | 204
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_ironic.py | 441
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_ironic_inspect.py | 133
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_ironic_node.py | 362
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keypair.py | 156
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_domain.py | 175
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_domain_info.py | 119
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_endpoint.py | 218
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_federation_protocol.py | 187
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_federation_protocol_info.py | 98
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_identity_provider.py | 220
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_identity_provider_info.py | 89
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_mapping.py | 197
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_mapping_info.py | 88
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_role.py | 113
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_keystone_service.py | 190
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_listener.py | 287
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_loadbalancer.py | 691
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_member.py | 235
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_network.py | 245
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_networks_info.py | 149
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_nova_flavor.py | 274
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_nova_host_aggregate.py | 236
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_object.py | 120
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_pool.py | 263
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_port.py | 530
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_port_info.py | 210
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_project.py | 220
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_project_access.py | 193
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_project_info.py | 156
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_quota.py | 466
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_recordset.py | 260
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_router.py | 571
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_routers_info.py | 194
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_security_group.py | 153
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_security_group_rule.py | 389
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_server.py | 805
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_server_action.py | 236
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_server_group.py | 162
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_server_info.py | 96
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_server_metadata.py | 165
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_server_volume.py | 139
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_stack.py | 248
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_subnet.py | 364
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_subnets_info.py | 164
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_user.py | 263
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_user_group.py | 96
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_user_info.py | 153
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_user_role.py | 190
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_volume.py | 263
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_volume_snapshot.py | 167
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/os_zone.py | 244
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/port.py | 956
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/port_info.py | 231
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/project.py | 375
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/project_access.py | 193
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/project_info.py | 195
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/quota.py | 628
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/recordset.py | 41
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/resource.py | 425
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/resources.py | 141
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/role_assignment.py | 278
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/router.py | 796
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/routers_info.py | 190
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/security_group.py | 617
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/security_group_info.py | 159
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/security_group_rule.py | 649
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/security_group_rule_info.py | 239
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/server.py | 1382
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/server_action.py | 375
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/server_group.py | 149
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/server_info.py | 364
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/server_metadata.py | 455
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/server_volume.py | 203
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/stack.py | 305
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/stack_info.py | 246
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/subnet.py | 652
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/subnet_pool.py | 458
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/subnets_info.py | 119
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume.py | 393
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_backup.py | 315
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_backup_info.py | 103
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_info.py | 174
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_snapshot.py | 277
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_snapshot_info.py | 110
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_type.py | 241
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_type_access.py | 174
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_type_encryption.py | 233
-rw-r--r--  ansible_collections/openstack/cloud/plugins/modules/volume_type_info.py | 175
-rw-r--r--  ansible_collections/openstack/cloud/requirements.txt | 2
-rw-r--r--  ansible_collections/openstack/cloud/scripts/inventory/openstack.yml | 24
-rw-r--r--  ansible_collections/openstack/cloud/scripts/inventory/openstack_inventory.py | 281
173 files changed, 19543 insertions, 28611 deletions
diff --git a/ansible_collections/openstack/cloud/CHANGELOG.rst b/ansible_collections/openstack/cloud/CHANGELOG.rst
index 7feb2ad59..858fd93a4 100644
--- a/ansible_collections/openstack/cloud/CHANGELOG.rst
+++ b/ansible_collections/openstack/cloud/CHANGELOG.rst
@@ -1,135 +1,164 @@
-=============================================
-Openstack Cloud Ansilbe modules Release Notes
-=============================================
+==========================================
+Ansible OpenStack Collection Release Notes
+==========================================
.. contents:: Topics
-v1.10.0
-=======
+v2.2.0
+======
Release Summary
---------------
-Enable logging of openstacksdk activities and warn users about incompatible openstacksdk releases when using inventory plugin
+New module for volume_type and bugfixes
-Bugfixes
---------
+Minor Changes
+-------------
-- Add SDK logging option for openstack ansible collections
-- Don't use deprecated distutils from python 3.10
-- Ensure openstacksdk compatibility in inventory plugin
-- Lowered maximum OpenStack SDK version to 0.98.999 in inventory plugin
+- Add volume_encryption_type modules
+- Add volume_type modules
-Known Issues
-------------
+Bugfixes
+--------
-- For compatibility with OpenStack SDK >= 0.99.0 use Ansible OpenStack collection 2.0.0 or later which is currently under development.
-- Release series 1.x.x of this collection is compatible to OpenStack SDK prior to 0.99.0 only.
+- Fix image module filter
+- Fix port module idempotency
+- Fix router module idempotency
-v1.9.1
+v2.1.0
======
Release Summary
---------------
-Bugfix in keypair module
+New module for Ironic and bugfixes
-Bugfixes
---------
+Minor Changes
+-------------
-- Do not remove trailing spaces when reading public key in keypair module
+- Add baremetal_deploy_template module
+- Highlight our mode of operation more prominently
-Known Issues
-------------
+Bugfixes
+--------
-- For compatibility with OpenStack SDK >= 0.99.0 use Ansible OpenStack collection 2.0.0 or later which is currently under development.
-- Release series 1.x.x of this collection is compatible to OpenStack SDK prior to 0.99.0 only.
+- Change security group rules only when instructed to do so
+- Fix for AttributeError: 'dict' object has no attribute 'status'
+- Fix issue with multiple records in recordset
+- Fix mistake in compute_flavor_access notes
+- Fixed private option in inventory plugin
+- Respect description option and delete security group rules first
+- Use true and false instead of yes and no for boolean values
-v1.9.0
+v2.0.0
======
Release Summary
---------------
-This release will enforce openstacksdk<0.99.0, has a dozen modules refactored and several bugs fixed.
+Our new major release 2.0.0 of the Ansible collection for OpenStack clouds aka ``openstack.cloud`` is a complete overhaul of the code base and brings full compatibility with openstacksdk 1.0.0.
-Bugfixes
---------
+Highlights of this release are
+* three new modules which for example provide a generic and uniform API for interacting with OpenStack cloud resources,
+* a complete refactoring of all existing modules bringing dozens of bugfixes, new features as well as consistent
+ and properly documented module results and options,
+* 100% compatibility with openstacksdk's first major release 1.0.0,
+* new guides for contributors from devstack setup over coding guidelines to our release process and
+* massively increased CI coverage with many new integration tests, now covering all modules and plugins.
-- Added support for specifying a maximum version of the OpenStack SDK
-- Constrain filters in compute_service_info to SDK >= 0.53.0
-- Drop username from return values of identity_user_info
-- Fix logic in routers_info
-- Fixed return value disable{d,s}_reason in compute_service_info module
-- Fixed return values in compute_service_info module again
-- Follow up to bump of minimum required OpenStack SDK release to SDK 0.36.0 (Train)
-- Lowered maximum OpenStack SDK version to 0.98.999
-- Move dns zone info to use proxy layer
-- Refactored catalog_service module
-- Refactored endpoint module
-- Refactored host_aggregate module
-- Refactored identity_domain_info module
-- Refactored identity_group_info module
-- Refactored identity_role module
-- Refactored identity_role_info module
-- Refactored identity_user module
-- Refactored identity_user_info module
-- Refactored image_info module
-- Refactored keypair_info module
-- Refactored recordset module
-- Refactored role_assignment module
-- Set owner in image module
-- Support description in sg-rule creation
-- Warn users about us breaking backward compatibility
-
-Known Issues
-------------
-
-- For compatibility with OpenStack SDK >= 0.99.0 use Ansible OpenStack collection 2.0.0 or later which is currently under development.
-- Release series 1.x.x of this collection is compatible to OpenStack SDK prior to 0.99.0 only.
-
-v1.8.0
-======
+Note, this ``2.0.0`` release *breaks backward compatibility* with previous ``1.x.x`` releases!
+* ``2.x.x`` releases of this collection are compatible with openstacksdk ``1.x.x`` and later *only*,
+* ``1.x.x`` releases of this collection are compatible with openstacksdk ``0.x.x`` prior to ``0.99.0`` *only*,
+* ``2.x.x`` releases of this collection are not backward compatible with ``1.x.x`` releases,
+* ``1.x.x`` release series will be in maintenance mode now and receive bugfixes only.
-Release Summary
----------------
-
-Subnet pool module and bugfixes
-
-Bugfixes
---------
+However, this collection as well as openstacksdk continue to be backward compatible with clouds running on older OpenStack releases. For example, it is fine and a fully supported use case to use this 2.0.0 release with clouds based on OpenStack Train, Wallaby or Zed. Feel encouraged to always use the latest releases of this collection and openstacksdk regardless of which version of OpenStack is installed in your cloud.
-- Add 'all_projects' to server_action module
-- Add subnet pool module
-- Bumped minimum required OpenStack SDK release to SDK 0.36.0 (Train)
-- Changed compute_flavor_info module to use OpenStack SDK's proxy layer
-- Dropped deprecated return values in floating_ip_info and assert remaining fields
-- Fix ansible-lint issues for the newest version
-- Fix assertion after stack deletion
-- Handle aggregate host list set to None
-- Reenabled check-import.sh which tests imports to Ansible Galaxy
-- Remove old, unsupported parameters from documentation in image_info module
-- Router - Remove unneeded 'filter' parameter
-- Updated return value docs of compute_service_info module
+This collection is compatible with and tested with Ansible 2.9 and later. However, support for old ``os_*`` short module names such as ``os_server`` has been dropped with this release. You have to call modules using their FQCN (Fully-Qualified Collection Name) such as ``openstack.cloud.server`` instead, as shown below.
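
For illustration, a minimal task calling a module by its FQCN could look as follows (``devstack``, ``cirros`` and ``m1.tiny`` are placeholder names for a cloud, an image and a flavor)::

    - name: Boot a server via the fully qualified module name
      openstack.cloud.server:
        cloud: devstack   # named cloud from clouds.yaml
        name: demo-server
        image: cirros     # placeholder image name
        flavor: m1.tiny   # placeholder flavor name
        state: present
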
-New Modules
------------
+Many thanks to all contributors who made this release possible. Tens of thousands of lines of code have been reviewed, changed, fixed and tested throughout the last year. You rock!
-- openstack.cloud.subnet_pool - Create or Delete subnet pools from OpenStack.
+Major Changes
+-------------
-v1.7.2
-======
+- Many modules gained support for Ansible's check mode or have been fixed to properly implement a no-change policy during check mode runs.
+- Many modules gained support for updates. In the past, those modules allowed creating and deleting OpenStack cloud resources but would ignore changes to module options.
+- Many modules such as ``openstack.cloud.server``, ``openstack.cloud.baremetal_node`` and all load-balancer related modules now properly implement the ``wait`` option. For example, when ``wait`` is set to ``true``, modules will not return until resources have reached their ``active`` or ``deleted`` state.
+- Module ``openstack.cloud.resource`` has been added. It provides a generic and uniform interface to create, update and delete any OpenStack cloud resource which openstacksdk supports. This module unlocks a huge amount of functionality from OpenStack clouds to Ansible users which has been inaccessible with existing modules so far (see the playbook sketch after this list).
+- Module ``openstack.cloud.resources`` has been added. It provides a generic and uniform interface to list any type of OpenStack cloud resource which openstacksdk supports. This module fetches any OpenStack cloud resource without having to implement a new Ansible ``*_info`` module for this type of resource first.
+- Module ``openstack.cloud.subnet_pool`` has been added. It allows creating and deleting subnet pools in OpenStack clouds.
+- Module examples have been improved and updated for most modules.
+- Module results have been properly documented for all modules.
+- Options in all modules have been renamed to match openstacksdk's attribute names (if applicable). The previous option names have been added as aliases to keep module options backward compatible.
+- Our CI integration tests have been massively expanded. Our test coverage spans across all modules and plugins now, including tests for our inventory plugin and our new ``openstack.cloud.resource`` and ``openstack.cloud.resources`` modules.
+- Our contributors documentation has been heavily extended. In directory ``docs`` you will find the rationale for our branching strategy, a developer's guide on how to contribute to the collection, a tutorial to set up a DevStack environment for hacking on and testing the collection, a step-by-step guide for publishing new releases and a list of questions to ask when doing reviews or submitting patches for review.
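
For example, a rough sketch of the two generic modules in a playbook (the cloud name ``devstack`` and the network attributes are illustrative assumptions; see the module documentation for the exact option set)::

    # Create or update an arbitrary resource through the generic interface.
    - name: Ensure an external network exists
      openstack.cloud.resource:
        cloud: devstack     # placeholder named cloud
        service: network    # openstacksdk service proxy
        type: network       # openstacksdk resource type
        attributes:
          name: ansible_network_external
          is_router_external: true
        state: present

    # List resources of a given type without a dedicated *_info module.
    - name: List all images known to the cloud
      openstack.cloud.resources:
        cloud: devstack
        service: image
        type: image
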
-Release Summary
----------------
+Minor Changes
+-------------
-Bugfixes
+- Added generic module options ``sdk_log_path`` and ``sdk_log_level`` which allow tracking openstacksdk activity (see the sketch after this list).
+- Many more options were added to modules but we stopped counting at one point...
+- Module ``openstack.cloud.coe_cluster`` gained support for option ``is_floating_ip_enabled``.
+- Module ``openstack.cloud.lb_listener`` gained options ``default_tls_container_ref`` and ``sni_container_refs`` which allow specifying TLS certificates when using the ``TERMINATED_HTTPS`` protocol.
+- Module ``openstack.cloud.network`` gained support for updates, i.e. existing networks will be properly updated now when module options such as ``mtu`` or ``admin_state_up`` have been changed.
+- Module ``openstack.cloud.port`` gained a ``description`` option.
+- Module ``openstack.cloud.role_assignment`` gained a ``system`` option.
+- Module ``openstack.cloud.security_group_rule`` gained a ``description`` option.
+- Module ``openstack.cloud.server_action`` gained an option ``all_projects`` which allows executing actions on servers outside of the current auth-scoped project (if the user has permission to do so).
+- Module ``openstack.cloud.server_info`` gained a ``description`` option.
+- Module ``openstack.cloud.server`` gained a ``description`` option.
+- Module ``openstack.cloud.server`` gained support for updates. For example, options such as ``description`` and floating ip addresses can be updated now.
+- Module ``openstack.cloud.subnet`` gained a ``subnet_pool`` option.
+
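
A small sketch of the new SDK logging options in use (the log path and the ``compute_flavor_info`` call are arbitrary examples)::

    - name: Gather compute flavors with openstacksdk logging enabled
      openstack.cloud.compute_flavor_info:
        cloud: devstack                      # placeholder named cloud
        sdk_log_level: DEBUG                 # verbose openstacksdk output
        sdk_log_path: /tmp/openstacksdk.log  # file to write SDK activity to
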
+Breaking Changes / Porting Guide
+--------------------------------
+
+- 2.x.x releases of this collection are not backward compatible with 1.x.x releases. Backward compatibility is guaranteed within each release series only. Module options have been kept backward compatible across both release series, apart from a few exceptions noted below. However, module results have changed for most modules due to deep changes in openstacksdk. For easier porting and usage, we streamlined return values across modules and documented return values of all modules.
+- Default value for option ``security_groups`` in ``openstack.cloud.server`` has been changed from ``['default']`` to ``[]`` because the latter is the default in python-openstackclient and the former behavior causes issues with existing servers.
+- Dropped symbolic links with prefix ``os_`` and plugin routing for deprecated ``os_*`` module names. This means users have to call modules of the Ansible OpenStack collection using their FQCN (Fully Qualified Collection Name) such as ``openstack.cloud.server``. Short module names such as ``os_server`` will now raise an Ansible error.
+- Module ``openstack.cloud.project_access`` has been split into two separate modules ``openstack.cloud.compute_flavor_access`` and ``openstack.cloud.volume_type_access``.
+- Option ``availability_zone`` has been removed from the list of generic options available in all modules. Instead it has been inserted into the ``openstack.cloud.server`` and ``openstack.cloud.volume`` modules because it is relevant to those two modules only.
+- Option ``name`` of module ``openstack.cloud.port`` is required now because it is used to find, update and delete ports and idempotency would break otherwise.
+- Option ``policies`` has been replaced with option ``policy`` in module ``openstack.cloud.server_group``. The former is ancient and was superseded by ``policy`` a long time ago.
+- Release series 2.x.x of this collection is compatible with openstacksdk 1.0.0 and later only. For compatibility with openstacksdk < 0.99.0 use release series 1.x.x of this collection. Ansible will raise an error when modules and plugins in this collection are used with an incompatible release of openstacksdk.
+- Special value ``auto`` for option ``id`` in module ``openstack.cloud.compute_flavor`` has been deprecated to be consistent with our other modules and openstacksdk's behaviour.
+
+Deprecated Features
+-------------------
+
+- Option ``is_public`` in module ``openstack.cloud.image`` has been deprecated and replaced with option ``visibility``.
+- Option ``volume`` in module ``openstack.cloud.image`` has been deprecated and it should be replaced with module ``openstack.cloud.volume`` in user code.
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- Dropped deprecated ``skip_update_of_driver_password`` option from module ``openstack.cloud.baremetal_node``.
+- Dropped unmaintained, obsolete and broken inventory script ``scripts/inventory/openstack_inventory.py``. It had been replaced with a proper Ansible inventory plugin ``openstack.cloud.openstack`` during the 1.x.x life cycle.
+- Module ``openstack.cloud.object`` no longer allows creating and deleting containers; its sole purpose now is managing an object in a container. Use module ``openstack.cloud.object_container`` to manage Swift containers instead.
+- Option ``listeners`` has been removed from module ``openstack.cloud.loadbalancer`` because it duplicates a subset of the functionality (and code) provided by our ``openstack.cloud.lb_{listener,member,pool}`` modules.
+- Our outdated, undocumented, untested and bloated code templates in ``contrib`` directory which could be used to generate and develop new Ansible modules for this collection have been removed.
Bugfixes
--------
-- Fix collection guidelines
+- Ansible check mode has been fixed in module ``openstack.cloud.compute_flavor``, it will no longer apply changes when check mode is enabled.
+- Creating load-balancers with module ``openstack.cloud.loadbalancer`` properly handles situations where several provider networks exist. A floating ip address specified in option ``floating_ip_address`` will be allocated from Neutron external network specified in option ``floating_ip_network``.
+- Default values for options ``shared``, ``admin_state_up`` and ``external`` in module ``openstack.cloud.network`` have been dropped because they cause failures for clouds which do not have those optional extensions installed.
+- Dropped default values for options ``min_disk`` and ``min_ram`` in module ``openstack.cloud.image`` because it interferes with its update mechanism and Glance uses those values anyway. Fixed handling of options ``name``, ``id``, ``visibility`` and ``is_public``.
+- Module ``openstack.cloud.baremetal_node_info`` will now properly return machine details when iterating over all available baremetal nodes.
+- Module ``openstack.cloud.host_aggregate`` now correctly handles ``hosts`` not being set or being set to ``None``.
+- Module ``openstack.cloud.identity_user`` will no longer fail when no password is supplied since Keystone allows creating a user without a password.
+- Module ``openstack.cloud.keypair`` no longer removes trailing spaces when reading a public key because this broke idempotency when using openstackclient and this module at the same time.
+- Module ``openstack.cloud.quota`` no longer sends invalid attributes such as ``project_id`` to OpenStack API when updating Nova, Neutron and Cinder quotas.
+- Module ``openstack.cloud.server`` will no longer change security groups to ``['default']`` on existing servers when option ``security_groups`` has not been specified.
+- Module ``openstack.cloud.subnet`` now properly handles updates, thus idempotency has been fixed and restored.
+- Modules ``openstack.cloud.security_group`` and ``openstack.cloud.security_group_rule`` gained support for specifying string ``any`` as a valid protocol in security group rules.
+- Option ``interfaces`` in module ``openstack.cloud.router`` no longer requires option ``network`` to be set; it is ``external_fixed_ips`` that requires ``network``.
+- Option ``is_public`` in module ``openstack.cloud.image`` will now be handled as a boolean instead of a string to be compatible with the Glance API and fix issues when interacting with the Glance service.
+- Option ``network`` in module ``openstack.cloud.router`` is now properly marked as required by options ``enable_snat`` and ``external_fixed_ips``.
+- Option ``owner`` in module ``openstack.cloud.image`` is now respected when searching for and creating images.
+- Our OpenStack inventory plugin now properly supports Ansible's cache feature.
v1.7.1
======
diff --git a/ansible_collections/openstack/cloud/CONTRIBUTING.rst b/ansible_collections/openstack/cloud/CONTRIBUTING.rst
deleted file mode 100644
index cf632ce37..000000000
--- a/ansible_collections/openstack/cloud/CONTRIBUTING.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-.. _contributing:
-
-=============================================
-Contributing to ansible-collections-openstack
-=============================================
-
-If you're interested in contributing to the ansible-collections-openstack project,
-the following will help get you started.
-
-Developer Workflow
-------------------
-
-OpenStack uses OpenDev for it's development, and patches are submitted to
-`OpenDev Gerrit`_. Please read `DeveloperWorkflow`_ before sending your
-first patch for review.
-
-Pull requests submitted through GitHub will be ignored.
-
-.. seealso::
-
- * https://wiki.openstack.org/wiki/How_To_Contribute
- * https://wiki.openstack.org/wiki/CLA
-
-.. _OpenDev Gerrit: https://review.opendev.org/
-.. _DeveloperWorkflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Project Hosting Details
------------------------
-
-Bug tracker
- https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
-
-Mailing list (prefix subjects with ``[ansible]`` for faster responses)
- http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
-
-Code Hosting
- https://opendev.org/openstack/ansible-collections-openstack
-
-Code Review
- https://review.opendev.org/#/q/status:open+project:openstack/ansible-collections-openstack,n,z
diff --git a/ansible_collections/openstack/cloud/FILES.json b/ansible_collections/openstack/cloud/FILES.json
index 15234f6ab..fcd228b56 100644
--- a/ansible_collections/openstack/cloud/FILES.json
+++ b/ansible_collections/openstack/cloud/FILES.json
@@ -8,38 +8,24 @@
"format": 1
},
{
- "name": "scripts",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "scripts/inventory",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "scripts/inventory/openstack.yml",
+ "name": "COPYING",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9d2659ec793a078ba38a521ea59eb2c281193c9bde5002da230c76381f71e95d",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
"format": 1
},
{
- "name": "scripts/inventory/openstack_inventory.py",
+ "name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "99e4ff9d5016c4410b3a2f0558bfabdaa2ec4b1b01a354b59c6b0277e4deaceb",
+ "chksum_sha256": "bdbc15dc3f351a013de8879c2b0dab5fe28c1eb246e3de2185e1d5de361fee3a",
"format": 1
},
{
- "name": "requirements.txt",
+ "name": "bindep.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "deeb6c325faa9932a41908dd06468762b1c2a4d24d471b24e91f02ee3eae9363",
+ "chksum_sha256": "955ed5d9eb93b169f416a9532f5b39a7551d2ec74f6841b8b819c50f135f39e8",
"format": 1
},
{
@@ -50,38 +36,38 @@
"format": 1
},
{
- "name": "docs/openstack_guidelines.rst",
+ "name": "docs/branching.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3f010ee14ee1a28bf27c4f041054282052d5bd57e08faef0a63af7960030f714",
+ "chksum_sha256": "c3e54bfbdb2209bd63f138bc53f53cc7c3b7343180ad6db79d8bce8c01d5ae1a",
"format": 1
},
{
- "name": "setup.py",
+ "name": "docs/contributing.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7478e980356fe29366647e7c7d6687b7da68d1ea763d4ca865a75ca9498db1c2",
+ "chksum_sha256": "d80ef056626e54b7d6d6fd1250a1a981e1d5009255d9fdb6332f50c74e5da3fb",
"format": 1
},
{
- "name": "COPYING",
+ "name": "docs/devstack.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "chksum_sha256": "638c5453008f87a37ac4bd50c804a1abcdfe57dcde2231c2684e7e81b0afd458",
"format": 1
},
{
- "name": "CONTRIBUTING.rst",
+ "name": "docs/releasing.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c5017ba9aff6506564036e816e8bbb7d8d7c9e8acd4a94ffce3c269c51b96ee1",
+ "chksum_sha256": "52bfadcfd9a56eaebfd5da94cf36f1e00f8caadccef686a59cce04f122e327a0",
"format": 1
},
{
- "name": "CHANGELOG.rst",
+ "name": "docs/reviewing.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8de9b2e83ca22ed1b3b97b5103f3396d934f04e34511e436cf6ade954e444506",
+ "chksum_sha256": "1025376447d66f5b273fb6cc125d5ffd9a2c3f27fb5b9592820347b837edbf2f",
"format": 1
},
{
@@ -95,14 +81,7 @@
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2aeded4e76257888e106bdd3fec30ce7eff64dd44b58eb576122f6045491e530",
- "format": 1
- },
- {
- "name": "README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f8c2f9a2fece180fd309842b8d8926b0e2e9369149711f0582e71606b7191db6",
+ "chksum_sha256": "cd7fddddf0db4a44522086bd44b5fb3848120e983b27136f8740ced95a3068df",
"format": 1
},
{
@@ -130,35 +109,7 @@
"name": "plugins/doc_fragments/openstack.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4911368e21c53178b13c45256e5134188b6ed45a4e4620b711534f3bc6405d1d",
- "format": 1
- },
- {
- "name": "plugins/module_utils",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "plugins/module_utils/ironic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c639b9bcd7ceb1f0b0fb8116cb7d7db33ceb2ee957f3b56d173d9146c845b38f",
- "format": 1
- },
- {
- "name": "plugins/module_utils/openstack.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f4ac8ccab9e5f6cffc972d895b380952e8dffb457f4fb84b175eba5226f65b63",
+ "chksum_sha256": "f04f8a1709fbefe7e3930c7abcd98ff60886df0e32edc585e5b58635c54014a7",
"format": 1
},
{
@@ -179,140 +130,49 @@
"name": "plugins/inventory/openstack.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "57e8deb888d12c4f4d5da23f6bfb9381373a7637fb67fc42a9be9e4c3b9f0903",
+ "chksum_sha256": "7b43f7426ed052069ddeeb474ce9f71a515b01470ae41107b849a0fbfe7c1a98",
"format": 1
},
{
- "name": "plugins/modules",
+ "name": "plugins/module_utils",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "plugins/modules/os_flavor_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdc398e7e8a4eb653d4cd8dee462cfd3b9df04c5d81f25cb95a232ac1336470c",
- "format": 1
- },
- {
- "name": "plugins/modules/os_project_access.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a2c872a3cdac209eed029402650a31da1ced47621f866c792b1af930774b160",
- "format": 1
- },
- {
- "name": "plugins/modules/os_security_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "754dbf89abfcad4a7a7b9487a19123910f8ea5fde2677d1c209f0a90b0a418b7",
- "format": 1
- },
- {
- "name": "plugins/modules/object_container.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9bca51b4da9ccefff04fa55f8999ca60f8fdf9aae565be0fa70bef9b08962032",
- "format": 1
- },
- {
- "name": "plugins/modules/os_keypair.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83b0c1c2e8be79e9f4c665b1715619ee6fa09d032bbf227e5bae9f8492fc4eec",
- "format": 1
- },
- {
- "name": "plugins/modules/os_keystone_federation_protocol.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2de6941c90bae6b288ba1f11fe5cd85f34441b487d2256c6f7fd5325cc7e4f0b",
- "format": 1
- },
- {
- "name": "plugins/modules/os_routers_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4930f4982ddbe6f823d5266e443e000c36e16adb59d511b091182f0be58e8d7e",
- "format": 1
- },
- {
- "name": "plugins/modules/neutron_rbac_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c9634c6e8336bc13766bcf54dd5b8aeb5bc6d3d4f1bb2653cf56c50ddbd318a1",
- "format": 1
- },
- {
- "name": "plugins/modules/os_keystone_identity_provider_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c44bc1d617c4780fb5d45ed43d937ddce5140b01b10c0c769c85a3bfa55e9b72",
- "format": 1
- },
- {
- "name": "plugins/modules/os_keystone_role.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48db5ab304c1f74dd891c68ca4934788dfc84734135637dc93821bbee1f1f747",
- "format": 1
- },
- {
- "name": "plugins/modules/volume_snapshot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e34df3ee0c37bb951eff1623a6d9f1faf974846ff7c4a82411907e2c57156727",
- "format": 1
- },
- {
- "name": "plugins/modules/quota.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be2e920833e045fe816b233888b8d78f9612ddbf963e7366e8d58790dae87c96",
- "format": 1
- },
- {
- "name": "plugins/modules/os_nova_flavor.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7d1d887e09cae6cfa4b2dcc913d496fbacc0f01b3f1ef6f6d71319798032f3f0",
- "format": 1
- },
- {
- "name": "plugins/modules/os_keystone_service.py",
+ "name": "plugins/module_utils/__init__.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0a3c446e8f01dfb3b071aa3dbd27baf27faf882009f4c7594ae18cd43f30eb68",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "plugins/modules/identity_domain.py",
+ "name": "plugins/module_utils/ironic.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8c4b16be01667ff78a36a99bc41b97a769a4600e330c1deeb4bd139443033691",
+ "chksum_sha256": "cdc32eb062140e9f126ed0bd98b0740a076aa977743993110fc014d35df3ce3d",
"format": 1
},
{
- "name": "plugins/modules/os_volume.py",
+ "name": "plugins/module_utils/openstack.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e9973db07f021b2af58f3911790697fd220955b3ca8e03492ffcda1b4f7b7f19",
+ "chksum_sha256": "dcfdeeca9cc715de74f45b247de8051cec924431107d7e71abeee6efeba7d9b4",
"format": 1
},
{
- "name": "plugins/modules/os_keystone_endpoint.py",
+ "name": "plugins/module_utils/resource.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c550b7ed488bdbf198238436c2f8fbd365e729a94da3cb56d28dfc63fb9a9417",
+ "chksum_sha256": "7da5eabd71543fa9ea4619b339746b243fa9cd65f116dc8fdbe76d9f83c568c5",
"format": 1
},
{
- "name": "plugins/modules/object.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c4c17b189f9f81865a9d95efe200756632f8c43a1cf66db39408e3bc5684f909",
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
"format": 1
},
{
@@ -323,892 +183,654 @@
"format": 1
},
{
- "name": "plugins/modules/security_group_rule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3d9230992be555a34e15a589e63d4df7c1901dbf2a8926126965ca828189ba65",
- "format": 1
- },
- {
- "name": "plugins/modules/role_assignment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "75f260a78e46b8e5635ebe2302862558beb6ffe98cf372041d2311efd11a61a3",
- "format": 1
- },
- {
- "name": "plugins/modules/stack_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "33847b5635e14e2234e526d1f3829a4161cc8756a53e1746662ebc77a3b57ea2",
- "format": 1
- },
- {
- "name": "plugins/modules/os_auth.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ab740bc0c9a9944e4a573e867b2dc10a8539e2b4bfe8b0a5d3e3ea47f4c74f18",
- "format": 1
- },
- {
- "name": "plugins/modules/identity_group_info.py",
+ "name": "plugins/modules/address_scope.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fc02b94aa894a64913d29222f90a13d17ef46e5537e0bac70813133f0e111c55",
+ "chksum_sha256": "e66779b5ab9d33b42e48aa6fae76b456f5a0ff49cae14fc4a49269ec4b3b7278",
"format": 1
},
{
- "name": "plugins/modules/os_image.py",
+ "name": "plugins/modules/auth.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e2b874819fa5022396d165d07b96b2cf4d7ead88efec899f6d260936aae2bbf0",
+ "chksum_sha256": "c9b3103360c18feb3e135febed7959e1a1917201591166daabf48353f981c8e3",
"format": 1
},
{
- "name": "plugins/modules/server_action.py",
+ "name": "plugins/modules/baremetal_deploy_template.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f7195b495cb1d734003383d0ab355dd79ae1e7ac7a19719e0e44ca52577bec0d",
+ "chksum_sha256": "845cef9d17ba63b1e9eecc34ac6d1042585d906a691ad28fc09ed86594741728",
"format": 1
},
{
- "name": "plugins/modules/identity_group.py",
+ "name": "plugins/modules/baremetal_inspect.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a8c24a4db33fd00809fa6f53be4fc8b74822fcc5d83a09fa31973306f576fc2a",
+ "chksum_sha256": "b672c7e98ec776baedbf6b08b4486384cda18d2101ac2b054746f1d12b8cdd5c",
"format": 1
},
{
- "name": "plugins/modules/volume.py",
+ "name": "plugins/modules/baremetal_node.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e9973db07f021b2af58f3911790697fd220955b3ca8e03492ffcda1b4f7b7f19",
+ "chksum_sha256": "fd0dd8082a63504d72ca6567a3a2cdd40426788be1fe10d28cc7e3cf9fd6363e",
"format": 1
},
{
- "name": "plugins/modules/image.py",
+ "name": "plugins/modules/baremetal_node_action.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e2b874819fa5022396d165d07b96b2cf4d7ead88efec899f6d260936aae2bbf0",
+ "chksum_sha256": "9d52734c533e50cc14ae529a284b6c65f853536e7036d0b51cc0b922d58238e7",
"format": 1
},
{
- "name": "plugins/modules/os_nova_host_aggregate.py",
+ "name": "plugins/modules/baremetal_node_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "404e5db01a7c1ffc69bbb21d09a96da253559968af4ec552520bc3cb5f56c827",
+ "chksum_sha256": "08468dfbb706c23a56fc462b3070eb71f773c29fdc6b242039cbcf88be0911d3",
"format": 1
},
{
- "name": "plugins/modules/network.py",
+ "name": "plugins/modules/baremetal_port.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "60e4f2066700eb7070c73a6d53cf0adf62d0f81d2214e319d1796f63d03cd576",
+ "chksum_sha256": "85a1add23975e2ec616088efe17ed2a157cd2d3fa322de14a06b50e7f93a7599",
"format": 1
},
{
- "name": "plugins/modules/os_project_info.py",
+ "name": "plugins/modules/baremetal_port_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b77706b7f9243c3a8e22ffa091dc47857d91b3aec0ef81843f1c062a0225e3bf",
+ "chksum_sha256": "7287faedf4c3d09ffb3e5e4cc759c25c6d27a4a645f5fd8d2d8eb88f4dbf803b",
"format": 1
},
{
- "name": "plugins/modules/os_recordset.py",
+ "name": "plugins/modules/catalog_service.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8f2142288c16a584ce2bd4b4e5d76c1a593331c3a505371d2e41f8e7c7722838",
+ "chksum_sha256": "cd6df94b2be318599594f6bfd80c46fbf935626dfa454a8ed09cb19cd88c7de5",
"format": 1
},
{
- "name": "plugins/modules/identity_role.py",
+ "name": "plugins/modules/catalog_service_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "48db5ab304c1f74dd891c68ca4934788dfc84734135637dc93821bbee1f1f747",
+ "chksum_sha256": "c45a7f3edb6fa9b7bd9cef2865bd9951cbafea168c36dcad97cb3f4fdad9e419",
"format": 1
},
{
- "name": "plugins/modules/os_volume_snapshot.py",
+ "name": "plugins/modules/coe_cluster.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e34df3ee0c37bb951eff1623a6d9f1faf974846ff7c4a82411907e2c57156727",
+ "chksum_sha256": "dffbbbc98e6a3083c9d41940f106e46e2825eaba9f8dac45bf55c57307bc0129",
"format": 1
},
{
"name": "plugins/modules/coe_cluster_template.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58c1323cac4f27ba42e5e59af64e3d06f47737e47f5f2c1bbc1ef483aa93a1cb",
- "format": 1
- },
- {
- "name": "plugins/modules/endpoint.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c550b7ed488bdbf198238436c2f8fbd365e729a94da3cb56d28dfc63fb9a9417",
- "format": 1
- },
- {
- "name": "plugins/modules/keystone_federation_protocol.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2de6941c90bae6b288ba1f11fe5cd85f34441b487d2256c6f7fd5325cc7e4f0b",
- "format": 1
- },
- {
- "name": "plugins/modules/os_keystone_domain.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c4b16be01667ff78a36a99bc41b97a769a4600e330c1deeb4bd139443033691",
+ "chksum_sha256": "27016ad45f7e593e86f81b57bdf286b82140f57ba0785d9d0d4944a821ed3189",
"format": 1
},
{
- "name": "plugins/modules/os_ironic_node.py",
+ "name": "plugins/modules/compute_flavor.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "af77101be82c61538b12f94e8ec21b55945fd0a68fa23db280a4bb5afba26c18",
+ "chksum_sha256": "ffa5048d7d5cbe016a600f413b022669568137adbae9ebcba827fe59fc189a7b",
"format": 1
},
{
- "name": "plugins/modules/server_info.py",
+ "name": "plugins/modules/compute_flavor_access.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5273ffa48ad2c9e6d9ac54b72d1379272b29339d336b2caf6cef27a8b416c870",
+ "chksum_sha256": "19d414e1b976e2c3bcf296b451ef9e9a815f93ce858134a182e5ab72a76cb337",
"format": 1
},
{
- "name": "plugins/modules/os_stack.py",
+ "name": "plugins/modules/compute_flavor_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3cf3d41172c98d6139cc28f6db50fb4026ca1ff47b144ee8f54ac3f3f13c24f0",
+ "chksum_sha256": "66652a59d6fb55c5ce25e7a679a45c50748cb2362faa62085643cec278ad52c1",
"format": 1
},
{
- "name": "plugins/modules/loadbalancer.py",
+ "name": "plugins/modules/compute_service_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "367e7e79be4296d54095d5c9f0b6814b3f2ddacd529aed86b572350cb7000fa3",
+ "chksum_sha256": "7705d5af175014e53538ab8d0220c0c63d42fd7acc2f7f10c96c52f6c6090d47",
"format": 1
},
{
- "name": "plugins/modules/os_ironic.py",
+ "name": "plugins/modules/config.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6b061ee236ec1cfcde3d261df46e20d5d27853f7a89b8386ff93b5a1c743f7b6",
+ "chksum_sha256": "d2a847c41528d8f0bb8dcec49d7e303e0df3c47e311d31245e194faea0e7ec5b",
"format": 1
},
{
- "name": "plugins/modules/os_keystone_federation_protocol_info.py",
+ "name": "plugins/modules/dns_zone.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e069c7cfacae0add6d6045141729925555babf7cf6adbc0ec68b9ee5ab240773",
+ "chksum_sha256": "3671efcd3bdf4c1ab708ddcc61b3e753ca572778f26be1060375f89d9224ed61",
"format": 1
},
{
"name": "plugins/modules/dns_zone_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bcc162b7985aad2aa2c3dfe9fa0eff2494f7c1b66770e3a05da31443fe50944e",
- "format": 1
- },
- {
- "name": "plugins/modules/networks_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "09bd90ad0409668097c1389e259fd1e0b2c5f06d9ed36d7381d5edb1c6aaaf3b",
- "format": 1
- },
- {
- "name": "plugins/modules/project_access.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a2c872a3cdac209eed029402650a31da1ced47621f866c792b1af930774b160",
- "format": 1
- },
- {
- "name": "plugins/modules/os_user_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da27f9292c32782a778eb0da6881a34e02622bcd6ecafcde4337bab4f053b49c",
- "format": 1
- },
- {
- "name": "plugins/modules/baremetal_port_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e2829ed9ea2a9a4f05b333d2a97485fc9fe496736624093ecf3d25d6b2a30668",
- "format": 1
- },
- {
- "name": "plugins/modules/os_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc02b94aa894a64913d29222f90a13d17ef46e5537e0bac70813133f0e111c55",
- "format": 1
- },
- {
- "name": "plugins/modules/os_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0723c67688a88c69ce248f44ef3fc192e4eaeab81d09738fb82ec8e6dc5b147",
+ "chksum_sha256": "84fb5b4e35d31181cf5f7c3589b3e2a020e3a9674f27d1118dffe4ba6b06618d",
"format": 1
},
{
- "name": "plugins/modules/identity_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0723c67688a88c69ce248f44ef3fc192e4eaeab81d09738fb82ec8e6dc5b147",
- "format": 1
- },
- {
- "name": "plugins/modules/os_member.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "adad29807477d5cfe6dde616ec978f71bee30c1bdf3af52c5151a24aee4dc1b7",
- "format": 1
- },
- {
- "name": "plugins/modules/identity_role_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6668597267f5fa47317032812a0c42cafe8767e63f30d880bbebec1ad0574799",
- "format": 1
- },
- {
- "name": "plugins/modules/lb_health_monitor.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9e1e13be7e52b6a00826ce22c9e1e631ad63aa4ba138b3155bb59064136fd03",
- "format": 1
- },
- {
- "name": "plugins/modules/os_port.py",
+ "name": "plugins/modules/endpoint.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c6215bcaa7c1b61a5dbcd698acb32cd5ba0afd2ae3e3c9d1b6b9e651f51d23ad",
+ "chksum_sha256": "6a729f7eb18dcf99f471343840e00abd4e566cf80256f1c2a0013fa53734801a",
"format": 1
},
{
- "name": "plugins/modules/identity_user_info.py",
+ "name": "plugins/modules/federation_idp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "da27f9292c32782a778eb0da6881a34e02622bcd6ecafcde4337bab4f053b49c",
+ "chksum_sha256": "3f741bf4148cd62f83c367b228866ab2002d46fb8983186c6aff36cf79419dee",
"format": 1
},
{
- "name": "plugins/modules/address_scope.py",
+ "name": "plugins/modules/federation_idp_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "71312db50316e108b4dd2e35c8a91ba2ae638112e9c472fc659c3aef62e6c3c6",
+ "chksum_sha256": "c4982dcae73f7a0c611d95bf6a3f244e74617c42a18f175e2c30559ea4692be1",
"format": 1
},
{
- "name": "plugins/modules/dns_zone.py",
+ "name": "plugins/modules/federation_mapping.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9dc86ee13a42726ead899f2ca54decfc9323ae4f2507df2b78c175e3c166d68f",
+ "chksum_sha256": "f5d636584f40ec2a0bdbfbd895fef412f6078d9e9cc4859cdc18872340334040",
"format": 1
},
{
- "name": "plugins/modules/os_subnets_info.py",
+ "name": "plugins/modules/federation_mapping_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6b078e592ad361328d9ddf84a98c4066f88e59d9ce4a7bda25ccadd5830466fa",
+ "chksum_sha256": "7d73596e4ca2f1e2dad385d9f4f504f4dbc8e1fff83a7689ea18225e10e5204d",
"format": 1
},
{
- "name": "plugins/modules/lb_listener.py",
+ "name": "plugins/modules/floating_ip.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d3a00bfefc05b0b03b2a91ce04c03792ead2d823a5c83e487df3c8449ec48fa3",
+ "chksum_sha256": "f6f57dfae2720bbd11d699e43ef92f6cf0844807d487a95107d47f1f4f9f8008",
"format": 1
},
{
- "name": "plugins/modules/baremetal_node_info.py",
+ "name": "plugins/modules/floating_ip_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58e6246299b21cd59482bbbae20c434ba348f6e6d0994a663b42966249268019",
+ "chksum_sha256": "2eb45c05dd30265d51e35a28a736a30ac0a6d1605e12b3445ecef3500264da10",
"format": 1
},
{
"name": "plugins/modules/group_assignment.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7e871d633590fbc8f686a084ce38a130c58cd1ac6dfd0cd151082655748de9f6",
+ "chksum_sha256": "63790feba80b2d3b9a693c492d029674e9eea31b084ee0492d4318e10b884f87",
"format": 1
},
{
- "name": "plugins/modules/federation_mapping.py",
+ "name": "plugins/modules/host_aggregate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b3b701b5ad55032a6e104e445d8cfd781b7f809719aa2a142463c395bde9854f",
+ "chksum_sha256": "c6bdfa57f2ab6a8fca5456915ff8c8849aaef9ab98cba8f650ab873e1312c8f3",
"format": 1
},
{
- "name": "plugins/modules/keypair.py",
+ "name": "plugins/modules/identity_domain.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83b0c1c2e8be79e9f4c665b1715619ee6fa09d032bbf227e5bae9f8492fc4eec",
+ "chksum_sha256": "c8e68ffe46a85f3712752ef8889f071cd09367ba480c57b3cc03012f7aa41c35",
"format": 1
},
{
- "name": "plugins/modules/os_floating_ip.py",
+ "name": "plugins/modules/identity_domain_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a5f29ed923bb4aa438f6f2ed33d0668edca8b5c7de22388d34b2998bf8348bd9",
+ "chksum_sha256": "06d0cbf55e2f118d3338d6e9b548905aa48b942c6e09bf7cc78999c47504086a",
"format": 1
},
{
- "name": "plugins/modules/security_group.py",
+ "name": "plugins/modules/identity_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "754dbf89abfcad4a7a7b9487a19123910f8ea5fde2677d1c209f0a90b0a418b7",
+ "chksum_sha256": "bc1347bb833d57af5fadb2d73e2fdc24823147a48625f82b2812aa9ff8556b9d",
"format": 1
},
{
- "name": "plugins/modules/subnet.py",
+ "name": "plugins/modules/identity_group_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a1fc690f2df7e249da41125c7d8194134f2e6f465b53d4974c5f479bcc6be848",
+ "chksum_sha256": "d1b6427af973c2e3128498239bcc49c6546e3e0edc03428e8cbff9825d3a7ce5",
"format": 1
},
{
- "name": "plugins/modules/os_server_action.py",
+ "name": "plugins/modules/identity_role.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f7195b495cb1d734003383d0ab355dd79ae1e7ac7a19719e0e44ca52577bec0d",
+ "chksum_sha256": "fc6ba22b1f28e8f72379976faf26f311759ba4e149ce2ca3e106bb40dc96517f",
"format": 1
},
{
- "name": "plugins/modules/federation_idp_info.py",
+ "name": "plugins/modules/identity_role_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c44bc1d617c4780fb5d45ed43d937ddce5140b01b10c0c769c85a3bfa55e9b72",
+ "chksum_sha256": "4ffb3a8fb48d137170587281eefed865b23071f2479d0b1f2f625bca68b80df1",
"format": 1
},
{
- "name": "plugins/modules/coe_cluster.py",
+ "name": "plugins/modules/identity_user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ddfd58e836da83c52bfa09b5c8199b386bcdebe57d4a9108d094f278e9304073",
+ "chksum_sha256": "0f3d155a5e4e3e6ce99d452c8a6ec3c06a99889f0384822f7f89002646795ec5",
"format": 1
},
{
- "name": "plugins/modules/os_client_config.py",
+ "name": "plugins/modules/identity_user_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7e0e3782be88d4734a2207cb3047a97ccbb9d1cd8e21d9eae845a667e7731ec8",
+ "chksum_sha256": "44e046cf6942cabe99d8205d485a068521f136d5afac8eb5b6c2236aa6dade3d",
"format": 1
},
{
- "name": "plugins/modules/os_image_info.py",
+ "name": "plugins/modules/image.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "db4db547e390ab5632e9741f88d1caec377122a318e006d4513a5c73b27a311b",
+ "chksum_sha256": "110359e940dbc67d1ec053233beb7755f985b4bb181c93964b41b9984aa9ea71",
"format": 1
},
{
- "name": "plugins/modules/os_server.py",
+ "name": "plugins/modules/image_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bb975f024413bc4de5d0c57f8455f5d6460b7131e850e4a4b5acd583135ca5ac",
+ "chksum_sha256": "31c88b1f69fa1f9ce5eb2059dc301dd7e972e9810b0ced11de4571619ba541cd",
"format": 1
},
{
- "name": "plugins/modules/os_network.py",
+ "name": "plugins/modules/keypair.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "60e4f2066700eb7070c73a6d53cf0adf62d0f81d2214e319d1796f63d03cd576",
+ "chksum_sha256": "3c77bf4d2c7ba368d47ba147c370caf35143a388e7d048d45d398199eae6627c",
"format": 1
},
{
- "name": "plugins/modules/os_server_group.py",
+ "name": "plugins/modules/keypair_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a4d0f68578f8146796d6399b96788c230a4577dfeb247f4e84874abb5d7a7938",
+ "chksum_sha256": "2d64dd1406946bc76cd9c187d26d2e409d0c8a0d189d043a5607d5d999e46a0f",
"format": 1
},
{
- "name": "plugins/modules/catalog_service.py",
+ "name": "plugins/modules/keystone_federation_protocol.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0a3c446e8f01dfb3b071aa3dbd27baf27faf882009f4c7594ae18cd43f30eb68",
+ "chksum_sha256": "4b86d3e6853e07caa2a3394cc0d20916ff94ebee80892e2b781080e1e5dd2660",
"format": 1
},
{
- "name": "plugins/modules/os_zone.py",
+ "name": "plugins/modules/keystone_federation_protocol_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9dc86ee13a42726ead899f2ca54decfc9323ae4f2507df2b78c175e3c166d68f",
+ "chksum_sha256": "c98818cb255c7d68bca4f91a5fddcc9b1b21636cb0096ca239043323bebf8ceb",
"format": 1
},
{
- "name": "plugins/modules/lb_pool.py",
+ "name": "plugins/modules/lb_health_monitor.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8c512b83408267d8411649149c79f106137f9297cb15dfe18e5964af2424385d",
+ "chksum_sha256": "1ed5ca97faf5251bf1ea263de3c3c4932454d598a121aa0bc257ec484fab9034",
"format": 1
},
{
- "name": "plugins/modules/volume_snapshot_info.py",
+ "name": "plugins/modules/lb_listener.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58ebc78344140e5336a41295fcd782a6f562894eee1d9d159a466737f1d6ddc1",
+ "chksum_sha256": "06b9df458be0148422960ba26abba771f21d5dd92f6a16a0ac5cf9b1acfbda08",
"format": 1
},
{
"name": "plugins/modules/lb_member.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "adad29807477d5cfe6dde616ec978f71bee30c1bdf3af52c5151a24aee4dc1b7",
+ "chksum_sha256": "b660966d8a2e8c7dfb74732ba10fd1be6b611f5e665d71af644d7ba131fbdf9a",
"format": 1
},
{
- "name": "plugins/modules/os_project.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "660fbcce9d3ee244be7cde0f5dac04448660525df2750b7a4b0a025924f6c859",
- "format": 1
- },
- {
- "name": "plugins/modules/compute_flavor_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdc398e7e8a4eb653d4cd8dee462cfd3b9df04c5d81f25cb95a232ac1336470c",
- "format": 1
- },
- {
- "name": "plugins/modules/container.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9bca51b4da9ccefff04fa55f8999ca60f8fdf9aae565be0fa70bef9b08962032",
- "format": 1
- },
- {
- "name": "plugins/modules/os_listener.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3a00bfefc05b0b03b2a91ce04c03792ead2d823a5c83e487df3c8449ec48fa3",
- "format": 1
- },
- {
- "name": "plugins/modules/volume_backup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "01f6678f0c0abebf952b14891074b9f043970bd88e17d984847a0a19ae94d7b8",
- "format": 1
- },
- {
- "name": "plugins/modules/federation_mapping_info.py",
+ "name": "plugins/modules/lb_pool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "700e84e9ef15cb3cd728e30e73a9cf996af0bf0053272591807459b283f750d1",
+ "chksum_sha256": "cc3e450e6a1f4adbfa63949668584d84e096831b34a5c124f8449fed89193b9c",
"format": 1
},
{
- "name": "plugins/modules/volume_backup_info.py",
+ "name": "plugins/modules/loadbalancer.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b05c61791bdbeeff7560eebd50babc4b20c0e6737acea040bc06c9aedbf7820d",
+ "chksum_sha256": "b75c7d9daf0c39287c2634b9bbd6bd1d1cc3a4b9d27fb07c03aad99180516202",
"format": 1
},
{
- "name": "plugins/modules/os_coe_cluster_template.py",
+ "name": "plugins/modules/network.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58c1323cac4f27ba42e5e59af64e3d06f47737e47f5f2c1bbc1ef483aa93a1cb",
+ "chksum_sha256": "ac3fd4ae4c63b6dc6b8d1dbf852d460a51547f66328f9dbb49ed45d4af0659dc",
"format": 1
},
{
- "name": "plugins/modules/baremetal_port.py",
+ "name": "plugins/modules/networks_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8456b8b728694c510138c48f618f487922b97a4dbe4f983c74675d9cfe47e516",
+ "chksum_sha256": "035d91fe3eb855a8aec09fc43021d3018e4935e753fee995736767561390796a",
"format": 1
},
{
"name": "plugins/modules/neutron_rbac_policies_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "88a186ca5deb3454ebd414c7cb083446eef0191065ae8f7072990c467fc6f4a2",
- "format": 1
- },
- {
- "name": "plugins/modules/os_user_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e871d633590fbc8f686a084ce38a130c58cd1ac6dfd0cd151082655748de9f6",
+ "chksum_sha256": "0f2bfb88b98a813b89abe2b8d2a566baf2b985dda556a726964111da3bdcf358",
"format": 1
},
{
- "name": "plugins/modules/security_group_info.py",
+ "name": "plugins/modules/neutron_rbac_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e489f79e6951049d88e64960506f10eabe8da1179ba23227dfdffc2d11f34df9",
+ "chksum_sha256": "9e2a5b73783bc2f8b4e1c61721c6fe2289c939baa7a1abbe8e9aa1dcd276ccab",
"format": 1
},
{
- "name": "plugins/modules/server_metadata.py",
+ "name": "plugins/modules/object.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a1a2b905c8255917e024ece1e639f6c1218a73af5f523d2011e91f4a2bcb3bbe",
+ "chksum_sha256": "d621f2cf9a7dfc965180b088b3b9610258417ae1cff51f55f65358ad16f22479",
"format": 1
},
{
- "name": "plugins/modules/server_volume.py",
+ "name": "plugins/modules/object_container.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bf7ec5168e5bcdbec0f738815b46da9c429d644fed768284312af7a6e5822d99",
+ "chksum_sha256": "900c0dafd3ec54d80518298defb100c4285c0f9a23e35356224a4e395e5293d2",
"format": 1
},
{
- "name": "plugins/modules/config.py",
+ "name": "plugins/modules/port_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7e0e3782be88d4734a2207cb3047a97ccbb9d1cd8e21d9eae845a667e7731ec8",
+ "chksum_sha256": "61c28794b8d03829dc917c88227e10c01910b32ee405e7ec33daf4c5b636e702",
"format": 1
},
{
- "name": "plugins/modules/federation_idp.py",
+ "name": "plugins/modules/project.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "68de32545531603a81fec1152aea68b39a481c9b65dd0f6a1b78b04b5a3dee49",
+ "chksum_sha256": "c733b94c3941ee4a45bc9df719476bb665726f41ac1446b92ed313606e7ad933",
"format": 1
},
{
- "name": "plugins/modules/compute_service_info.py",
+ "name": "plugins/modules/project_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b84427fb13bcce805d0841462699a9a8f6709b96a68da61ef2f309625102714d",
+ "chksum_sha256": "c77c99b507dc4fb9fdafc0fe7a668541ffa01b0a951a4d62226de18fc7a4336c",
"format": 1
},
{
- "name": "plugins/modules/os_keystone_mapping_info.py",
+ "name": "plugins/modules/quota.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "700e84e9ef15cb3cd728e30e73a9cf996af0bf0053272591807459b283f750d1",
+ "chksum_sha256": "7ef5c63de867de309275a3e101f087fc7d2e3bc2d17df40a240572a3af2db62e",
"format": 1
},
{
- "name": "plugins/modules/identity_domain_info.py",
+ "name": "plugins/modules/recordset.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a4f4ac14078e948505b4c14a1a17916264f80a68f0149bfd116c284db472caf7",
+ "chksum_sha256": "1b937a788bf5684d9c47222fa7b8269a0f7a10e4891bc504f21c7c84928f1026",
"format": 1
},
{
- "name": "plugins/modules/port.py",
+ "name": "plugins/modules/resource.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c6215bcaa7c1b61a5dbcd698acb32cd5ba0afd2ae3e3c9d1b6b9e651f51d23ad",
+ "chksum_sha256": "9b5730c3e5379426c687b4e5fdbf3fe841223c283c5bf91d46e71cda183f2405",
"format": 1
},
{
- "name": "plugins/modules/project.py",
+ "name": "plugins/modules/resources.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "660fbcce9d3ee244be7cde0f5dac04448660525df2750b7a4b0a025924f6c859",
+ "chksum_sha256": "0af19b70de7e357fc1f69c848548287b377d62298cd8a7705c39e9ae1c909a0a",
"format": 1
},
{
- "name": "plugins/modules/os_server_metadata.py",
+ "name": "plugins/modules/role_assignment.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a1a2b905c8255917e024ece1e639f6c1218a73af5f523d2011e91f4a2bcb3bbe",
+ "chksum_sha256": "198edd0b088e7f5a894752ab3066cb8bd00be1a294638aceadcb82047661a04c",
"format": 1
},
{
"name": "plugins/modules/routers_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4930f4982ddbe6f823d5266e443e000c36e16adb59d511b091182f0be58e8d7e",
+ "chksum_sha256": "685d529d4fbe68ead03499d6a03e15083f83f26d16bad143dfd8d2a8e7336a20",
"format": 1
},
{
- "name": "plugins/modules/os_port_info.py",
+ "name": "plugins/modules/security_group_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d2c81ab4cc3ca798c3fcf039dff0c5dbf54c9b5db2da43af839e0a7dbc9f48c1",
+ "chksum_sha256": "cf4644a15afea6d552fa853914ce835f32ce2f943bffbc5cc2cc87e8fd56ff14",
"format": 1
},
{
- "name": "plugins/modules/baremetal_node_action.py",
+ "name": "plugins/modules/security_group_rule.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "af77101be82c61538b12f94e8ec21b55945fd0a68fa23db280a4bb5afba26c18",
+ "chksum_sha256": "0f8b14fe4f845a9d6b36796694892c81be814348a0e484f11fb7531e473fa205",
"format": 1
},
{
- "name": "plugins/modules/subnets_info.py",
+ "name": "plugins/modules/security_group_rule_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6b078e592ad361328d9ddf84a98c4066f88e59d9ce4a7bda25ccadd5830466fa",
+ "chksum_sha256": "a4d614f6d448ae6b74a35509e12e0c848e782753519830b4fabcc54a46466f12",
"format": 1
},
{
"name": "plugins/modules/server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bb975f024413bc4de5d0c57f8455f5d6460b7131e850e4a4b5acd583135ca5ac",
+ "chksum_sha256": "bccb18b463450f2d034fd02da464a7082614178991f9128161154793c99f64d5",
"format": 1
},
{
- "name": "plugins/modules/os_keystone_domain_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a4f4ac14078e948505b4c14a1a17916264f80a68f0149bfd116c284db472caf7",
- "format": 1
- },
- {
- "name": "plugins/modules/security_group_rule_info.py",
+ "name": "plugins/modules/server_action.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "28903c09f07bb03948bc07c8f419d94adb73adfad780ae96a1b45d7ef4cc3732",
+ "chksum_sha256": "5237b09d6e2e563a911d002cf39f836368d31593337c6986ba0ebfe336214dc3",
"format": 1
},
{
- "name": "plugins/modules/project_info.py",
+ "name": "plugins/modules/server_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b77706b7f9243c3a8e22ffa091dc47857d91b3aec0ef81843f1c062a0225e3bf",
+ "chksum_sha256": "ed12da5c387209bcf97acff5945a7d072c5a85af7fbab0559b4bf041c7e683f1",
"format": 1
},
{
- "name": "plugins/modules/volume_info.py",
+ "name": "plugins/modules/server_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6166466c8cd8989261f40c6928c9e4c1dd62a3749ca10e5bc16f8c633b009db6",
+ "chksum_sha256": "f03c8ebbf1673f78984b75e5f237e5216ecfd601b3145dae707fba95e2438fd3",
"format": 1
},
{
- "name": "plugins/modules/image_info.py",
+ "name": "plugins/modules/server_metadata.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "db4db547e390ab5632e9741f88d1caec377122a318e006d4513a5c73b27a311b",
+ "chksum_sha256": "e9bbe26e5c285e9277a1c135026b70310b8e369cf98ce1fe761c08f313e4bbd5",
"format": 1
},
{
- "name": "plugins/modules/os_server_volume.py",
+ "name": "plugins/modules/server_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bf7ec5168e5bcdbec0f738815b46da9c429d644fed768284312af7a6e5822d99",
+ "chksum_sha256": "d34c90380a2eff488ff87f5983da4e2fb580c1e1fd40355d724828c7df493168",
"format": 1
},
{
- "name": "plugins/modules/os_pool.py",
+ "name": "plugins/modules/stack.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8c512b83408267d8411649149c79f106137f9297cb15dfe18e5964af2424385d",
+ "chksum_sha256": "19570cba3b74fd476f8246fc53622c7bd315ff1d46113dabffe239417712fbad",
"format": 1
},
{
- "name": "plugins/modules/os_ironic_inspect.py",
+ "name": "plugins/modules/stack_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ff83c3b479131331f3af9366d00ef47328faf1050fb7be44ccb18f9dd02e9f1b",
+ "chksum_sha256": "d814a9ac0fe9245668a174f919bea0732bfa8060145dc2b743c000e80abfd76b",
"format": 1
},
{
- "name": "plugins/modules/floating_ip.py",
+ "name": "plugins/modules/subnet.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a5f29ed923bb4aa438f6f2ed33d0668edca8b5c7de22388d34b2998bf8348bd9",
+ "chksum_sha256": "d77a4e7a81b3a8b06937f2551cede9a88d0c50d4ef74f07b42e6dd0a41205e50",
"format": 1
},
{
"name": "plugins/modules/subnet_pool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6f58ea2c0d44c77b0b5f6c540cd0294a1b32d3c354d4ab859dbf8c778039735b",
- "format": 1
- },
- {
- "name": "plugins/modules/os_router.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "caccea04d162cdedfc635ac1e702529af4fbaca498dc31f6da14005e1c773584",
- "format": 1
- },
- {
- "name": "plugins/modules/os_keystone_mapping.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b3b701b5ad55032a6e104e445d8cfd781b7f809719aa2a142463c395bde9854f",
- "format": 1
- },
- {
- "name": "plugins/modules/recordset.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f2142288c16a584ce2bd4b4e5d76c1a593331c3a505371d2e41f8e7c7722838",
+ "chksum_sha256": "73aed8ed6e2d21e8d4ad290e053230fb54c006c042182d192f25927f3ba7d24b",
"format": 1
},
{
- "name": "plugins/modules/server_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a4d0f68578f8146796d6399b96788c230a4577dfeb247f4e84874abb5d7a7938",
- "format": 1
- },
- {
- "name": "plugins/modules/compute_flavor.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7d1d887e09cae6cfa4b2dcc913d496fbacc0f01b3f1ef6f6d71319798032f3f0",
- "format": 1
- },
- {
- "name": "plugins/modules/baremetal_node.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6b061ee236ec1cfcde3d261df46e20d5d27853f7a89b8386ff93b5a1c743f7b6",
- "format": 1
- },
- {
- "name": "plugins/modules/os_subnet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1fc690f2df7e249da41125c7d8194134f2e6f465b53d4974c5f479bcc6be848",
- "format": 1
- },
- {
- "name": "plugins/modules/os_networks_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "09bd90ad0409668097c1389e259fd1e0b2c5f06d9ed36d7381d5edb1c6aaaf3b",
- "format": 1
- },
- {
- "name": "plugins/modules/router.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "caccea04d162cdedfc635ac1e702529af4fbaca498dc31f6da14005e1c773584",
- "format": 1
- },
- {
- "name": "plugins/modules/keystone_federation_protocol_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e069c7cfacae0add6d6045141729925555babf7cf6adbc0ec68b9ee5ab240773",
- "format": 1
- },
- {
- "name": "plugins/modules/os_user_role.py",
+ "name": "plugins/modules/subnets_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "75f260a78e46b8e5635ebe2302862558beb6ffe98cf372041d2311efd11a61a3",
+ "chksum_sha256": "3779624320ea2e4072d5dc81b4133b1ee5619ed08fb3d2dc3496b3726a855b5e",
"format": 1
},
{
- "name": "plugins/modules/keypair_info.py",
+ "name": "plugins/modules/volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a0cc8acbe1f58ee675d72fdd92332a881233ae71d320468dd409080889a56223",
+ "chksum_sha256": "69ad64045aca5e53ee94f68db620715c03ef5ac5b7989ae5f5640305830b2b54",
"format": 1
},
{
- "name": "plugins/modules/auth.py",
+ "name": "plugins/modules/volume_backup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ab740bc0c9a9944e4a573e867b2dc10a8539e2b4bfe8b0a5d3e3ea47f4c74f18",
+ "chksum_sha256": "5795cc718a64a12976188e3d1c260e27466e7bd1f1bf384149fed8766d02095b",
"format": 1
},
{
- "name": "plugins/modules/os_server_info.py",
+ "name": "plugins/modules/volume_backup_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5273ffa48ad2c9e6d9ac54b72d1379272b29339d336b2caf6cef27a8b416c870",
+ "chksum_sha256": "013f07903b9ebe2fe9a797cce0d10d04a1928b809c3e2e502acb256110fd13f0",
"format": 1
},
{
- "name": "plugins/modules/os_group.py",
+ "name": "plugins/modules/volume_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a8c24a4db33fd00809fa6f53be4fc8b74822fcc5d83a09fa31973306f576fc2a",
+ "chksum_sha256": "aa7498d864991ddc9783a1acfa8a566f9155aa28aee3a326262cb68ed9eb7840",
"format": 1
},
{
- "name": "plugins/modules/os_loadbalancer.py",
+ "name": "plugins/modules/volume_snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "367e7e79be4296d54095d5c9f0b6814b3f2ddacd529aed86b572350cb7000fa3",
+ "chksum_sha256": "4ed02ed061fd57ca34ee86864fd04d848cd4a1d9ac873bfc116a90628dcfe5a1",
"format": 1
},
{
- "name": "plugins/modules/os_coe_cluster.py",
+ "name": "plugins/modules/volume_snapshot_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ddfd58e836da83c52bfa09b5c8199b386bcdebe57d4a9108d094f278e9304073",
+ "chksum_sha256": "e67ce84ff99bdee792c2160a740a9886b87eb076ed2706d2914302519d2b037c",
"format": 1
},
{
- "name": "plugins/modules/os_object.py",
+ "name": "plugins/modules/volume_type_access.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c4c17b189f9f81865a9d95efe200756632f8c43a1cf66db39408e3bc5684f909",
+ "chksum_sha256": "b78be1234d9fc132a9a3223770b0a47c9b5c3f95ed2ec06beee64a3725cadb79",
"format": 1
},
{
- "name": "plugins/modules/floating_ip_info.py",
+ "name": "plugins/modules/security_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b867eca5a2ed722c00be2d086d3575604c293b549aa2b1ed9e83c6e45c15d9d4",
+ "chksum_sha256": "e60e111da04eeddf0c91571774006e662570817cb89667a84b9380c6f52efa10",
"format": 1
},
{
- "name": "plugins/modules/os_security_group_rule.py",
+ "name": "plugins/modules/port.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3d9230992be555a34e15a589e63d4df7c1901dbf2a8926126965ca828189ba65",
+ "chksum_sha256": "d607c26c14dbdacb6ecc38263de2ace6c813ca190296d50a0a71df8fc6e4e3dd",
"format": 1
},
{
- "name": "plugins/modules/stack.py",
+ "name": "plugins/modules/router.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3cf3d41172c98d6139cc28f6db50fb4026ca1ff47b144ee8f54ac3f3f13c24f0",
+ "chksum_sha256": "4186046a5449503ec65013d32844fa46949fdb613e6ad038a3f09182dc25c2e4",
"format": 1
},
{
- "name": "plugins/modules/port_info.py",
+ "name": "plugins/modules/volume_type.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d2c81ab4cc3ca798c3fcf039dff0c5dbf54c9b5db2da43af839e0a7dbc9f48c1",
+ "chksum_sha256": "3ceab09c5726dcdc5c5a8979b295f2cd4822136f4f8399b50bd44f88ffa3f115",
"format": 1
},
{
- "name": "plugins/modules/host_aggregate.py",
+ "name": "plugins/modules/volume_type_encryption.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "404e5db01a7c1ffc69bbb21d09a96da253559968af4ec552520bc3cb5f56c827",
+ "chksum_sha256": "ede3b32f788446b493e591ad66d78b82e6a32671678d572697f772513db1648a",
"format": 1
},
{
- "name": "plugins/modules/baremetal_inspect.py",
+ "name": "plugins/modules/volume_type_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ff83c3b479131331f3af9366d00ef47328faf1050fb7be44ccb18f9dd02e9f1b",
+ "chksum_sha256": "fdd99580deea452c1681920aa03f4d136aabc046cce32c8b0255ab17ef11efba",
"format": 1
},
{
- "name": "plugins/modules/os_keystone_identity_provider.py",
+ "name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "68de32545531603a81fec1152aea68b39a481c9b65dd0f6a1b78b04b5a3dee49",
+ "chksum_sha256": "7e3f5c24213614160a893785e15639d75e4b0533567a0301c83b801cbacbac65",
"format": 1
},
{
- "name": "plugins/modules/os_quota.py",
+ "name": "setup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "be2e920833e045fe816b233888b8d78f9612ddbf963e7366e8d58790dae87c96",
+ "chksum_sha256": "7478e980356fe29366647e7c7d6687b7da68d1ea763d4ca865a75ca9498db1c2",
"format": 1
},
{
- "name": "bindep.txt",
+ "name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "955ed5d9eb93b169f416a9532f5b39a7551d2ec74f6841b8b819c50f135f39e8",
+ "chksum_sha256": "77b40201dd42fa67ee2636c6fc9c222438d6ba348d0a9524566108ca5c547c74",
"format": 1
}
],
diff --git a/ansible_collections/openstack/cloud/MANIFEST.json b/ansible_collections/openstack/cloud/MANIFEST.json
index 4f726d8cb..63d7ed815 100644
--- a/ansible_collections/openstack/cloud/MANIFEST.json
+++ b/ansible_collections/openstack/cloud/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "openstack",
"name": "cloud",
- "version": "1.10.0",
+ "version": "2.2.0",
"authors": [
"Openstack"
],
@@ -26,7 +26,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "35409e9382d24dca7a0a4982b613e6526f68e2dd2036c9a9dbb74dece1f7ab7a",
+ "chksum_sha256": "4db28422338a529ec6ba6b5d02e8097a3cc9ee36e6278082d599eb8773f58a31",
"format": 1
},
"format": 1
diff --git a/ansible_collections/openstack/cloud/README.md b/ansible_collections/openstack/cloud/README.md
index 5dd8c9d08..020d64e51 100644
--- a/ansible_collections/openstack/cloud/README.md
+++ b/ansible_collections/openstack/cloud/README.md
@@ -1,94 +1,84 @@
-[![OpenDev Zuul Builds - Ansible Collection OpenStack](https://zuul-ci.org/gated.svg)](http://zuul.opendev.org/t/openstack/builds?project=openstack%2Fansible-collections-openstack#)
+[![OpenDev Zuul Builds - Ansible OpenStack Collection](https://zuul-ci.org/gated.svg)](
+http://zuul.opendev.org/t/openstack/builds?project=openstack%2Fansible-collections-openstack)
-# Ansible Collection: openstack.cloud
+# Ansible OpenStack Collection
-This repo hosts the `openstack.cloud` Ansible Collection.
+The Ansible OpenStack collection, aka `openstack.cloud`, provides Ansible modules and plugins for managing OpenStack
+clouds. It is supported and maintained by the OpenStack community.
-The collection includes the Openstack modules and plugins supported by Openstack community to help the management of Openstack infrastructure.
+**NOTE:** We need and value your contributions! Maintaining this collection is a community effort. We are all both users
+and developers of this collection at the same time. If you find a bug, please report it. If you have fixed a bug, please
+submit a patch. If you need new functionality which is not covered by this collection yet, please extend an existing
+module or submit a new one. Our [Contributing](#contributing) section below has tons of docs to check out. Please get in
+touch!
-## Breaking backward compatibility :warning:
+## Branches and Non Backward Compatibility ⚠️
-Dear contributors and users of the Ansible OpenStack collection!
-Our codebase has been split into two separate release series:
+Our codebase has been split into two separate release series, `2.x.x` and `1.x.x`:
-* `2.x.x` releases of Ansible OpenStack collection are compatible with OpenStack SDK `1.x.x` and its release candidates
- `0.99.x` *only* (OpenStack Zed and later). Our `master` branch tracks our `2.x.x` releases.
-* `1.x.x` releases of Ansible OpenStack collection are compatible with OpenStack SDK `0.x.x` prior to `0.99.0` *only*
- (OpenStack Yoga and earlier). Our `stable/1.0.0` branch tracks our `1.x.x` releases.
+* `2.x.x` releases of Ansible OpenStack collection are compatible with [OpenStack SDK][openstacksdk] `1.x.x` and its
+ release candidates `0.99.0` and later *only* (OpenStack Zed and later). Our `master` branch tracks our `2.x.x`
+ releases.
+* `1.x.x` releases of Ansible OpenStack collection are compatible with [OpenStack SDK][openstacksdk] `0.x.x` prior to
+ `0.99.0` *only* (OpenStack Yoga and earlier). Our `stable/1.0.0` branch tracks our `1.x.x` releases.
+* `2.x.x` releases of Ansible OpenStack collection are not backward compatible with `1.x.x` releases ⚠️
-Both branches will be developed in parallel for the time being. Patches from `master` will be backported to
-`stable/1.0.0` on a best effort basis but expect new features to be introduced in our `master` branch only.
-Contributions are welcome for both branches!
-Differences between both branches are mainly renamed and sometimes dropped module return values. We try to keep our
-module parameters backward compatible by offering aliases but e.g. the semantics of `filters` parameters in `*_info`
-modules have changed due to updates in the OpenStack SDK.
+For rationale and details please read our [branching docs](docs/branching.md). Both branches will be developed in
+parallel for the time being. Patches from `master` will be backported to `stable/1.0.0` on a best effort basis but
+expect new features to be introduced in our `master` branch only. Contributions are welcome for both branches!
-Our decision to break backward compatibility was not taken lightly. OpenStack SDK's first major release (`1.0.0` and its
-release candidates `0.99.x`) has streamlined and improved large parts of its codebase. For example, its Connection
-interface now consistently uses the Resource interfaces under the hood. This required breaking changes from older SDK
-releases though. The Ansible OpenStack collection is heavily based on OpenStack SDK. With OpenStack SDK becoming
-backward incompatible, so does our Ansible OpenStack collection. We simply lack the devpower to maintain a backward
-compatible interface in Ansible OpenStack collection across several SDK releases.
+[openstacksdk]: https://opendev.org/openstack/openstacksdk
-Our first `2.0.0` release is currently under development and we still have a long way to go. If you use modules of the
-Ansible OpenStack collection and want to join us in porting them to the upcoming OpenStack SDK, please contact us!
-Ping Jakob Meng <mail@jakobmeng.de> (jm1) or Rafael Castillo <rcastill@redhat.com> (rcastillo) and we will give you a
-quick introduction. We are also hanging around on `irc.oftc.net/#openstack-ansible-sig` and `irc.oftc.net/#oooq` 😎
+## Installation
-We have extensive documentation on [why, what and how we are adopting and reviewing the new modules](
-https://hackmd.io/szgyWa5qSUOWw3JJBXLmOQ?view), [how to set up a working DevStack environment for hacking on the
-collection](https://hackmd.io/PI10x-iCTBuO09duvpeWgQ?view) and, most importantly, [a list of modules where we are
-coordinating our porting efforts](https://hackmd.io/7NtovjRkRn-tKraBXfz9jw?view).
+To use this collection, you first have to install both Python packages `ansible` and `openstacksdk` on your Ansible
+controller:
-## Installation and Usage
-
-### Installing dependencies
-
-For using the Openstack Cloud collection firstly you need to install `ansible` and `openstacksdk` Python modules on your Ansible controller.
-For example with pip:
-
-```bash
-pip install "ansible>=2.9" "openstacksdk>=0.36,<0.99.0"
+```sh
+pip install "ansible>=2.9" "openstacksdk>=1.0.0"
```
-OpenStackSDK has to be available to Ansible and to the Python interpreter on the host, where Ansible executes the module (target host).
-Please note, that under some circumstances Ansible might invoke a non-standard Python interpreter on the target host.
-Using Python version 3 is highly recommended for OpenstackSDK and strongly required from OpenstackSDK version 0.39.0.
-
----
-
-#### NOTE
-
-OpenstackSDK is better to be the last stable version. It should NOT be installed on Openstack nodes,
-but rather on operators host (aka "Ansible controller"). OpenstackSDK from last version supports
-operations on all Openstack cloud versions. Therefore OpenstackSDK module version doesn't have to match
-Openstack cloud version usually.
+[OpenStack SDK][openstacksdk] has to be available on the Ansible host running the OpenStack modules. Depending on the
+Ansible playbook and roles you use, this host is not necessarily the Ansible controller. Sometimes Ansible might invoke
+a non-standard Python interpreter on the target Ansible host. Python 3.6 or later is required for the modules in this
+collection.
----
+Always use the latest stable version of [OpenStack SDK][openstacksdk] if possible, even when running against older
+OpenStack deployments. OpenStack SDK is backward compatible with older OpenStack deployments, so it is safe to run the
+latest version of the SDK against older OpenStack clouds. The installed version of the OpenStack SDK does not have to
+match your OpenStack cloud, but it has to match the release series of this collection that you are using. For notes
+about our release series and branches, please read the introduction above.
-### Installing the Collection from Ansible Galaxy
+Before using this collection, you have to install it with `ansible-galaxy`:
-Before using the Openstack Cloud collection, you need to install the collection with the `ansible-galaxy` CLI:
-
-`ansible-galaxy collection install openstack.cloud`
+```sh
+ansible-galaxy collection install openstack.cloud
+```
-You can also include it in a `requirements.yml` file and install it through `ansible-galaxy collection install -r requirements.yml` using the format:
+You can also include it in a `requirements.yml` file:
```yaml
collections:
- name: openstack.cloud
```
-### Playbooks
+And then install it with:
-To use a module from the Openstack Cloud collection, please reference the full namespace, collection name, and module name that you want to use:
+```sh
+ansible-galaxy collection install -r requirements.yml
+```
+
+## Usage
+
+To use a module from the Ansible OpenStack collection, call it by its Fully Qualified Collection Name (FQCN),
+composed of namespace, collection name and module name:
```yaml
---
-- name: Using Openstack Cloud collection
- hosts: localhost
+- hosts: localhost
tasks:
- - openstack.cloud.server:
+ - name: Create server in an OpenStack cloud
+ openstack.cloud.server:
name: vm
state: present
cloud: openstack
@@ -103,12 +93,12 @@ Or you can add the full namespace and collection name in the `collections` eleme
```yaml
---
-- name: Using Openstack Cloud collection
- hosts: localhost
+- hosts: localhost
collections:
- openstack.cloud
tasks:
- - server_volume:
+ - name: Create server in an OpenStack cloud
+ server_volume:
state: present
cloud: openstack
server: Mysql-server
@@ -116,49 +106,126 @@ Or you can add the full namespace and collection name in the `collections` eleme
device: /dev/vdb
```
-### Usage
+For powerful generic [CRUD][crud]-style resource management, use the Ansible module
+[`openstack.cloud.resource`](plugins/modules/resource.py):
-See the collection docs at Ansible site:
+```yaml
+---
+- hosts: localhost
+ tasks:
+ - name: Create security group
+ openstack.cloud.resource:
+ cloud: openstack
+ service: network
+ type: security_group
+ attributes:
+ name: ansible_security_group
+ description: 'ansible security group'
+
+ - name: Update security group description
+ openstack.cloud.resource:
+ cloud: openstack
+ service: network
+ type: security_group
+ attributes:
+ name: ansible_security_group
+ description: 'ansible neutron security group'
+
+ - name: Delete security group
+ openstack.cloud.resource:
+ cloud: openstack
+ service: network
+ type: security_group
+ attributes:
+ name: ansible_security_group
+ state: absent
+```
-* [openstack.cloud collection docs (version released in Ansible package)](https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html)
+For generic resource listing, use the Ansible module [`openstack.cloud.resources`](plugins/modules/resources.py):
-* [openstack.cloud collection docs (devel version)](https://docs.ansible.com/ansible/devel/collections/openstack/cloud/index.html)
+```yaml
+---
+- hosts: localhost
+ tasks:
+ - name: List images
+ openstack.cloud.resources:
+ cloud: openstack
+ service: image
+ type: image
-## Contributing
+ - name: List compute flavors
+ openstack.cloud.resources:
+ cloud: openstack
+ service: compute
+ type: flavor
+
+ - name: List networks with name 'public'
+ openstack.cloud.resources:
+ cloud: openstack
+ service: network
+ type: network
+ parameters:
+ name: public
+```
-For information on contributing, please see [CONTRIBUTING](https://opendev.org/openstack/ansible-collections-openstack/src/branch/master/CONTRIBUTING.rst)
+[Ansible module defaults][ansible-module-defaults] are supported as well:
-There are many ways in which you can participate in the project, for example:
+```yaml
+---
+- module_defaults:
+ group/openstack.cloud.openstack:
+ cloud: devstack-admin
+ #
+ #
+ # Listing modules individually is required for
+ # backward compatibility with Ansible 2.9 only
+ openstack.cloud.compute_flavor_info:
+ cloud: devstack-admin
+ openstack.cloud.server_info:
+ cloud: devstack-admin
+ block:
+ - name: List compute flavors
+ openstack.cloud.compute_flavor_info:
+
+ - name: List servers
+ openstack.cloud.server_info:
+```
-- Submit [bugs and feature requests](https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack), and help us verify them
-- Submit and review source code changes in [Openstack Gerrit](https://review.opendev.org/#/q/project:openstack/ansible-collections-openstack)
-- Add new modules for Openstack Cloud
+[ansible-module-defaults]: https://docs.ansible.com/ansible/latest/user_guide/playbooks_module_defaults.html
+[crud]: https://en.wikipedia.org/wiki/CRUD
-We work with [OpenDev Gerrit](https://review.opendev.org/), pull requests submitted through GitHub will be ignored.
+## Documentation
-## Testing and Development
+See the collection docs at Ansible's documentation site:
-If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATHS`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.
+* [openstack.cloud collection docs (version released in Ansible package)](
+ https://docs.ansible.com/ansible/latest/collections/openstack/cloud/index.html)
-### Testing with `ansible-test`
+* [openstack.cloud collection docs (devel version)](
+ https://docs.ansible.com/ansible/devel/collections/openstack/cloud/index.html)
-We use `ansible-test` for sanity:
+## Contributing
-```bash
-tox -e linters
-```
+Thank you for your interest in our Ansible OpenStack collection ☺️
+
+There are many ways in which you can participate in the project, for example:
-## More Information
+- [Report and verify bugs and help with solving issues](
+ https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack).
+- [Submit and review patches](
+ https://review.opendev.org/#/q/project:openstack/ansible-collections-openstack).
+- Follow OpenStack's [How To Contribute](https://wiki.openstack.org/wiki/How_To_Contribute) guide.
-TBD
+Please read our [Contributions and Development Guide](docs/contributing.md) (⚠️) and our [Review Guide](
+docs/reviewing.md) (⚠️) before sending your first patch. Pull requests submitted through GitHub will be ignored.
## Communication
-We have a dedicated Interest Group for Openstack Ansible modules.
-You can find other people interested in this in `#openstack-ansible-sig` on [OFTC IRC](https://www.oftc.net/).
+We have a Special Interest Group for the Ansible OpenStack collection. Join us in `#openstack-ansible-sig` on
+[OFTC IRC](https://www.oftc.net/) 🍪
## License
GNU General Public License v3.0 or later
-See [LICENCE](https://opendev.org/openstack/ansible-collections-openstack/src/branch/master/COPYING) to see the full text.
+See [LICENCE](COPYING) for the full text.
diff --git a/ansible_collections/openstack/cloud/docs/branching.md b/ansible_collections/openstack/cloud/docs/branching.md
new file mode 100644
index 000000000..c0c01c8a7
--- /dev/null
+++ b/ansible_collections/openstack/cloud/docs/branching.md
@@ -0,0 +1,115 @@
+# Ansible OpenStack Collection and its branches
+
+Our codebase has been split into two separate release series, `2.x.x` and `1.x.x`:
+
+* `2.x.x` releases of Ansible OpenStack collection are compatible with [OpenStack SDK][openstacksdk] `1.x.x` and its
+ release candidates `0.99.0` and later *only* (OpenStack Zed and later). Our [`master` branch][a-c-o-branch-master]
+ tracks our `2.x.x` releases.
+* `1.x.x` releases of Ansible OpenStack collection are compatible with [OpenStack SDK][openstacksdk] `0.x.x` prior to
+ `0.99.0` *only* (OpenStack Yoga and earlier). Our [`stable/1.0.0` branch][a-c-o-branch-stable-1-0-0] tracks our
+ `1.x.x` releases.
+* `2.x.x` releases of Ansible OpenStack collection are not backward compatible with `1.x.x` releases ⚠️
+
+Both branches will be developed in parallel for the time being. Patches from `master` will be backported to
+`stable/1.0.0` on a best effort basis but expect new features to be introduced in our `master` branch only.
+Contributions are welcome for both branches!
+
+Our decision to break backward compatibility was not taken lightly. OpenStack SDK's first major release (`1.0.0` and its
+release candidates >=`0.99.0`) has streamlined and improved large parts of its codebase. For example, its Connection
+interface now consistently uses the Resource interfaces under the hood. [This required breaking changes from older SDK
+releases though][openstacksdk-release-notes-zed]. The Ansible OpenStack collection is heavily based on OpenStack SDK.
+With OpenStack SDK becoming backward incompatible, so does our Ansible OpenStack collection. For example, with
+openstacksdk `>=0.99.0` most Ansible modules return dictionaries instead of `Munch` objects and many of their keys
+have been renamed. We simply lack the development resources to maintain a backward compatible interface in the Ansible
+OpenStack collection across several SDK releases.
+
+[a-c-o-branch-master]: https://opendev.org/openstack/ansible-collections-openstack/src/branch/master
+[a-c-o-branch-stable-1-0-0]: https://opendev.org/openstack/ansible-collections-openstack/src/branch/stable/1.0.0
+[ansible-tags]: https://docs.ansible.com/ansible/latest/user_guide/playbooks_tags.html
+[openstacksdk-cloud-layer-stays]: https://meetings.opendev.org/irclogs/%23openstack-sdks/%23openstack-sdks.2022-04-27.log.html
+[openstacksdk-release-notes-zed]: https://docs.openstack.org/releasenotes/openstacksdk/zed.html
+[openstacksdk-to-dict]: https://opendev.org/openstack/openstacksdk/src/branch/master/openstack/resource.py
+[openstacksdk]: https://opendev.org/openstack/openstacksdk
+
+## Notable changes between release series 2.x.x and 1.x.x
+
+When we ported our collection to [openstacksdk][openstacksdk] `>=0.99.0`, a series of changes were applied to our
+`master` branch. We went through each module in our collection and did the following:
+
+* Identify function calls which use [openstacksdk][openstacksdk]'s cloud layer, e.g. `self.conn.get_network()`. Change
+ these calls to functions from openstacksdk's resource proxies, e.g. `self.conn.network.find_network()`, if possible.
+ As a guideline use this decision tree:
+ - If a functionality requires a single api call (to the OpenStack API), then use functions from openstacksdk's
+ resource proxies.
+ - If a functionality requires multiple api calls (to the OpenStack API), e.g. when creating and attaching a floating
+ ip to a server, then use functions from openstacksdk's cloud layer.
+ - When unsure which of openstacksdk's layers to use, then first go to resource proxies and then to its cloud layer.
+ Mainly this applies to functions retrieving information, i.e. all calls where we get info about cloud resources
+ should be changed to openstacksdk functions which return proxy resources.
+ **Note**: Using openstacksdk's cloud layer for functionality which is not provided by openstacksdk's proxy layer is
+ acceptable. [openstacksdk's cloud layer is not going away][openstacksdk-cloud-layer-stays]. For example, listing
+    functions in openstacksdk's cloud layer such as `search_users()` often allow filtering results with the function
+    parameter `filters`. openstacksdk's proxy layer does not provide an equivalent and thus using `search_users()` is fine.
+* Functions in openstacksdk's cloud layer often have different return values than in pre-0.99.0 releases. When return
+  values have changed in any of the functions which a module uses, update the `RETURN` variable. If a module has no
+  `RETURN` variable, define it.
+* Only return data types such as lists or dictionaries to Ansible. For example, the return statement
+ `self.exit_json(changed=False, floating_ips=floating_ips)` in module [`floating_ip_info`](
+  ../plugins/modules/floating_ip_info.py) shall return a list of `dict`s. Use openstacksdk's `to_dict` function to
+ convert resources to dictionaries. Setting its parameters such as `computed` to `False` will drop computed attributes
+ from the resulting dict. Read [`to_dict`'s docstring][openstacksdk-to-dict] for more parameters. Using `to_dict` might
+ change the return values of your Ansible module. Please document changes to return values in `RETURN`.
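+  A minimal sketch of this pattern (module and attribute names are illustrative only):
+  ```Python
+  # Convert each openstacksdk resource to a plain dict before handing it to Ansible;
+  # computed=False drops attributes which openstacksdk computes client-side.
+  floating_ips = [ip.to_dict(computed=False)
+                  for ip in self.conn.network.ips()]
+  self.exit_json(changed=False, floating_ips=floating_ips)
+  ```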
+* Older openstacksdk releases did not provide the `to_dict` function. We decided to allow breaking backward
+ compatibility with release `2.x.x`, so workarounds such as `(o.to_dict() if hasattr(o, 'to_dict') else dict(o))` are
+ not required anymore and shall be avoided.
+* Manually dropping attributes such as `location` or `link` from openstacksdk resources is no longer necessary.
+ Workarounds such as
+ ```Python
+ for raw in self.conn.block_storage.backups(**attrs):
+ dt = raw.to_dict()
+ dt.pop('location')
+ data.append(dt)
+ ```
+ are no longer necessary and can be removed.
+* Add tests to [ci/run-collection.yml](../ci/run-collection.yml) and [ci/roles](../ci/roles). Each module has a
+  dedicated Ansible role with tests in `ci/roles`. Create one if no such directory exists.
+* With the release of openstacksdk 0.99.0, most of our CI tests in [ci/](../ci/) failed. To prove that module patches
+  actually fix issues, all CI tests for unrelated broken modules have to be skipped. To run CI tests for patched modules
+ only, temporarily list the [Ansible tags][ansible-tags] of all CI tests which should run in
+ `vars: { tox_extra_args: ... }` of job `ansible-collections-openstack-functional-devstack-ansible` in `.zuul.yaml`
+ ([example](https://review.opendev.org/c/openstack/ansible-collections-openstack/+/825291/16/.zuul.yaml)) and send the
+ patch for review. Once all CI tests are passing in Zuul CI, undo changes to [`.zuul.yaml`](../.zuul.yaml), i.e. revert
+ changes to `tox_extra_args` and submit final patch for review.
+* ~~Cherry-pick or backport patches for `master` branch to `stable/1.0.0` branch. Both branches should divert only if
+  necessary in order to keep maintenance of two separate branches simple. When applying patches to the `stable/1.0.0`
+ branch, it is often necessary to make changes to not break backward compatibility on the `stable/1.0.0` branch. On
+ `master` we use `.to_dict(computed=False)` which we have to change to `.to_dict(computed=True)` on `stable/1.0.0`. For
+ example, this [patch for `master` branch](
+ https://review.opendev.org/c/openstack/ansible-collections-openstack/+/828108) has been [tweaked and cherry-picked to
+ `stable/1.0.0` branch](https://review.opendev.org/c/openstack/ansible-collections-openstack/+/836312).~~
+  Backporting patches from `master` to the `stable/1.0.0` branch has been abandoned due to lack of time and resources ⚠️
+* Version checks in modules are no longer necessary because we require openstacksdk >=0.99.0 globally. For example,
+ drop `min_ver`/`max_ver` constraints on module arguments.
+* Rename module parameter names to the attribute names that openstacksdk uses, e.g. `shared` becomes `is_shared`. Keep
+ old names as aliases for input backward compatibility.
+* Some modules have if-else branches for handling cases where a `name` is given. For most modules these can be dropped
+ safely because names can be passed as a query parameter.
+* Some modules do not use `name` as the module parameter for resource names. For example, the `port` module had an
+  attribute called `port` instead of `name`. Rename those attributes to `name` to be consistent with other modules and
+  because openstacksdk is doing the same. Add old attribute names as aliases to keep input backward compatibility.
+* When replacing `self.conn.get_*` calls, note that `self.conn.*.find_*` functions provide an `ignore_missing=False`
+  parameter. This allows dropping `self.fail_json()` calls in modules. Less code means less to maintain.
+* Some modules pass `ignore_missing=True` to `self.conn.*.find_*` functions and then fail if the return value is `None`.
+ Often this code can be simplified by changing `ignore_missing` to `False` and dropping the if-else branches.
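+  For example, a lookup followed by a manual failure check can often collapse into a single call (a sketch, resource
+  names are illustrative):
+  ```Python
+  # Before: manual error handling
+  network = self.conn.network.find_network(name_or_id, ignore_missing=True)
+  if network is None:
+      self.fail_json(msg='Network {0} not found'.format(name_or_id))
+
+  # After: let openstacksdk raise an error if the resource is missing
+  network = self.conn.network.find_network(name_or_id, ignore_missing=False)
+  ```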
+* When a module attribute has `choices`, always question its values. The module code was probably written long ago and
+  the choices given might be outdated. It might also make sense to drop the `choices` parameter completely when the
+  choices are too narrow and might soon be outdated again.
+* Check comments whether they are still relevant.
+* Sanity check existing integration tests. For example, return values of module calls should be tested, otherwise
+  running a test could be useless in the first place.
+* Most functions in openstacksdk's cloud layer no longer return `Munch` objects. Instead they return resources which
+ should be converted to dictionaries. Update `RETURN` docs in modules, e.g. change from `type: complex` to
+ `type: dict`.
+* Move the list of expected module results to role defaults, e.g. define a variable `expected_fields`. This enables
+  easier reuse.
+* Follow and apply our [development guide](contributing.md) and [review guide](reviewing.md).
diff --git a/ansible_collections/openstack/cloud/docs/contributing.md b/ansible_collections/openstack/cloud/docs/contributing.md
new file mode 100644
index 000000000..d1026d818
--- /dev/null
+++ b/ansible_collections/openstack/cloud/docs/contributing.md
@@ -0,0 +1,191 @@
+# Development Guide for Ansible OpenStack Collection
+
+The Ansible OpenStack collection is a set of Ansible modules for interacting with the OpenStack API as either an admin
+or an end user.
+
+We, and the OpenStack community in general, use OpenDev for developing this collection. Patches are submitted to [OpenDev Gerrit][
+opendev-gerrit]. Pull requests submitted through GitHub will be ignored. Please read OpenStack's [Developer Workflow][
+openstack-developer-workflow] for details.
+
+For hacking on the Ansible OpenStack collection it helps to [prepare a DevStack environment](devstack.md) first.
+
+## Hosting
+
+* [Bug tracker][storyboard]
+* [Mailing list `openstack-discuss@lists.openstack.org`][openstack-discuss].
+ Prefix subjects with `[aoc]` or `[aco]` for faster responses.
+* [Code Hosting][opendev-a-c-o]
+* [Code Review][gerrit-a-c-o]
+
+## Branches
+
+For rationale behind our `master` and `stable/1.0.0` branches and details on our relation to [openstacksdk][
+openstacksdk], please read our [branching docs](branching.md).
+
+## Examples
+
+* For an example on how to write a `*_info` module, have a look at modules [`openstack.cloud.identity_role_info`](
+ ../plugins/modules/identity_role_info.py) or [`openstack.cloud.neutron_rbac_policies_info`](
+ ../plugins/modules/neutron_rbac_policies_info.py).
+* For an example on how to write a regular non-`*_info` module, have a look at module
+ [`openstack.cloud.federation_idp`](../plugins/modules/federation_idp.py) or any other module which uses
+ [`class StateMachine`](../plugins/module_utils/resource.py).
+* Do NOT use modules which define a `_system_state_change` function as examples, because they often do not properly
+  implement Ansible's check mode, idempotency and/or updates. Refer to modules which use [`class StateMachine`](
+ ../plugins/module_utils/resource.py). In cases where using `class StateMachine` would cause code bloat, it might help
+ to look at modules which define a `_will_change` function instead.
+
+## Naming
+
+* This collection is named `openstack.cloud`. There is no need for further namespace prefixing.
+* Name modules the way a cloud consumer would expect them from [openstackclient (OSC)][openstackclient], for example
+  `server` instead of `nova`. This naming convention acknowledges that the end user does not care which service manages
+  the resource - that is a deployment detail. For example, cloud consumers may not know whether their floating ip
+  addresses are managed by Nova or Neutron.
+
+## Interface
+
+* If the resource being managed has an `id`, it should be returned.
+* If the resource being managed has an associated object more complex than an `id`, that should be returned instead of
+ the `id`.
+* Modules should return a value of type `dict`, `list` or other primitive data types. For example, `floating_ips` in
+  `self.exit_json(changed=False, floating_ips=floating_ips)` should be a list of `dict`s. Use `to_dict()` on
+ [openstacksdk][openstacksdk] objects to convert resources to dictionaries. Setting its parameters such as `computed`
+ to `False` will drop computed attributes from the resulting dict. Read [`to_dict`'s docstring][openstacksdk-to-dict]
+ for more parameters.
+* Module results have to be documented in `RETURN` docstring.
+* We should document which attributes cannot be updated in the `DOCUMENTATION` variable. For example, insert
+  `'This attribute cannot be updated.'` into `DOCUMENTATION` like we did for the `server` module and others.
+* Sorting module options in `DOCUMENTATION`, attributes in `RETURN`, entries in `argument_spec` and expected fields in
+ integration tests will make reviewing easier and faster.
+
+## Interoperability
+
+* It should be assumed that the cloud consumer does not know details about the deployment choices their cloud provider
+ made. A best effort should be made to present one sane interface to the Ansible user regardless of deployer choices.
+* It should be assumed that a user may have more than one cloud account that they wish to combine as part of a single
+ Ansible-managed infrastructure.
+* All modules should work appropriately against all existing versions of OpenStack regardless of upstream EOL status.
+ The reason for this is that the Ansible modules are for consumers of cloud APIs who are not in a position to impact
+ what version of OpenStack their cloud provider is running. It is known that there are OpenStack Public Clouds running
+ rather old versions of OpenStack, but from a user point of view the Ansible modules can still support these users
+ without impacting use of more modern versions.
+
+## Coding Guidelines
+
+* Modules should
+ + be idempotent (not being idempotent requires a solid reason),
+ + return whether something has `changed`,
+ + support `check mode`,
+ + be based on (be subclasses of) `OpenStackModule` in
+ `ansible_collections.openstack.cloud.plugins.module_utils.openstack`,
+  + include `extends_documentation_fragment: openstack` in their `DOCUMENTATION` docstring,
+ + be registered in `meta/action_groups.yml` for enabling the variables to be set in
+ [group level][ansible-module-defaults].
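+  A bare-bones skeleton of such a module, for orientation only (class name, options and logic are made up and no real
+  resource handling is shown):
+  ```Python
+  from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+  class ExampleModule(OpenStackModule):
+      argument_spec = dict(
+          name=dict(required=True),
+          state=dict(default='present', choices=['present', 'absent']),
+      )
+      module_kwargs = dict(supports_check_mode=True)
+
+      def run(self):
+          # Find, create, update or delete the resource here and
+          # report whether anything changed.
+          self.exit_json(changed=False)
+
+
+  def main():
+      module = ExampleModule()
+      module()
+
+
+  if __name__ == '__main__':
+      main()
+  ```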
+* Complex functionality, cloud interaction or interoperability code should be moved to [openstacksdk][openstacksdk].
+* OpenStack API interactions should happen via [openstacksdk][openstacksdk] and not via OpenStack component libraries.
+  The OpenStack component libraries do not have end users as a primary audience; they are for intra-server communication.
+* When a resource exists and should be deleted (absent), then pass the resource to the `delete_*` function, not its
+  name. Passing a name requires openstacksdk to find that resource again, causing an unnecessary API call, because we
+  already queried the resource before.
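+  A short sketch (network resources are used only as an illustration):
+  ```Python
+  network = self.conn.network.find_network(self.params['name'])
+  if network:
+      # Pass the resource we already fetched, not its name, which would
+      # trigger another lookup inside openstacksdk.
+      self.conn.network.delete_network(network)
+  ```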
+* `*_info` modules never raise exceptions when resources cannot be found. When resources cannot be found, then a
+ `*_info` module returns an empty list instead. For example, module `openstack.cloud.neutron_rbac_policies_info` will
+ return an empty list when no project with name given in module parameter `project` can be found.
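+  A sketch of that behaviour (attribute names are illustrative):
+  ```Python
+  # query is a dict of filter parameters built from module options.
+  # No exception is raised when nothing matches; Ansible simply gets an empty list.
+  policies = [p.to_dict(computed=False)
+              for p in self.conn.network.rbac_policies(**query)]
+  self.exit_json(changed=False, rbac_policies=policies)
+  ```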
+* When an id is given in `*_info` modules, then we neither need nor want extra code to handle that. Instead, most
+  [openstacksdk][openstacksdk] resources allow passing ids as query arguments to the OpenStack API. For example,
+  `identity.identity_providers()` can be used for both cases: where an id is given and where no id is given. No need to
+  call `get_identity_provider()`.
+* The `EXAMPLES` docstring in modules (as in Ansible's own modules) consists of a list of tasks. It does not contain
+  the YAML directives end marker line (`---`) and does not define playbooks (e.g. no `hosts` keyword). Examples shall be
+  simple, e.g. no fancy loops, no heavy use of variables and no Ansible directives such as `ignore_errors` or `register`
+  used for no apparent reason.
+* `self.params.get('...')` can be replaced with `self.params['...']` because parameters from `argument_spec` will always
+ be in `self.params`. If not defined differently, they have a default value of `None`.
+* Writing code to check that some options cannot be updated and to fail if a user still tries to update them is most
+  often not worth it. It would require much more code to catch all cases where updates are impossible and we would have
+  to implement it consistently across modules. At the moment we are fine with documenting which attributes cannot be
+  updated in the `DOCUMENTATION` variable. We could simply drop these checks and insert `'This attribute cannot be
+  updated.'` into `DOCUMENTATION` like we did for the `server` module and others.
+* [openstacksdk][openstacksdk] functions often accept IDs but not names, e.g. `find_address_scope()` and
+  `create_address_scope()` accept a `project_id` parameter. Most modules in our collection use names for finding
+  resources, so we want to support the same for resource attributes such as `project_id` in `AddressScope`.
+* Constraints for module parameters and error handling can often be implemented in `argument_spec` or `module_kwargs`.
+  `module_kwargs` allows defining dependencies between module options such as [`mutually_exclusive`,
+  `required_together`, `required_if` etc.][ansible-argument-spec-dependencies]. See the sketch after this list.
+* When using [openstacksdk][openstacksdk]'s `find_*` functions (`self.conn.*.find_*`), then pass `ignore_missing=False`
+ instead of checking its return value and failing with `self.fail_json()` if it is `None`.
+* Use module option names which match attribute names used in [openstacksdk][openstacksdk], e.g. use `is_shared` instead
+ of `shared`. When refactoring modules, keep old option names as aliases to keep backward compatibility. Using
+ openstacksdk names provides two benefits:
+  - The module's inputs and outputs match and are consistent, thus the module is easier to use.
+ - Most code for filters and query arguments can be replaced with loops. [This patch for floating_ip_info has some
+ ideas for how to write loops](https://review.opendev.org/c/openstack/ansible-collections-openstack/+/828613).
+* Use functions from [openstacksdk][openstacksdk]'s proxy layer instead of its cloud layer, if possible. For example,
+ use `self.conn.network.find_network()`, not `self.conn.get_network()`. As a guideline use this decision tree:
+ - If a functionality requires a single api call (to the OpenStack API), then use functions from openstacksdk's proxy
+ layer.
+ - If a functionality requires several api calls (to the OpenStack API), e.g. when creating and attaching a floating ip
+ to a server, then use functions from openstacksdk's cloud layer.
+  - When unsure which of openstacksdk's layers to use, first go to its proxy layer, then to its cloud layer and, if
+    this is not sufficient, to its resource layer. Mainly, this applies to functions retrieving information, i.e. all
+    calls where we get info about cloud resources should be changed to openstacksdk functions which return proxy
+    resources.
+ - It is perfectly fine to use openstacksdk's cloud layer for functionality which is not provided by openstacksdk's
+ proxy layer. [SDK's cloud layer is not going away][openstacksdk-cloud-layer-stays].
+    For example, `list_*` functions from openstacksdk's cloud layer such as `search_users()` allow filtering retrieved
+ results with function parameter `filters`. openstacksdk's proxy layer does not provide an equivalent and thus the
+ use of `search_users()` is perfectly fine.
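+
+A minimal, hypothetical sketch of how several of the guidelines above fit together: subclassing `OpenStackModule`,
+declaring option dependencies in `module_kwargs`, calling [openstacksdk][openstacksdk]'s proxy layer with
+`ignore_missing=False` and returning plain dictionaries via `to_dict(computed=False)`. Module, class and option names
+are made up for illustration:
+
+```python
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+class ExampleNetworkModule(OpenStackModule):
+    argument_spec = dict(
+        name=dict(required=True),
+        # openstacksdk attribute name, with the old option name kept as alias
+        is_shared=dict(type='bool', aliases=['shared']),
+    )
+    module_kwargs = dict(
+        supports_check_mode=True,
+        # dependencies between options would be declared here, e.g.
+        # mutually_exclusive, required_together, required_if, ...
+    )
+
+    def run(self):
+        # find_network() is part of openstacksdk's proxy layer;
+        # ignore_missing=False raises an error when the network does not
+        # exist, so no extra check and self.fail_json() call is needed.
+        network = self.conn.network.find_network(self.params['name'],
+                                                 ignore_missing=False)
+        # Return plain dictionaries, not openstacksdk resource objects.
+        self.exit_json(changed=False,
+                       network=network.to_dict(computed=False))
+
+
+def main():
+    module = ExampleNetworkModule()
+    module()
+
+
+if __name__ == '__main__':
+    main()
+```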
+
+## Testing
+
+* Modules have to be tested with CI integration tests (if possible).
+* Each module has a corresponding Ansible role containing integration tests in [`ci/roles`](../ci/roles) directory.
+* Ensure role names of integration tests in [`ci/roles`](../ci/roles) match the module names.
+  The only exceptions are `*_info` modules: their integration tests are located in the same Ansible roles as their
+ non-`*_info` equivalents (to reduce redundant code). For example, tests for both modules `federation_mapping` and
+ `federation_mapping_info` can be found in role `federation_mapping`.
+* Zuul CI jobs are defined in [`.zuul.yaml`](../.zuul.yaml).
+* Add assertions on return values from Ansible modules in integration tests. For an example, refer to
+ [`ci/roles/floating_ip/tasks/main.yml`](../ci/roles/floating_ip/tasks/main.yml).
+ We need those checks to validate return values from [openstacksdk][openstacksdk], which might change across releases.
+  Adding those assertions takes only minutes, while checking the output manually during code reviews takes much
+ more time.
+* Our Zuul CI jobs will run `ansible-test` for sanity checking.
+* Use `tox -elinters_latest` to run various linters against your code.
+
+## Upload
+
+* Study our [Review Guidelines](reviewing.md) before submitting a patch.
+* Use Gerrit's work-in-progress feature to mark the status of the patch. A minus workflow (-w) will be reset when a new
+  patchset is uploaded and hence is easy to miss.
+* When you edit a patch, first rebase your patch on top of the current branch. Sometimes we replace code in all modules
+ which might cause merge conflicts for you otherwise. For example, we dropped all options with default values from
+ `argument_spec` such as `required=False`.
+
+## Release
+
+Read [Release Guide](releasing.md) on how to publish new releases.
+
+## Permissions
+
+* Only [members of group `ansible-collections-openstack-core`][group-a-c-o-core] are allowed to merge patches.
+* Only [members of group `ansible-collections-openstack-release`][group-a-c-o-release] are allowed to push tags and
+ trigger our release job `ansible-collections-openstack-release` in [galaxy.yml](../galaxy.yml).
+* Only members of `openstack` namespace in Ansible Galaxy are allowed to apply changes to meta properties of Ansible
+ collection [`openstack.cloud`][ansible-galaxy-openstack-cloud] on Ansible Galaxy.
+
+[ansible-argument-spec-dependencies]: https://docs.ansible.com/ansible/latest/dev_guide/developing_program_flow_modules.html#argument-spec-dependencies
+[ansible-galaxy-openstack-cloud]: https://galaxy.ansible.com/openstack/cloud
+[ansible-module-defaults]: https://docs.ansible.com/ansible/latest/user_guide/playbooks_module_defaults.html
+[gerrit-a-c-o]: https://review.opendev.org/q/status:open+project:openstack/ansible-collections-openstack
+[group-a-c-o-core]: https://review.opendev.org/admin/groups/0e01228e912733e8b9a8d957631e41665aa0ffbd,members
+[group-a-c-o-release]: https://review.opendev.org/admin/groups/8bca2018f3710f94374aee4b3c9771b9ff0a2254,members
+[opendev-a-c-o]: https://opendev.org/openstack/ansible-collections-openstack
+[opendev-gerrit]: https://review.opendev.org/
+[openstack-developer-workflow]: https://docs.openstack.org/infra/manual/developers.html#development-workflow
+[openstack-discuss]: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+[openstackclient]: https://docs.openstack.org/python-openstackclient/latest/
+[openstacksdk-cloud-layer-stays]: https://meetings.opendev.org/irclogs/%23openstack-sdks/%23openstack-sdks.2022-04-27.log.html
+[openstacksdk-to-dict]: https://opendev.org/openstack/openstacksdk/src/branch/master/openstack/resource.py
+[openstacksdk]: https://opendev.org/openstack/openstacksdk
+[storyboard]: https://storyboard.openstack.org/#!/project/openstack/ansible-collections-openstack
diff --git a/ansible_collections/openstack/cloud/docs/devstack.md b/ansible_collections/openstack/cloud/docs/devstack.md
new file mode 100644
index 000000000..fb8e86682
--- /dev/null
+++ b/ansible_collections/openstack/cloud/docs/devstack.md
@@ -0,0 +1,107 @@
+# Preparing a DevStack environment for Ansible collection development
+
+For developing on the Ansible OpenStack collection, it helps to install DevStack and two Python [`virtualenv`][
+virtualenv]s, one with [openstacksdk][openstacksdk] `<0.99.0` and one with [openstacksdk][openstacksdk] `>=1.0.0` (or
+one of its release candidates `>=0.99.0`). The first is for patches against our `stable/1.0.0` branch of the collection,
+while the newer openstacksdk is for patches against our `master` branch.
+
+First, [follow DevStack's guide][devstack] to set up DevStack on a virtual machine. An Ansible inventory and a playbook
+to set up your own local DevStack as a libvirt domain can be found in the Ansible collection [`jm1.cloudy`][jm1-cloudy];
+look for host `lvrt-lcl-session-srv-200-devstack`.
+
+**Beware:** DevStack's purpose is to be set up quickly and destroyed after development or testing is done. It cannot
+be rebooted safely or upgraded easily.
+
+Some Ansible modules and unit tests in the Ansible OpenStack collection require additional DevStack plugins which
+are not enabled by default. [Plugins are enabled in DevStack's `local.conf`][devstack-plugins]. Examples:
+
+- Use the DevStack configuration which the Zuul CI jobs are applying when testing the Ansible OpenStack collection. For
+ example, go to the logs of job [`ansible-collections-openstack-functional-devstack`][devstack-jobs] and use file
+ `controller/logs/local_conf.txt` as your `local.conf` for DevStack.
+- https://gist.github.com/sshnaidm/43ca23c3f23bd6015d18868ac7405a13
+- https://paste.opendev.org/show/812460/
+
+For a list of plugins refer to [DevStack's plugin registry][devstack-plugin-registry].
+
+Next, prepare two Python [`virtualenv`][virtualenv]s, one with [openstacksdk][openstacksdk] `<0.99.0` and one with
+[openstacksdk][openstacksdk] `>=1.0.0` (or one of its release candidates `>=0.99.0`):
+
+```sh
+# DevStack is presumed to be installed on the development machine
+# and its configuration file available at ~/devstack/openrc
+
+git clone https://opendev.org/openstack/ansible-collections-openstack.git
+mkdir -p ~/.ansible/collections/ansible_collections/openstack/
+ln -s "${PWD}/ansible-collections-openstack" ~/.ansible/collections/ansible_collections/openstack/cloud
+
+# Prepare environment for developing patches against
+# Ansible OpenStack collection 2.x.x and openstacksdk>=0.99.0
+cd ansible-collections-openstack/
+git checkout master
+virtualenv -p python3 ~/.local/share/virtualenv/ansible-openstacksdk-1
+source ~/.local/share/virtualenv/ansible-openstacksdk-1/bin/activate
+pip install -r test-requirements.txt
+pip install git+https://opendev.org/openstack/openstacksdk
+pip install ipython
+source ~/devstack/openrc admin admin
+ipython
+
+cd ..
+
+# Prepare environment for developing patches against
+# Ansible OpenStack collection 1.x.x and openstacksdk<0.99.0
+virtualenv -p python3 ~/.local/share/virtualenv/ansible-openstacksdk-0
+source ~/.local/share/virtualenv/ansible-openstacksdk-0/bin/activate
+cd ansible-collections-openstack/
+git checkout stable/1.0.0
+pip install -r test-requirements.txt
+pip install 'openstacksdk<0.99.0'
+pip install ipython
+source ~/devstack/openrc admin admin
+ipython
+```
+
+The first IPython instance uses openstacksdk >=0.99.0 and is for developing on the 2.x.x series of the Ansible OpenStack
+collection. The second IPython instance uses openstacksdk <0.99.0 and is suited for the 1.x.x series of the collection.
+For example, type in each IPython instance:
+
+```python
+import openstack
+conn = openstack.connect()
+
+# optional
+openstack.enable_logging(debug=True)
+
+# and start hacking..
+list(conn.network.ips())[0].to_dict(computed=False)
+```
+
+To run the collection's integration tests, run this in a Bash shell:
+
+```sh
+SDK_VER=$(python -c "import openstack; print(openstack.version.__version__)")
+ansible-playbook -vvv ci/run-collection.yml -e "sdk_version=${SDK_VER} cloud=devstack-admin cloud_alt=devstack-alt"
+```
+
+Use `ansible-playbook`'s `--tags` and `--skip-tags` parameters to select or skip CI tests. For a list of available tags, refer to
+[`ci/run-collection.yml`](../ci/run-collection.yml).
+
+Or run Ansible modules individually:
+
+```sh
+ansible localhost -m openstack.cloud.floating_ip -a 'server=ansible_server1 wait=true' -vvv
+```
+
+When submitting a patch with `git review`, our Zuul CI jobs will test your changes against different versions of
+openstacksdk, Ansible and DevStack. Refer to [`.zuul.yaml`](../.zuul.yaml) for a complete view of all CI jobs. To
+trigger experimental jobs, write a comment in Gerrit which contains `check experimental`.
+
+Happy hacking!
+
+[devstack-jobs]: https://zuul.opendev.org/t/openstack/builds?job_name=ansible-collections-openstack-functional-devstack&project=openstack/ansible-collections-openstack
+[devstack-plugin-registry]: https://docs.openstack.org/devstack/latest/plugin-registry.html
+[devstack-plugins]: https://docs.openstack.org/devstack/latest/plugins.html
+[devstack]: https://docs.openstack.org/devstack/latest/
+[jm1-cloudy]: https://github.com/JM1/ansible-collection-jm1-cloudy
+[openstacksdk]: https://opendev.org/openstack/openstacksdk/
+[virtualenv]: https://virtualenv.pypa.io/en/latest/
diff --git a/ansible_collections/openstack/cloud/docs/openstack_guidelines.rst b/ansible_collections/openstack/cloud/docs/openstack_guidelines.rst
deleted file mode 100644
index 8da91a4c9..000000000
--- a/ansible_collections/openstack/cloud/docs/openstack_guidelines.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-.. _OpenStack_module_development:
-
-OpenStack Ansible Modules
-=========================
-
-These are a set of modules for interacting with the OpenStack API as either an admin
-or an end user.
-
-.. contents::
- :local:
-
-Naming
-------
-
-* This is a collection named ``openstack.cloud``. There is no need for further namespace prefixing.
-* Name any module that a cloud consumer would expect to use after the logical resource it manages:
- ``server`` not ``nova``. This naming convention acknowledges that the end user does not care
- which service manages the resource - that is a deployment detail. For example cloud consumers may
- not know whether their floating IPs are managed by Nova or Neutron.
-
-Interface
----------
-
-* If the resource being managed has an id, it should be returned.
-* If the resource being managed has an associated object more complex than
- an id, it should also be returned.
-* Return format shall be a dictionary or list
-
-Interoperability
-----------------
-
-* It should be assumed that the cloud consumer does not know
- details about the deployment choices their cloud provider made. A best
- effort should be made to present one sane interface to the Ansible user
- regardless of deployer choices.
-* It should be assumed that a user may have more than one cloud account that
- they wish to combine as part of a single Ansible-managed infrastructure.
-* All modules should work appropriately against all existing versions of
- OpenStack regardless of upstream EOL status. The reason for this is that
- the Ansible modules are for consumers of cloud APIs who are not in a
- position to impact what version of OpenStack their cloud provider is
- running. It is known that there are OpenStack Public Clouds running rather
- old versions of OpenStack, but from a user point of view the Ansible
- modules can still support these users without impacting use of more
- modern versions.
-
-Libraries
----------
-
-* All modules should use ``OpenStackModule`` from
- ``ansible_collections.openstack.cloud.plugins.module_utils.openstack``
- as their base class.
-* All modules should include ``extends_documentation_fragment: openstack``.
-* All complex cloud interaction or interoperability code should be housed in
- the `openstacksdk <https://opendev.org/openstack/openstacksdk>`_
- library.
-* All OpenStack API interactions should happen via the openstackSDK and not via
- OpenStack Client libraries. The OpenStack Client libraries do no have end
- users as a primary audience, they are for intra-server communication.
-* All modules should be registered in ``meta/action_groups.yml`` for enabling the
- variables to be set in `group level
- <https://docs.ansible.com/ansible/latest/user_guide/playbooks_module_defaults.html>`_.
-
-Testing
--------
-
-* Integration testing is currently done in `OpenStack's CI system
- <https://opendev.org/openstack/ansible-collections-openstack/src/branch/master/.zuul.yaml>`_
diff --git a/ansible_collections/openstack/cloud/docs/releasing.md b/ansible_collections/openstack/cloud/docs/releasing.md
new file mode 100644
index 000000000..8babb3560
--- /dev/null
+++ b/ansible_collections/openstack/cloud/docs/releasing.md
@@ -0,0 +1,125 @@
+# Release process for Ansible OpenStack collection
+
+## Publishing to Ansible Galaxy
+
+1. Create an entry in [changelog.yaml](../changelogs/changelog.yaml) covering the commits since the last release.
+   * Modules should be listed in a separate section `modules`
+   * Bugfixes and minor changes go in their respective sections
+2. Change version in [galaxy.yml](../galaxy.yml). Apply [Semantic Versioning](https://semver.org/):
+   * Increase major version for breaking changes or when modules were removed
+ * Increase minor version when modules were added
+ * Increase patch version for bugfixes
+3. Run the `antsibull-changelog release` command (run `pip install antsibull` beforehand) to generate [CHANGELOG.rst](
+ ../CHANGELOG.rst) and verify correctness of generated files.
+4. Commit the changes to `changelog.yaml` and `galaxy.yml`, submit the patch and wait until it has been merged.
+5. Tag the release with the version as described in [OpenStack docs](
+ https://docs.opendev.org/opendev/infra-manual/latest/drivers.html#tagging-a-release):
+ * [Make sure you have a valid GnuPG key pair](
+ https://docs.github.com/en/authentication/managing-commit-signature-verification/generating-a-new-gpg-key)
+ * `git checkout <your_branch>`
+ * `git pull --ff-only`
+ * `git tag -s <version number>` where `<version number>` is your tag
+ * `git push gerrit <version number>`
+6. When your tag has been pushed in the previous step, our release job `ansible-collections-openstack-release`, defined
+ in [galaxy.yml](../galaxy.yml), will run automatically and publish a new release with your tag to [Ansible Galaxy](
+ https://galaxy.ansible.com/openstack/cloud). When it has finished, its status and logs can be accessed on [Zuul CI's
+ builds page](https://zuul.opendev.org/t/openstack/builds?job_name=ansible-collections-openstack-release).
+7. If the release job `ansible-collections-openstack-release` has failed, you can manually build the collection locally
+ and publish your release to Ansible Galaxy:
+ * `git checkout <version number>` where `<version number>` is your tag
+ * Delete untracked files and directories with `git clean -n; git clean -fd`
+ * Build collection with `ansible-galaxy`, for example:
+ ```sh
+ ansible-galaxy collection build --force --output-path /path/to/collection/dir
+ ```
+ * On success you will find a `*.tar.gz` file in `/path/to/collection/dir`, e.g. `openstack-cloud-1.5.0.tar.gz`
+ * Go to [your content page on Ansible Galaxy](https://galaxy.ansible.com/my-content/namespaces), open namespace
+ `openstack`, click on `Upload New Version` and upload your release `*.tar.gz`, e.g. `openstack-cloud-1.5.0.tar.gz`.
+     Pushing collection tarballs for `openstack.cloud` requires membership in the `openstack` namespace on Ansible
+     Galaxy.
+   * Instead of using the Ansible Galaxy web interface, you can also upload your release from the CLI. For example:
+ ```sh
+ ansible-galaxy collection publish --token $API_GALAXY_TOKEN -v /path/to/openstack-cloud-1.5.0.tar.gz
+ ```
+ where `$API_GALAXY_TOKEN` is your API key from [Ansible Galaxy](https://galaxy.ansible.com/me/preferences).
+   * [Monitor import progress on Ansible Galaxy](https://galaxy.ansible.com/my-imports/) and act on any issues.
+8. Announce new release to [The Bullhorn](https://github.com/ansible/community/wiki/News#the-bullhorn): Join
+ [Ansible Social room on Matrix](https://matrix.to/#/#social:ansible.com) and mention [newsbot](
+ https://matrix.to/#/@newsbot:ansible.im) to have your news item tagged for review for the next issue!
+
+## Publishing to Fedora
+
+**NOTE:** Before publishing an updated RPM for Fedora or RDO, contact Alfredo Moralejo Alonso <amoralej@redhat.com>
+(amoralej) or Joel Capitao <jcapitao@redhat.com> (jcapitao[m]) in `#rdo` on [OFTC IRC](https://www.oftc.net/) about the
+latest release process.
+
+**NOTE:** If your username is in Fedora's `admins` group, you can push your commit directly to Fedora's repository for
+Ansible OpenStack collection. Otherwise you will have to open pull requests to send changes.
+
+1. Get familiar with packaging for Fedora. Useful resources are:
+ * [Fedora's Package Update Guide](https://docs.fedoraproject.org/en-US/package-maintainers/Package_Update_Guide/)
+ * [Fedora package source for Ansible OpenStack collection](
+ https://src.fedoraproject.org/rpms/ansible-collections-openstack)
+ * [Koji page for `ansible-collections-openstack`](https://koji.fedoraproject.org/koji/packageinfo?packageID=33611)
+ * [Bodhi's page `Create New Update`](https://bodhi.fedoraproject.org/updates/new)
+2. Install all necessary packaging tools, mainly `fedpkg`.
+3. Create a scratch space with `mkdir fedora-scm`.
+4. Fork Fedora repository [rpms/ansible-collections-openstack](
+ https://src.fedoraproject.org/rpms/ansible-collections-openstack).
+5. Clone [rpms/ansible-collections-openstack](https://src.fedoraproject.org/rpms/ansible-collections-openstack) with
+ `fedpkg clone rpms/ansible-collections-openstack`. Or clone your forked repository (something like
+ `https://src.fedoraproject.org/fork/sshnaidm/rpms/ansible-collections-openstack`) with
+ `fedpkg clone forks/sshnaidm/rpms/ansible-collections-openstack` where `sshnaidm` has to be replaced with your
+ username.
+6. `cd ansible-collections-openstack` and go to branch `rawhide` with `fedpkg switch-branch rawhide`.
+7. Download new collection sources from Ansible Galaxy using
+   `wget https://galaxy.ansible.com/download/openstack-cloud-<version_tag>.tar.gz` where `<version_tag>` is your new
+   version, e.g. `1.10.0`. Or run `spectool -g *.spec` *after* having changed the `*.spec` file in the next step.
+8. Bump version in `*.spec` file as in this [example for `1.9.4`](
+ https://src.fedoraproject.org/rpms/ansible-collection-containers-podman/c/6dc5eb79a3aa082e062768993bed66675ff9d520):
+ ```diff
+ +Version: <version-tag>
+ +Release: 1%{?dist}
+ ```
+   and add a changelog entry along the lines of:
+ ```diff
+ +* Tue Jun 08 2021 Sagi Shnaidman <sshnaidm@redhat.com> - <version-tag>-1
+ +- Bump to <version-tag>-1
+ ```
+9. Upload the sources you downloaded earlier with `fedpkg new-sources openstack-cloud-<version-tag>.tar.gz`.
+10. Optionally check build with `fedpkg mockbuild`.
+11. Verify and commit updated `*.spec` file with:
+ ```sh
+ fedpkg diff
+ fedpkg lint # run linters against your changes
+ fedpkg commit # with message such as 'Bumped Ansible OpenStack collection to <version-tag>'
+ ```
+12. Push changes for `rawhide` with `fedpkg push`.
+13. Ask Koji to build your package with `fedpkg build`.
+14. Optionally check [Koji's page for `ansible-collections-openstack`](
+ https://koji.fedoraproject.org/koji/packageinfo?packageID=33611).
+15. Repeat release process for older Fedora branches such as Fedora 36 aka `f36`:
+ ```sh
+ fedpkg switch-branch f36
+ git merge rawhide
+ fedpkg push
+ fedpkg build
+ fedpkg update # or use Bodhi's page "Create New Update" at https://bodhi.fedoraproject.org/updates/new
+ ```
+
+## Publishing to RDO
+
+**NOTE:** Before publishing an updated RPM for Fedora or RDO, contact Alfredo Moralejo Alonso <amoralej@redhat.com>
+(amoralej) or Joel Capitao <jcapitao@redhat.com> (jcapitao[m]) in `#rdo` on [OFTC IRC](https://www.oftc.net/) about the
+latest release process.
+
+[All `master` branches on RDO trunk](https://trunk.rdoproject.org) consume code from the `master` branch of the Ansible
+OpenStack collection. Its RPM is (re)built whenever a new patch has been merged to the collection repository. Afterwards
+[it is promoted like any other TripleO component in the `client` component CI](
+https://docs.openstack.org/tripleo-docs/latest/ci/stages-overview.html).
+
+To update stable RDO branches such as [`CentOS 9 Zed`](https://trunk.rdoproject.org/centos9-zed/), patches have to be
+submitted to CentOS Cloud SIG repositories. In this case, create a patch for stable branches such as `wallaby-rdo` and
+`ussuri-rdo` at [ansible-collections-openstack-distgit](
+https://github.com/rdo-packages/ansible-collections-openstack-distgit). [Example](
+https://review.rdoproject.org/r/c/openstack/ansible-collections-openstack-distgit/+/34282).
diff --git a/ansible_collections/openstack/cloud/docs/reviewing.md b/ansible_collections/openstack/cloud/docs/reviewing.md
new file mode 100644
index 000000000..75ef25d4d
--- /dev/null
+++ b/ansible_collections/openstack/cloud/docs/reviewing.md
@@ -0,0 +1,66 @@
+# Reviews
+
+How to do a review? What to look for when reviewing patches?
+
+* Should functionality be implemented in Ansible modules or in openstacksdk? Ansible modules should only be "wrappers"
+  for functionality in openstacksdk. Big code chunks are a good indicator that functionality should rather be moved to
+  openstacksdk.
+* For each function call and code section which has been refactored, does the new code return the same results?
+ Pay special attention whenever a function from openstacksdk's cloud layer has been replaced because those functions
+ often have different semantics than functions of SDK's proxy layer.
+* Can API calls (to OpenStack API, not openstacksdk API) be reduced any further to improve performance?
+* Can calls to OpenStack API be tweaked to return less data?
+ For example, listing calls such as `image.images()` or `network.networks()` provide filters to reduce the number of
+ returned values.
+* Sanity check `argument_spec` and `module_kwargs`. Some modules try to be clever and add checks to fail early instead
+ of letting `openstacksdk` or OpenStack API handle incompatible arguments.
+* Are `choices` in module attributes appropriate? Sometimes it makes sense to get rid of the choices because they
+  are simply too narrow and might soon be outdated again.
+* Are `choices` in module attributes still valid? Module code might have been written long ago and thus the choices
+  might be horribly outdated.
+* Does a module use `name` as the module option for resource names, instead of e.g. `port` in the `port` module?
+  Rename those attributes to `name` to be consistent with other modules and with openstacksdk. When refactoring a
+  module, add the old attribute as an alias to keep backward compatibility.
+* Does the module have integration tests in `ci/roles`?
+* Is documentation in `DOCUMENTATION`, `RETURN` and `EXAMPLES` up to date?
+* Does `RETURN` list all values which are returned by the module?
+* Are descriptions, keys, names, types etc. in `RETURN` up to date and sorted?
+ - For example, [`type: complex` often can be changed to `type: list` / `elements: dict`](
+ https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_documenting.html).
+ - `returned: always, but can be null` often has to be changed to `returned: always, but can be empty` or shorter
+ `returned: always`.
+ - Are there any values in `RETURN` which are not returned by OpenStack SDK any longer?
+ - Module return value documentation can be found in [OpenStack SDK docs](
+ https://docs.openstack.org/openstacksdk/latest/), e.g. [Identity v3 API](
+ https://docs.openstack.org/openstacksdk/latest/user/proxies/identity_v3.html).
+ For more detailed descriptions on return values refer to [OpenStack API](https://docs.openstack.org/api-ref/).
+* Do integration tests have assertions on the module's return values?
+* Do the `RETURN` documentation and the assertions in integration tests match?
+* Do the `RETURN` documentation and the `self.exit_json()` statements match?
+* Do all modules use `to_dict(computed=False)` before returning values?
+* Because `id` is already part of most resource dictionaries returned from modules, we can safely drop dedicated `id`
+  attributes in `self.exit_json()` calls. We will not lose data and we are breaking backward compatibility anyway. See
+  the sketch after this list.
+* Is `EXAMPLES` documentation up to date?
+ When module arguments have been changed, examples have to be updated as well.
+* Do integration tests execute successfully in your local dev environment? \
+ Example:
+ ```sh
+ ansible-playbook -vvv ci/run-collection.yml \
+ -e "sdk_version=1.0.0 cloud=devstack-admin cloud_alt=devstack-alt" \
+ --tags floating_ip_info
+ ```
+* Does a patch remove any functionality or break backwards compatibility? The author must give a good explanation for
+ both.
+ - One valid reason is that a functionality has never worked before.
+  - It is not a valid reason for dropping functionality or backwards compatibility that functions from openstacksdk's
+    proxy layer do not support the functionality from openstacksdk's cloud layer. [SDK's cloud layer is not going away](
+ https://meetings.opendev.org/irclogs/%23openstack-sdks/%23openstack-sdks.2022-04-27.log.html) and can be used for
+ functionality which openstacksdk's proxy layer does not support. For example, `list_*` functions from openstacksdk's
+    cloud layer such as `search_users()` allow filtering retrieved results with function parameter `filters`.
+ openstacksdk's proxy layer does not provide an equivalent and thus the use of `search_users()` is perfectly fine.
+* Try to look at the patch from the user's perspective:
+ - Will users understand and approve the change(s)?
+ - Will the patch break their code?
+    **Note**: For operators / administrators, a stable, reliable and bug-free API is more important than the number
+ of features.
+ - If a change breaks or changes the behavior of their code, will it be easy to spot the difference?
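+
+A hypothetical sketch (resource and option names are illustrative) of the return pattern a reviewer wants to see at
+the end of a module's `run()` method: the full resource dictionary from `to_dict(computed=False)` is returned and no
+dedicated `id` key is passed, because `id` is already part of that dictionary:
+
+```python
+# inside the run() method of an OpenStackModule subclass
+server = self.conn.compute.find_server(self.params['name'],
+                                       ignore_missing=False)
+self.exit_json(changed=False,
+               # 'server' already contains 'id', no extra id=... needed
+               server=server.to_dict(computed=False))
+```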
diff --git a/ansible_collections/openstack/cloud/meta/runtime.yml b/ansible_collections/openstack/cloud/meta/runtime.yml
index 29e358ed8..2b04142ea 100644
--- a/ansible_collections/openstack/cloud/meta/runtime.yml
+++ b/ansible_collections/openstack/cloud/meta/runtime.yml
@@ -3,86 +3,66 @@ action_groups:
openstack:
- address_scope
- auth
+ - baremetal_deploy_template
- baremetal_inspect
- - baremetal_inspect
- - baremetal_node
- baremetal_node
- baremetal_node_action
- - baremetal_node_action
- baremetal_node_info
- - baremetal_port_info
- baremetal_port
- - catalog_endpoint
- - catalog_service
+ - baremetal_port_info
- catalog_service
+ - catalog_service_info
- coe_cluster
- coe_cluster_template
- compute_flavor
- - compute_flavor
- - compute_flavor
+ - compute_flavor_access
- compute_flavor_info
- - compute_flavor_info
- - compute_service_info
- compute_service_info
- config
- - config
- dns_zone
- dns_zone_info
- endpoint
- - endpoint
- federation_idp
- - federation_idp
- - federation_idp_info
- federation_idp_info
- federation_mapping
- - federation_mapping
- - federation_mapping_info
- federation_mapping_info
- floating_ip
- floating_ip_info
- group_assignment
- - group_assignment
- - host_aggregate
- host_aggregate
- identity_domain
- - identity_domain
- - identity_domain_info
- identity_domain_info
- identity_group
- - identity_group
- - identity_group_info
- identity_group_info
- identity_role
- - identity_role
- - identity_user
+ - identity_role_info
- identity_user
- identity_user_info
- - identity_user_info
- image
- image_info
- keypair
- keypair_info
- keystone_federation_protocol
- keystone_federation_protocol_info
- - lb_listener
+ - lb_health_monitor
- lb_listener
- lb_member
- - lb_member
- - lb_pool
- lb_pool
- loadbalancer
- network
- networks_info
+ - neutron_rbac_policies_info
+ - neutron_rbac_policy
- object
- object_container
- port
- port_info
- project
- - project_access
- project_info
- quota
- recordset
- - role_assignment
+ - resource
+ - resources
- role_assignment
- router
- routers_info
@@ -97,106 +77,9 @@ action_groups:
- server_metadata
- server_volume
- stack
+ - stack_info
- subnet
- - subnets_info
- subnet_pool
- - volume
- - volume_backup
- - volume_backup_info
- - volume_info
- - volume_snapshot
- - volume_snapshot_info
- os:
- - auth
- - baremetal_inspect
- - baremetal_inspect
- - baremetal_node
- - baremetal_node
- - baremetal_node_action
- - baremetal_node_action
- - catalog_endpoint
- - catalog_service
- - catalog_service
- - coe_cluster
- - coe_cluster_template
- - compute_flavor
- - compute_flavor
- - compute_flavor
- - compute_flavor_info
- - compute_flavor_info
- - config
- - config
- - dns_zone
- - dns_zone
- - endpoint
- - endpoint
- - federation_idp
- - federation_idp
- - federation_idp_info
- - federation_idp_info
- - federation_mapping
- - federation_mapping
- - federation_mapping_info
- - federation_mapping_info
- - floating_ip
- - group_assignment
- - group_assignment
- - host_aggregate
- - host_aggregate
- - identity_domain
- - identity_domain
- - identity_domain_info
- - identity_domain_info
- - identity_group
- - identity_group
- - identity_group_info
- - identity_group_info
- - identity_role
- - identity_role
- - identity_user
- - identity_user
- - identity_user_info
- - identity_user_info
- - image
- - image_info
- - keypair
- - keypair_info
- - keystone_federation_protocol
- - keystone_federation_protocol_info
- - lb_listener
- - lb_listener
- - lb_member
- - lb_member
- - lb_pool
- - lb_pool
- - loadbalancer
- - network
- - networks_info
- - object
- - object_container
- - port
- - port_info
- - project
- - project_access
- - project_info
- - quota
- - recordset
- - role_assignment
- - role_assignment
- - router
- - routers_info
- - security_group
- - security_group_info
- - security_group_rule
- - security_group_rule_info
- - server
- - server_action
- - server_group
- - server_info
- - server_metadata
- - server_volume
- - stack
- - subnet
- subnets_info
- volume
- volume_backup
@@ -204,410 +87,4 @@ action_groups:
- volume_info
- volume_snapshot
- volume_snapshot_info
- - os_auth
- - os_client_config
- - os_client_config
- - os_coe_cluster
- - os_coe_cluster_template
- - os_endpoint
- - os_flavor
- - os_flavor_info
- - os_flavor_info
- - os_floating_ip
- - os_group
- - os_group
- - os_group_info
- - os_group_info
- - os_image
- - os_image_info
- - os_ironic
- - os_ironic
- - os_ironic_inspect
- - os_ironic_inspect
- - os_ironic_node
- - os_ironic_node
- - os_keypair
- - os_keystone_domain
- - os_keystone_domain
- - os_keystone_domain_info
- - os_keystone_domain_info
- - os_keystone_endpoint
- - os_keystone_endpoint
- - os_keystone_federation_protocol
- - os_keystone_federation_protocol_info
- - os_keystone_identity_provider
- - os_keystone_identity_provider
- - os_keystone_identity_provider_info
- - os_keystone_identity_provider_info
- - os_keystone_mapping
- - os_keystone_mapping
- - os_keystone_mapping_info
- - os_keystone_mapping_info
- - os_keystone_role
- - os_keystone_role
- - os_keystone_service
- - os_keystone_service
- - os_listener
- - os_listener
- - os_loadbalancer
- - os_member
- - os_member
- - os_network
- - os_networks_info
- - os_nova_flavor
- - os_nova_flavor
- - os_nova_host_aggregate
- - os_nova_host_aggregate
- - os_object
- - os_pool
- - os_pool
- - os_port
- - os_port_info
- - os_project
- - os_project_access
- - os_project_info
- - os_quota
- - os_recordset
- - os_router
- - os_routers_info
- - os_security_group
- - os_security_group_rule
- - os_server
- - os_server_action
- - os_server_group
- - os_server_info
- - os_server_metadata
- - os_server_volume
- - os_stack
- - os_subnet
- - os_subnets_info
- - os_user
- - os_user
- - os_user_group
- - os_user_group
- - os_user_info
- - os_user_info
- - os_user_role
- - os_user_role
- - os_volume
- - os_volume_snapshot
- - os_zone
- - os_zone
-
-plugin_routing:
- modules:
- os_auth:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.auth
- redirect: openstack.cloud.auth
- os_client_config:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.config
- redirect: openstack.cloud.config
- os_coe_cluster:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.coe_cluster
- redirect: openstack.cloud.coe_cluster
- os_coe_cluster_template:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.coe_cluster_template
- redirect: openstack.cloud.coe_cluster_template
- os_endpoint:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.catalog_endpoint
- redirect: openstack.cloud.catalog_endpoint
- os_flavor:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.compute_flavor
- redirect: openstack.cloud.compute_flavor
- os_flavor_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.compute_flavor_info
- redirect: openstack.cloud.compute_flavor_info
- os_floating_ip:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.floating_ip
- redirect: openstack.cloud.floating_ip
- os_group:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.identity_group
- redirect: openstack.cloud.identity_group
- os_group_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.identity_group_info
- redirect: openstack.cloud.identity_group_info
- os_image:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.image
- redirect: openstack.cloud.image
- os_image_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.image_info
- redirect: openstack.cloud.image_info
- os_ironic:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.baremetal_node
- redirect: openstack.cloud.baremetal_node
- os_ironic_inspect:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.baremetal_inspect
- redirect: openstack.cloud.baremetal_inspect
- os_ironic_node:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.baremetal_node_action
- redirect: openstack.cloud.baremetal_node_action
- os_keypair:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.keypair
- redirect: openstack.cloud.keypair
- os_keystone_domain:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.identity_domain
- redirect: openstack.cloud.identity_domain
- os_keystone_domain_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.identity_domain_info
- redirect: openstack.cloud.identity_domain_info
- os_keystone_endpoint:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.endpoint
- redirect: openstack.cloud.endpoint
- os_keystone_federation_protocol:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.keystone_federation_protocol
- redirect: openstack.cloud.keystone_federation_protocol
- os_keystone_federation_protocol_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.keystone_federation_protocol_info
- redirect: openstack.cloud.keystone_federation_protocol_info
- os_keystone_identity_provider:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.federation_idp
- redirect: openstack.cloud.federation_idp
- os_keystone_identity_provider_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.federation_idp_info
- redirect: openstack.cloud.federation_idp_info
- os_keystone_mapping:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.federation_mapping
- redirect: openstack.cloud.federation_mapping
- os_keystone_mapping_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.federation_mapping_info
- redirect: openstack.cloud.federation_mapping_info
- os_keystone_role:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.identity_role
- redirect: openstack.cloud.identity_role
- os_keystone_service:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.catalog_service
- redirect: openstack.cloud.catalog_service
- os_listener:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.lb_listener
- redirect: openstack.cloud.lb_listener
- os_loadbalancer:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.loadbalancer
- redirect: openstack.cloud.loadbalancer
- os_member:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.lb_member
- redirect: openstack.cloud.lb_member
- os_network:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.network
- redirect: openstack.cloud.network
- os_networks_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.networks_info
- redirect: openstack.cloud.networks_info
- os_nova_flavor:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.compute_flavor
- redirect: openstack.cloud.compute_flavor
- os_nova_host_aggregate:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.host_aggregate
- redirect: openstack.cloud.host_aggregate
- os_object:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.object
- redirect: openstack.cloud.object
- os_pool:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.lb_pool
- redirect: openstack.cloud.lb_pool
- os_port:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.port
- redirect: openstack.cloud.port
- os_port_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.port_info
- redirect: openstack.cloud.port_info
- os_project:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.project
- redirect: openstack.cloud.project
- os_project_access:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.project_access
- redirect: openstack.cloud.project_access
- os_project_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.project_info
- redirect: openstack.cloud.project_info
- os_quota:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.quota
- redirect: openstack.cloud.quota
- os_recordset:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.recordset
- redirect: openstack.cloud.recordset
- os_router:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.router
- redirect: openstack.cloud.router
- os_routers_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.routers_info
- redirect: openstack.cloud.routers_info
- os_security_group:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.security_group
- redirect: openstack.cloud.security_group
- os_security_group_rule:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.security_group_rule
- redirect: openstack.cloud.security_group_rule
- os_server:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.server
- redirect: openstack.cloud.server
- os_server_action:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.server_action
- redirect: openstack.cloud.server_action
- os_server_group:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.server_group
- redirect: openstack.cloud.server_group
- os_server_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.server_info
- redirect: openstack.cloud.server_info
- os_server_metadata:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.server_metadata
- redirect: openstack.cloud.server_metadata
- os_server_volume:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.server_volume
- redirect: openstack.cloud.server_volume
- os_stack:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.stack
- redirect: openstack.cloud.stack
- os_subnet:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.subnet
- redirect: openstack.cloud.subnet
- os_subnets_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.subnets_info
- redirect: openstack.cloud.subnets_info
- os_user:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.identity_user
- redirect: openstack.cloud.identity_user
- os_user_group:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.group_assignment
- redirect: openstack.cloud.group_assignment
- os_user_info:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.identity_user_info
- redirect: openstack.cloud.identity_user_info
- os_user_role:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.role_assignment
- redirect: openstack.cloud.role_assignment
- os_volume:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.volume
- redirect: openstack.cloud.volume
- os_volume_snapshot:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.volume_snapshot
- redirect: openstack.cloud.volume_snapshot
- os_zone:
- deprecation:
- removal_date: 2021-12-12
- warning_text: os_ prefixed module names are deprecated, use openstack.cloud.dns_zone
- redirect: openstack.cloud.dns_zone
+ - volume_type_access
diff --git a/ansible_collections/openstack/cloud/plugins/doc_fragments/openstack.py b/ansible_collections/openstack/cloud/plugins/doc_fragments/openstack.py
index 37d51bb2e..5b12077c5 100644
--- a/ansible_collections/openstack/cloud/plugins/doc_fragments/openstack.py
+++ b/ansible_collections/openstack/cloud/plugins/doc_fragments/openstack.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Hewlett-Packard Development Company, L.P.
@@ -45,7 +46,7 @@ options:
description:
- Should ansible wait until the requested resource is complete.
type: bool
- default: yes
+ default: true
timeout:
description:
- How long should ansible wait for the requested resource.
@@ -59,7 +60,7 @@ options:
validate_certs:
description:
- Whether or not SSL API requests should be verified.
- - Before Ansible 2.3 this defaulted to C(yes).
+ - Before Ansible 2.3 this defaulted to C(true).
type: bool
aliases: [ verify ]
ca_cert:
@@ -85,10 +86,6 @@ options:
choices: [ admin, internal, public ]
default: public
aliases: [ endpoint_type ]
- availability_zone:
- description:
- - Ignored. Present for backwards compatibility
- type: str
sdk_log_path:
description:
- Path to the logfile of the OpenStackSDK. If empty no log is written
@@ -99,8 +96,8 @@ options:
default: INFO
choices: [INFO, DEBUG]
requirements:
- - python >= 3.6
- - openstacksdk >= 0.36, < 0.99.0
+ - "python >= 3.6"
+ - "openstacksdk >= 1.0.0"
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
diff --git a/ansible_collections/openstack/cloud/plugins/inventory/openstack.py b/ansible_collections/openstack/cloud/plugins/inventory/openstack.py
index def6c04ba..826e304b6 100644
--- a/ansible_collections/openstack/cloud/plugins/inventory/openstack.py
+++ b/ansible_collections/openstack/cloud/plugins/inventory/openstack.py
@@ -1,4 +1,6 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
+
# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
@@ -6,410 +8,449 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
+DOCUMENTATION = r'''
name: openstack
author: OpenStack Ansible SIG
short_description: OpenStack inventory source
description:
- - Get inventory hosts from OpenStack clouds
- - Uses openstack.(yml|yaml) YAML configuration file to configure the inventory plugin
- - Uses standard clouds.yaml YAML configuration file to configure cloud credentials
+ - Gather servers from OpenStack clouds and add them as Ansible hosts to your
+ inventory.
+ - Use YAML configuration file C(openstack.{yaml,yml}) to configure this
+ inventory plugin.
+ - Consumes cloud credentials from standard YAML configuration files
+ C(clouds{,-public}.yaml).
options:
- plugin:
- description: token that ensures this is a source file for the 'openstack' plugin.
- required: True
- choices: ['openstack', 'openstack.cloud.openstack']
- show_all:
- description: toggles showing all vms vs only those with a working IP
- type: bool
- default: false
- inventory_hostname:
- description: |
- What to register as the inventory hostname.
- If set to 'uuid' the uuid of the server will be used and a
- group will be created for the server name.
- If set to 'name' the name of the server will be used unless
- there are more than one server with the same name in which
- case the 'uuid' logic will be used.
- Default is to do 'name', which is the opposite of the old
- openstack.py inventory script's option use_hostnames)
- type: string
- choices:
- - name
- - uuid
- default: "name"
- use_names:
- description: |
- Use the host's 'name' instead of 'interface_ip' for the 'ansible_host' and
- 'ansible_ssh_host' facts. This might be desired when using jump or
- bastion hosts and the name is the FQDN of the host.
- type: bool
- default: false
- expand_hostvars:
- description: |
- Run extra commands on each host to fill in additional
- information about the host. May interrogate cinder and
- neutron and can be expensive for people with many hosts.
- (Note, the default value of this is opposite from the default
- old openstack.py inventory script's option expand_hostvars)
- type: bool
- default: false
- private:
- description: |
- Use the private interface of each server, if it has one, as
- the host's IP in the inventory. This can be useful if you are
- running ansible inside a server in the cloud and would rather
- communicate to your servers over the private network.
- type: bool
- default: false
- only_clouds:
- description: |
- List of clouds from clouds.yaml to use, instead of using
- the whole list.
- type: list
- elements: str
- default: []
- fail_on_errors:
- description: |
- Causes the inventory to fail and return no hosts if one cloud
- has failed (for example, bad credentials or being offline).
- When set to False, the inventory will return as many hosts as
- it can from as many clouds as it can contact. (Note, the
- default value of this is opposite from the old openstack.py
- inventory script's option fail_on_errors)
- type: bool
- default: false
- all_projects:
- description: |
- Lists servers from all projects
- type: bool
- default: false
- clouds_yaml_path:
- description: |
- Override path to clouds.yaml file. If this value is given it
- will be searched first. The default path for the
- ansible inventory adds /etc/ansible/openstack.yaml and
- /etc/ansible/openstack.yml to the regular locations documented
- at https://docs.openstack.org/os-client-config/latest/user/configuration.html#config-files
- type: list
- elements: str
- env:
- - name: OS_CLIENT_CONFIG_FILE
- compose:
- description: Create vars from jinja2 expressions.
- type: dictionary
- default: {}
- groups:
- description: Add hosts to group based on Jinja2 conditionals.
- type: dictionary
- default: {}
- legacy_groups:
- description: Automatically create groups from host variables.
- type: bool
- default: true
+ all_projects:
+ description:
+ - Lists servers from all projects
+ type: bool
+ default: false
+ clouds_yaml_path:
+ description:
+ - Override path to C(clouds.yaml) file.
+ - If this value is given it will be searched first.
+ - Search paths for cloud credentials are complemented with files
+ C(/etc/ansible/openstack.{yaml,yml}).
+ - Default search paths are documented in
+ U(https://docs.openstack.org/os-client-config/latest/user/configuration.html#config-files).
+ type: list
+ elements: str
+ env:
+ - name: OS_CLIENT_CONFIG_FILE
+ expand_hostvars:
+ description:
+ - Enrich server facts with additional queries to OpenStack services. This
+ includes requests to Cinder and Neutron which can be time-consuming
+ for clouds with many servers.
+ - Default value of I(expand_hostvars) is opposite of the default value
+ for option C(expand_hostvars) in legacy openstack.py inventory script.
+ type: bool
+ default: false
+ fail_on_errors:
+ description:
+ - Whether the inventory script fails, returning no hosts, when connection
+ to a cloud failed, for example due to bad credentials or connectivity
+ issues.
+ - When I(fail_on_errors) is C(false) this inventory script will return
+ all hosts it could fetch from clouds on a best effort basis.
+ - Default value of I(fail_on_errors) is opposite of the default value
+ for option C(fail_on_errors) in legacy openstack.py inventory script.
+ type: bool
+ default: false
+ inventory_hostname:
+ description:
+ - What to register as inventory hostname.
+ - When set to C(uuid) the ID of a server will be used and a group will
+ be created for a server name.
+ - When set to C(name) the name of a server will be used. When multiple
+        servers share the same name, then the servers' IDs will be used.
+ - Default value of I(inventory_hostname) is opposite of the default value
+ for option C(use_hostnames) in legacy openstack.py inventory script.
+ type: string
+ choices: ['name', 'uuid']
+ default: 'name'
+ legacy_groups:
+ description:
+ - Automatically create groups from host variables.
+ type: bool
+ default: true
+ only_clouds:
+ description:
+ - List of clouds in C(clouds.yaml) which will be contacted to use instead
+ of using all clouds.
+ type: list
+ elements: str
+ default: []
+ plugin:
+ description:
+ - Token which marks a given YAML configuration file as a valid input file
+ for this inventory plugin.
+ required: true
+ choices: ['openstack', 'openstack.cloud.openstack']
+ private:
+ description:
+ - Use private interfaces of servers, if available, when determining ip
+ addresses for Ansible hosts.
+ - Using I(private) helps when running Ansible from a server in the cloud
+ and one wants to ensure that servers communicate over private networks
+ only.
+ type: bool
+ default: false
+ show_all:
+ description:
+ - Whether all servers should be listed or not.
+ - When I(show_all) is C(false) then only servers with a valid ip
+        address, regardless of whether it is private or public, will be listed.
+ type: bool
+ default: false
+ use_names:
+ description:
+ - "When I(use_names) is C(false), its default value, then a server's
+ first floating ip address will be used for both facts C(ansible_host)
+ and C(ansible_ssh_host). When no floating ip address is attached to a
+        server, then its first non-floating ip address is used instead. If
+        no addresses are attached to a server, then neither fact will be
+ defined."
+      - "When I(use_names) is C(true), then the server name will be used for
+        both C(ansible_host) and C(ansible_ssh_host) facts. This is useful for
+ jump or bastion hosts where each server name is actually a server's
+ FQDN."
+ type: bool
+ default: false
requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.28, < 0.99.0"
+ - "python >= 3.6"
+ - "openstacksdk >= 1.0.0"
extends_documentation_fragment:
-- inventory_cache
-- constructed
-
+ - inventory_cache
+ - constructed
'''
-EXAMPLES = '''
-# file must be named openstack.yaml or openstack.yml
-# Make the plugin behave like the default behavior of the old script
+EXAMPLES = r'''
+# Create a file called openstack.yaml, add the following content and run
+# $> ansible-inventory --list -vvv -i openstack.yaml
plugin: openstack.cloud.openstack
-expand_hostvars: yes
-fail_on_errors: yes
-all_projects: yes
+
+all_projects: false
+expand_hostvars: true
+fail_on_errors: true
+only_clouds:
+ - "devstack-admin"
+strict: true
'''
import collections
import sys
-import logging
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.utils.display import Display
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
ensure_compatibility
)
-display = Display()
-os_logger = logging.getLogger("openstack")
-
try:
- # Due to the name shadowing we should import other way
- import importlib
- sdk = importlib.import_module('openstack')
- sdk_inventory = importlib.import_module('openstack.cloud.inventory')
- client_config = importlib.import_module('openstack.config.loader')
- sdk_exceptions = importlib.import_module("openstack.exceptions")
+ import openstack
HAS_SDK = True
except ImportError:
- display.vvvv("Couldn't import Openstack SDK modules")
HAS_SDK = False
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
- ''' Host inventory provider for ansible using OpenStack clouds. '''
NAME = 'openstack.cloud.openstack'
def parse(self, inventory, loader, path, cache=True):
- super(InventoryModule, self).parse(inventory, loader, path)
+ super(InventoryModule, self).parse(inventory, loader, path,
+ cache=cache)
- cache_key = self._get_cache_prefix(path)
+ if not HAS_SDK:
+ raise AnsibleParserError(
+ 'Could not import Python library openstacksdk')
- # file is config file
- self._config_data = self._read_config_data(path)
+ try:
+ ensure_compatibility(openstack.version.__version__)
+ except ImportError as e:
+ raise AnsibleParserError(
+ 'Incompatible openstacksdk library found: {0}'.format(e))
- msg = ''
- if not self._config_data:
- msg = 'File empty. this is not my config file'
- elif 'plugin' in self._config_data and self._config_data['plugin'] not in (self.NAME, 'openstack'):
- msg = 'plugin config file, but not for us: %s' % self._config_data['plugin']
- elif 'plugin' not in self._config_data and 'clouds' not in self._config_data:
- msg = "it's not a plugin configuration nor a clouds.yaml file"
- elif not HAS_SDK:
- msg = "openstacksdk is required for the OpenStack inventory plugin. OpenStack inventory sources will be skipped."
+ # Redirect logging to stderr so it does not mix with output, in
+ # particular JSON output of ansible-inventory.
+ # TODO: Integrate openstack's logging with Ansible's logging.
+ if self.display.verbosity > 3:
+ openstack.enable_logging(debug=True, stream=sys.stderr)
+ else:
+ openstack.enable_logging(stream=sys.stderr)
- if not msg:
- try:
- ensure_compatibility(sdk.version.__version__)
- except ImportError as e:
- msg = ("Incompatible openstacksdk library found: {error}."
- .format(error=str(e)))
+ config = self._read_config_data(path)
- if msg:
- display.vvvv(msg)
- raise AnsibleParserError(msg)
+ if 'plugin' not in config and 'clouds' not in config:
+ raise AnsibleParserError(
+ "Invalid OpenStack inventory configuration file found,"
+ " missing 'plugin' and 'clouds' keys.")
- if 'clouds' in self._config_data:
+        # TODO: Is it wise to disregard a potential user configuration error?
+ if 'clouds' in config:
self.display.vvvv(
- "Found clouds config file instead of plugin config. "
- "Using default configuration."
- )
- self._config_data = {}
-
- # update cache if the user has caching enabled and the cache is being refreshed
- # will update variable below in the case of an expired cache
- cache_needs_update = not cache and self.get_option('cache')
-
- if cache:
- cache = self.get_option('cache')
- source_data = None
- if cache:
- self.display.vvvv("Reading inventory data from cache: %s" % cache_key)
- try:
- source_data = self._cache[cache_key]
- except KeyError:
- # cache expired or doesn't exist yet
- display.vvvv("Inventory data cache not found")
- cache_needs_update = True
+ 'Found combined plugin config and clouds config file.')
- if not source_data:
- self.display.vvvv("Getting hosts from Openstack clouds")
- clouds_yaml_path = self._config_data.get('clouds_yaml_path')
- if clouds_yaml_path:
- config_files = (
- clouds_yaml_path
- + client_config.CONFIG_FILES
- )
- else:
- config_files = None
+ servers = self._fetch_servers(path, cache)
- # Redict logging to stderr so it does not mix with output
- # particular ansible-inventory JSON output
- # TODO(mordred) Integrate openstack's logging with ansible's logging
- if self.display.verbosity > 3:
- sdk.enable_logging(debug=True, stream=sys.stderr)
- else:
- sdk.enable_logging(stream=sys.stderr)
-
- cloud_inventory = sdk_inventory.OpenStackInventory(
- config_files=config_files,
- private=self._config_data.get('private', False))
- self.display.vvvv("Found %d cloud(s) in Openstack" %
- len(cloud_inventory.clouds))
- only_clouds = self._config_data.get('only_clouds', [])
- if only_clouds and not isinstance(only_clouds, list):
- raise ValueError(
- 'OpenStack Inventory Config Error: only_clouds must be'
- ' a list')
- if only_clouds:
- new_clouds = []
- for cloud in cloud_inventory.clouds:
- self.display.vvvv("Looking at cloud : %s" % cloud.name)
- if cloud.name in only_clouds:
- self.display.vvvv("Selecting cloud : %s" % cloud.name)
- new_clouds.append(cloud)
- cloud_inventory.clouds = new_clouds
-
- self.display.vvvv("Selected %d cloud(s)" %
- len(cloud_inventory.clouds))
-
- expand_hostvars = self._config_data.get('expand_hostvars', False)
- fail_on_errors = self._config_data.get('fail_on_errors', False)
- all_projects = self._config_data.get('all_projects', False)
- self.use_names = self._config_data.get('use_names', False)
-
- source_data = []
- try:
- source_data = cloud_inventory.list_hosts(
- expand=expand_hostvars, fail_on_cloud_config=fail_on_errors,
- all_projects=all_projects)
- except Exception as e:
- self.display.warning("Couldn't list Openstack hosts. "
- "See logs for details")
- os_logger.error(e.message)
- finally:
- if cache_needs_update:
- self._cache[cache_key] = source_data
-
- self._populate_from_source(source_data)
-
- def _populate_from_source(self, source_data):
- groups = collections.defaultdict(list)
- firstpass = collections.defaultdict(list)
- hostvars = {}
-
- use_server_id = (
- self._config_data.get('inventory_hostname', 'name') != 'name')
- show_all = self._config_data.get('show_all', False)
-
- for server in source_data:
- if 'interface_ip' not in server and not show_all:
- continue
- firstpass[server['name']].append(server)
-
- for name, servers in firstpass.items():
- if len(servers) == 1 and not use_server_id:
- self._append_hostvars(hostvars, groups, name, servers[0])
- else:
- server_ids = set()
- # Trap for duplicate results
- for server in servers:
- server_ids.add(server['id'])
- if len(server_ids) == 1 and not use_server_id:
- self._append_hostvars(hostvars, groups, name, servers[0])
- else:
- for server in servers:
- self._append_hostvars(
- hostvars, groups, server['id'], server,
- namegroup=True)
+ # determine inventory hostnames
+ if self.get_option('inventory_hostname') == 'name':
+ count = collections.Counter(s['name'] for s in servers)
- self._set_variables(hostvars, groups)
+ inventory = dict(((server['name'], server)
+ if count[server['name']] == 1
+ else (server['id'], server))
+ for server in servers)
- def _set_variables(self, hostvars, groups):
+ else: # self.get_option('inventory_hostname') == 'uuid'
+ inventory = dict((server['id'], server)
+ for server in servers)
- strict = self.get_option('strict')
+ # drop servers without addresses
+ show_all = self.get_option('show_all')
+ inventory = dict((k, v)
+ for k, v in inventory.items()
+ if show_all or v['addresses'])
- # set vars in inventory from hostvars
- for host in hostvars:
+ for hostname, server in inventory.items():
+ host_vars = self._generate_host_vars(hostname, server)
+ self._add_host(hostname, host_vars)
- # actually update inventory
- for key in hostvars[host]:
- self.inventory.set_variable(host, key, hostvars[host][key])
+ if self.get_option('legacy_groups'):
+ for hostname, server in inventory.items():
+ for group in self._generate_legacy_groups(server):
+ group_name = self.inventory.add_group(group)
+ if group_name == hostname:
+ self.display.vvvv(
+ 'Same name for host {0} and group {1}'
+ .format(hostname, group_name))
+ self.inventory.add_host(hostname, group_name)
+ else:
+ self.inventory.add_child(group_name, hostname)
+
+ def _add_host(self, hostname, host_vars):
+ # Ref.: https://docs.ansible.com/ansible/latest/dev_guide/
+ # developing_inventory.html#constructed-features
+
+ self.inventory.add_host(hostname, group='all')
+
+ for k, v in host_vars.items():
+ self.inventory.set_variable(hostname, k, v)
- # create composite vars
- self._set_composite_vars(
- self._config_data.get('compose'), self.inventory.get_host(host).get_vars(), host, strict)
+ strict = self.get_option('strict')
- # constructed groups based on conditionals
- self._add_host_to_composed_groups(
- self._config_data.get('groups'), hostvars[host], host, strict)
+ self._set_composite_vars(
+ self.get_option('compose'), host_vars, hostname, strict=True)
- # constructed groups based on jinja expressions
- self._add_host_to_keyed_groups(
- self._config_data.get('keyed_groups'), hostvars[host], host, strict)
+ self._add_host_to_composed_groups(
+ self.get_option('groups'), host_vars, hostname, strict=strict)
- for group_name, group_hosts in groups.items():
- gname = self.inventory.add_group(group_name)
- for host in group_hosts:
- if gname == host:
- display.vvvv("Same name for host %s and group %s" % (host, gname))
- self.inventory.add_host(host, gname)
- else:
- self.inventory.add_child(gname, host)
+ self._add_host_to_keyed_groups(
+ self.get_option('keyed_groups'), host_vars, hostname,
+ strict=strict)
- def _get_groups_from_server(self, server_vars, namegroup=True):
- groups = []
+ def _fetch_servers(self, path, cache):
+ cache_key = self._get_cache_prefix(path)
+ user_cache_setting = self.get_option('cache')
+ attempt_to_read_cache = user_cache_setting and cache
+ cache_needs_update = not cache and user_cache_setting
- region = server_vars['region']
- cloud = server_vars['cloud']
- metadata = server_vars.get('metadata', {})
+ servers = None
- # Create a group for the cloud
- groups.append(cloud)
+ if attempt_to_read_cache:
+ self.display.vvvv('Reading OpenStack inventory cache key {0}'
+ .format(cache_key))
+ try:
+ servers = self._cache[cache_key]
+ except KeyError:
+ self.display.vvvv("OpenStack inventory cache not found")
+ cache_needs_update = True
- # Create a group on region
- if region:
- groups.append(region)
+ if not attempt_to_read_cache or cache_needs_update:
+ self.display.vvvv('Retrieving servers from Openstack clouds')
+ clouds_yaml_path = self.get_option('clouds_yaml_path')
+ config_files = openstack.config.loader.CONFIG_FILES
+ if clouds_yaml_path:
+ config_files += clouds_yaml_path
- # And one by cloud_region
- groups.append("%s_%s" % (cloud, region))
+ config = openstack.config.loader.OpenStackConfig(
+ config_files=config_files)
- # Check if group metadata key in servers' metadata
- if 'group' in metadata:
- groups.append(metadata['group'])
+ only_clouds = self.get_option('only_clouds', [])
+ if only_clouds:
+ if not isinstance(only_clouds, list):
+ raise AnsibleParserError(
+ 'Option only_clouds in OpenStack inventory'
+ ' configuration is not a list')
- for extra_group in metadata.get('groups', '').split(','):
- if extra_group:
- groups.append(extra_group.strip())
+ cloud_regions = [config.get_one(cloud=cloud)
+ for cloud in only_clouds]
+ else:
+ cloud_regions = config.get_all()
- groups.append('instance-%s' % server_vars['id'])
- if namegroup:
- groups.append(server_vars['name'])
+ clouds = [openstack.connection.Connection(config=cloud_region)
+ for cloud_region in cloud_regions]
- for key in ('flavor', 'image'):
- if 'name' in server_vars[key]:
- groups.append('%s-%s' % (key, server_vars[key]['name']))
+ self.display.vvvv(
+ 'Found {0} OpenStack cloud(s)'
+ .format(len(clouds)))
- for key, value in iter(metadata.items()):
- groups.append('meta-%s_%s' % (key, value))
+ self.display.vvvv(
+ 'Using {0} OpenStack cloud(s)'
+ .format(len(clouds)))
+
+ expand_hostvars = self.get_option('expand_hostvars')
+ all_projects = self.get_option('all_projects')
+ servers = []
+
+ def _expand_server(server, cloud, volumes):
+ # calling openstacksdk's compute.servers() with
+ # details=True already fetched most facts
+
+ # cloud dict is used for legacy_groups option
+ server['cloud'] = dict(name=cloud.name)
+ region = cloud.config.get_region_name()
+ if region:
+ server['cloud']['region'] = region
+
+ if not expand_hostvars:
+ # do not query OpenStack API for additional data
+ return server
+
+ # TODO: Consider expanding 'flavor', 'image' and
+ # 'security_groups' when users still require this
+ # functionality.
+ # Ref.: https://opendev.org/openstack/openstacksdk/src/commit/\
+ # 289e5c2d3cba0eb1c008988ae5dccab5be05d9b6/openstack/cloud/meta.py#L482
+
+ server['volumes'] = [v for v in volumes
+ if any(a['server_id'] == server['id']
+ for a in v['attachments'])]
+
+ return server
+
+ for cloud in clouds:
+ if expand_hostvars:
+ volumes = [v.to_dict(computed=False)
+ for v in cloud.block_storage.volumes()]
+ else:
+ volumes = []
+
+ try:
+ for server in [
+ # convert to dict before expanding servers
+ # to allow us to attach attributes
+ _expand_server(server.to_dict(computed=False),
+ cloud,
+ volumes)
+ for server in cloud.compute.servers(
+ all_projects=all_projects,
+ # details are required because 'addresses'
+ # attribute must be populated
+ details=True)
+ ]:
+ servers.append(server)
+ except openstack.exceptions.OpenStackCloudException as e:
+ self.display.warning(
+ 'Fetching servers for cloud {0} failed with: {1}'
+ .format(cloud.name, str(e)))
+ if self.get_option('fail_on_errors'):
+ raise
+
+ if cache_needs_update:
+ self._cache[cache_key] = servers
+
+ return servers
+
+ def _generate_host_vars(self, hostname, server):
+ # populate host_vars with 'ansible_host', 'ansible_ssh_host' and
+ # 'openstack' facts
+
+ host_vars = dict(openstack=server)
+
+ if self.get_option('use_names'):
+ host_vars['ansible_ssh_host'] = server['name']
+ host_vars['ansible_host'] = server['name']
+ else:
+ # flatten addresses dictionary
+ addresses = [a
+ for addresses in (server['addresses'] or {}).values()
+ for a in addresses]
+
+ floating_ip = next(
+ (address['addr'] for address in addresses
+ if address['OS-EXT-IPS:type'] == 'floating'),
+ None)
+
+ fixed_ip = next(
+ (address['addr'] for address in addresses
+ if address['OS-EXT-IPS:type'] == 'fixed'),
+ None)
+
+ ip = floating_ip if floating_ip is not None and not self.get_option('private') else fixed_ip
+
+ if ip is not None:
+ host_vars['ansible_ssh_host'] = ip
+ host_vars['ansible_host'] = ip
+
+ return host_vars
+
+ def _generate_legacy_groups(self, server):
+ groups = []
- az = server_vars.get('az', None)
- if az:
- # Make groups for az, region_az and cloud_region_az
- groups.append(az)
- groups.append('%s_%s' % (region, az))
- groups.append('%s_%s_%s' % (cloud, region, az))
- return groups
+ # cloud was added by _expand_server()
+ cloud = server['cloud']
- def _append_hostvars(self, hostvars, groups, current_host,
- server, namegroup=False):
- if not self.use_names:
- hostvars[current_host] = dict(
- ansible_ssh_host=server['interface_ip'],
- ansible_host=server['interface_ip'],
- openstack=server,
- )
+ cloud_name = cloud['name']
+ groups.append(cloud_name)
- if self.use_names:
- hostvars[current_host] = dict(
- ansible_ssh_host=server['name'],
- ansible_host=server['name'],
- openstack=server,
- )
+ region = cloud['region'] if 'region' in cloud else None
+ if region is not None:
+ groups.append(region)
+ groups.append('{cloud}_{region}'.format(cloud=cloud_name,
+ region=region))
- self.inventory.add_host(current_host)
+ metadata = server.get('metadata', {})
+ if 'group' in metadata:
+ groups.append(metadata['group'])
+ for extra_group in metadata.get('groups', '').split(','):
+ if extra_group:
+ groups.append(extra_group.strip())
+ for k, v in metadata.items():
+ groups.append('meta-{k}_{v}'.format(k=k, v=v))
+
+ groups.append('instance-{id}'.format(id=server['id']))
+
+ for k in ('flavor', 'image'):
+ if 'name' in server[k]:
+ groups.append('{k}-{v}'.format(k=k, v=server[k]['name']))
+
+ availability_zone = server['availability_zone']
+ if availability_zone:
+ groups.append(availability_zone)
+ if region:
+ groups.append(
+ '{region}_{availability_zone}'
+ .format(region=region,
+ availability_zone=availability_zone))
+ groups.append(
+ '{cloud}_{region}_{availability_zone}'
+ .format(cloud=cloud_name,
+ region=region,
+ availability_zone=availability_zone))
- if self.get_option('legacy_groups'):
- for group in self._get_groups_from_server(server, namegroup=namegroup):
- groups[group].append(current_host)
+ return groups
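For illustration only (a hypothetical server, not taken from the patched file), the group logic above would produce legacy group names along these lines:

sample_server = {
    'cloud': {'name': 'mycloud', 'region': 'RegionOne'},
    'metadata': {'group': 'web'},
    'id': '00000000-0000-0000-0000-000000000000',
    'flavor': {'name': 'm1.small'},
    'image': {'name': 'cirros'},
    'availability_zone': 'nova',
}
# _generate_legacy_groups(sample_server) would return, in order:
# ['mycloud', 'RegionOne', 'mycloud_RegionOne', 'web', 'meta-group_web',
#  'instance-00000000-0000-0000-0000-000000000000', 'flavor-m1.small',
#  'image-cirros', 'nova', 'RegionOne_nova', 'mycloud_RegionOne_nova']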
def verify_file(self, path):
-
if super(InventoryModule, self).verify_file(path):
for fn in ('openstack', 'clouds'):
for suffix in ('yaml', 'yml'):
maybe = '{fn}.{suffix}'.format(fn=fn, suffix=suffix)
if path.endswith(maybe):
- self.display.vvvv("Valid plugin config file found")
+ self.display.vvvv(
+ 'OpenStack inventory configuration file found:'
+ ' {0}'.format(maybe))
return True
return False
diff --git a/ansible_collections/openstack/cloud/plugins/module_utils/ironic.py b/ansible_collections/openstack/cloud/plugins/module_utils/ironic.py
index a7ab19ef2..44f53560b 100644
--- a/ansible_collections/openstack/cloud/plugins/module_utils/ironic.py
+++ b/ansible_collections/openstack/cloud/plugins/module_utils/ironic.py
@@ -1,3 +1,6 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -30,8 +33,8 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import o
def ironic_argument_spec(**kwargs):
spec = dict(
- auth_type=dict(required=False),
- ironic_url=dict(required=False),
+ auth_type=dict(),
+ ironic_url=dict(),
)
spec.update(kwargs)
return openstack_full_argument_spec(**spec)
diff --git a/ansible_collections/openstack/cloud/plugins/module_utils/openstack.py b/ansible_collections/openstack/cloud/plugins/module_utils/openstack.py
index 8663d2fca..2f365121e 100644
--- a/ansible_collections/openstack/cloud/plugins/module_utils/openstack.py
+++ b/ansible_collections/openstack/cloud/plugins/module_utils/openstack.py
@@ -1,3 +1,6 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
@@ -43,41 +46,13 @@ import importlib
import os
from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import iteritems
-
-OVERRIDES = {'os_client_config': 'config',
- 'os_endpoint': 'catalog_endpoint',
- 'os_flavor': 'compute_flavor',
- 'os_flavor_info': 'compute_flavor_info',
- 'os_group': 'identity_group',
- 'os_group_info': 'identity_group_info',
- 'os_ironic': 'baremetal_node',
- 'os_ironic_inspect': 'baremetal_inspect',
- 'os_ironic_node': 'baremetal_node_action',
- 'os_keystone_domain': 'identity_domain',
- 'os_keystone_domain_info': 'identity_domain_info',
- 'os_keystone_endpoint': 'endpoint',
- 'os_keystone_identity_provider': 'federation_idp',
- 'os_keystone_identity_provider_info': 'federation_idp_info',
- 'os_keystone_mapping': 'federation_mapping',
- 'os_keystone_mapping_info': 'federation_mapping_info',
- 'os_keystone_role': 'identity_role',
- 'os_keystone_service': 'catalog_service',
- 'os_listener': 'lb_listener',
- 'os_member': 'lb_member',
- 'os_nova_flavor': 'compute_flavor',
- 'os_nova_host_aggregate': 'host_aggregate',
- 'os_pool': 'lb_pool',
- 'os_user': 'identity_user',
- 'os_user_group': 'group_assignment',
- 'os_user_info': 'identity_user_info',
- 'os_user_role': 'role_assignment',
- 'os_zone': 'dns_zone'}
+
+OVERRIDES = {}
CUSTOM_VAR_PARAMS = ['min_ver', 'max_ver']
-MINIMUM_SDK_VERSION = '0.36.0'
-MAXIMUM_SDK_VERSION = '0.98.999'
+MINIMUM_SDK_VERSION = '1.0.0'
+MAXIMUM_SDK_VERSION = None
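A rough sketch of what the new bounds mean in practice, assuming (as the callers in this patch suggest) that ensure_compatibility() below validates against MINIMUM_SDK_VERSION and MAXIMUM_SDK_VERSION by default; a pre-1.0 openstacksdk would now be rejected:

try:
    ensure_compatibility('0.61.0')
except ImportError as error:
    # callers such as the inventory plugin turn this into a fatal error
    print('Incompatible openstacksdk: {0}'.format(error))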
def ensure_compatibility(version, min_version=None, max_version=None):
@@ -130,7 +105,6 @@ def openstack_argument_spec():
login_username=dict(default=OS_USERNAME),
auth_url=dict(default=OS_AUTH_URL),
region_name=dict(default=OS_REGION_NAME),
- availability_zone=dict(),
)
if OS_PASSWORD:
spec['login_password'] = dict(default=OS_PASSWORD)
@@ -143,39 +117,25 @@ def openstack_argument_spec():
return spec
-def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
-
- ret = []
- for (k, v) in iteritems(addresses):
- if key_name and k == key_name:
- ret.extend([addrs['addr'] for addrs in v])
- else:
- for interface_spec in v:
- if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
- ret.append(interface_spec['addr'])
- return ret
-
-
def openstack_full_argument_spec(**kwargs):
spec = dict(
- cloud=dict(default=None, type='raw'),
- auth_type=dict(default=None),
- auth=dict(default=None, type='dict', no_log=True),
- region_name=dict(default=None),
- availability_zone=dict(default=None),
- validate_certs=dict(default=None, type='bool', aliases=['verify']),
- ca_cert=dict(default=None, aliases=['cacert']),
- client_cert=dict(default=None, aliases=['cert']),
- client_key=dict(default=None, no_log=True, aliases=['key']),
+ cloud=dict(type='raw'),
+ auth_type=dict(),
+ auth=dict(type='dict', no_log=True),
+ region_name=dict(),
+ validate_certs=dict(type='bool', aliases=['verify']),
+ ca_cert=dict(aliases=['cacert']),
+ client_cert=dict(aliases=['cert']),
+ client_key=dict(no_log=True, aliases=['key']),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
- api_timeout=dict(default=None, type='int'),
+ api_timeout=dict(type='int'),
interface=dict(
default='public', choices=['public', 'internal', 'admin'],
aliases=['endpoint_type']),
- sdk_log_path=dict(default=None, type='str'),
+ sdk_log_path=dict(),
sdk_log_level=dict(
- default='INFO', type='str', choices=['INFO', 'DEBUG']),
+ default='INFO', choices=['INFO', 'DEBUG']),
)
# Filter out all our custom parameters before passing to AnsibleModule
kwargs_copy = copy.deepcopy(kwargs)
@@ -345,7 +305,7 @@ class OpenStackModule:
"The '%s' module has been renamed to '%s' in openstack "
"collection: openstack.cloud.%s" % (
self.module_name, new_module_name, new_module_name),
- version='2.0.0', collection_name='openstack.cloud')
+ version='3.0.0', collection_name='openstack.cloud')
def openstack_cloud_from_module(self):
"""Sets up connection to cloud using provided options. Checks if all
diff --git a/ansible_collections/openstack/cloud/plugins/module_utils/resource.py b/ansible_collections/openstack/cloud/plugins/module_utils/resource.py
new file mode 100644
index 000000000..7f40de383
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/module_utils/resource.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023 Jakob Meng, <jakobmeng@web.de>
+# Copyright (c) 2023 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+class StateMachine:
+
+ @staticmethod
+ def default_crud_functions(connection, service_name, type_name):
+ session = getattr(connection, service_name)
+
+ create_function = getattr(session, 'create_{0}'.format(type_name))
+ delete_function = getattr(session, 'delete_{0}'.format(type_name))
+ find_function = getattr(session, 'find_{0}'.format(type_name))
+ get_function = getattr(session, 'get_{0}'.format(type_name))
+ list_function = getattr(session, '{0}s'.format(type_name))
+ update_function = getattr(session, 'update_{0}'.format(type_name))
+
+ return dict(
+ create=create_function,
+ delete=delete_function,
+ find=find_function,
+ get=get_function,
+ list=list_function,
+ update=update_function,
+ )
+
+ def __init__(self,
+ connection,
+ sdk,
+ type_name,
+ service_name,
+ crud_functions=None,
+ **kwargs):
+ for k in ['connection', 'sdk', 'service_name', 'type_name']:
+ setattr(self, k, locals()[k])
+
+ self.session = getattr(connection, service_name)
+
+ if not crud_functions:
+ crud_functions = StateMachine.default_crud_functions(
+ connection, service_name, type_name)
+
+ for k in ['create', 'delete', 'find', 'get', 'list', 'update']:
+ setattr(self, '{0}_function'.format(k), crud_functions[k])
+
+ # kwargs is for passing arguments to subclasses
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __call__(self, attributes, check_mode, state, timeout, wait,
+ updateable_attributes, non_updateable_attributes, **kwargs):
+ # kwargs is for passing arguments to subclasses
+
+ resource = self._find(attributes, **kwargs)
+
+ if check_mode:
+ return self._simulate(state, resource, attributes, timeout, wait,
+ updateable_attributes,
+ non_updateable_attributes, **kwargs)
+
+ if state == 'present' and not resource:
+ # Create resource
+ resource = self._create(attributes, timeout, wait, **kwargs)
+ return resource, True
+
+ elif state == 'present' and resource:
+ # Update resource
+ update = self._build_update(resource, attributes,
+ updateable_attributes,
+ non_updateable_attributes, **kwargs)
+ if update:
+ resource = self._update(resource, timeout, update, wait,
+ **kwargs)
+
+ return resource, bool(update)
+
+ elif state == 'absent' and resource:
+ # Delete resource
+ self._delete(resource, attributes, timeout, wait, **kwargs)
+ return None, True
+
+ elif state == 'absent' and not resource:
+ # Do nothing
+ return None, False
+
+ def _build_update(self, resource, attributes, updateable_attributes,
+ non_updateable_attributes, **kwargs):
+ update = {}
+
+ # Fetch details to populate all resource attributes
+ resource = self.get_function(resource['id'])
+
+ comparison_attributes = (
+ set(updateable_attributes
+ if updateable_attributes is not None
+ else attributes.keys())
+ - set(non_updateable_attributes
+ if non_updateable_attributes is not None
+ else []))
+
+ resource_attributes = dict(
+ (k, attributes[k])
+ for k in comparison_attributes
+ if not self._is_equal(attributes[k], resource[k]))
+
+ if resource_attributes:
+ update['resource_attributes'] = resource_attributes
+
+ return update
+
+ def _create(self, attributes, timeout, wait, **kwargs):
+ resource = self.create_function(**attributes)
+
+ if wait:
+ resource = self.sdk.resource.wait_for_status(self.session,
+ resource,
+ status='active',
+ failures=['error'],
+ wait=timeout,
+ attribute='status')
+
+ return resource
+
+ def _delete(self, resource, attributes, timeout, wait, **kwargs):
+ self.delete_function(resource['id'])
+
+ if wait:
+ for count in self.sdk.utils.iterate_timeout(
+ timeout=timeout,
+ message="Timeout waiting for resource to be absent"
+ ):
+ if self._find(attributes) is None:
+ break
+
+ def _freeze(self, o):
+ if isinstance(o, dict):
+ return frozenset((k, self._freeze(v)) for k, v in o.items())
+
+ if isinstance(o, list):
+ return tuple(self._freeze(v) for v in o)
+
+ return o
+
+ def _is_equal(self, a, b):
+ if any([a is None and b is not None,
+ a is not None and b is None]):
+ return False
+
+ if a is None and b is None:
+ return True
+
+ if isinstance(a, list) and isinstance(b, list):
+ return self._freeze(a) == self._freeze(b)
+
+ if isinstance(a, dict) and isinstance(b, dict):
+ if set(a.keys()) != set(b.keys()):
+ return False
+ return self._freeze(a) == self._freeze(b)
+
+ # else
+ return a == b
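A brief sketch of the comparison semantics above (illustrative values; sm stands for any StateMachine instance): nested structures are frozen into hashable values, so dictionary key order does not matter while list order does.

sm._is_equal({'a': [1, 2], 'b': 'x'}, {'b': 'x', 'a': [1, 2]})  # True
sm._is_equal({'a': [1, 2]}, {'a': [2, 1]})                      # False, list order differs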
+
+ def _find(self, attributes, **kwargs):
+ # use find_* functions for id instead of get_* functions because
+ # get_* functions raise exceptions when resources cannot be found
+ for k in ['id', 'name']:
+ if k in attributes:
+ return self.find_function(attributes[k])
+
+ matches = list(self._find_matches(attributes, **kwargs))
+ if len(matches) > 1:
+ self.fail_json(msg='Found more than a single resource'
+ ' which matches the given attributes.')
+ elif len(matches) == 1:
+ return matches[0]
+ else: # len(matches) == 0
+ return None
+
+ def _find_matches(self, attributes, **kwargs):
+ return self.list_function(**attributes)
+
+ def _update(self, resource, timeout, update, wait, **kwargs):
+ resource_attributes = update.get('resource_attributes')
+ if resource_attributes:
+ resource = self.update_function(resource['id'],
+ **resource_attributes)
+
+ if wait:
+ resource = self.sdk.resource.wait_for_status(self.session,
+ resource,
+ status='active',
+ failures=['error'],
+ wait=timeout,
+ attribute='status')
+
+ return resource
+
+ def _simulate(self, state, resource, attributes, timeout, wait,
+ updateable_attributes,
+ non_updateable_attributes, **kwargs):
+ if state == 'present' and not resource:
+ resource = self._simulate_create(attributes, timeout, wait,
+ **kwargs)
+ return resource, True
+ elif state == 'present' and resource:
+ update = self._build_update(resource, attributes,
+ updateable_attributes,
+ non_updateable_attributes,
+ **kwargs)
+ if update:
+ resource = self._simulate_update(resource, timeout, update,
+ wait, **kwargs)
+
+ return resource, bool(update)
+ elif state == 'absent' and resource:
+ return None, True
+ else:
+ # state == 'absent' and not resource:
+ return None, False
+
+ def _simulate_create(self, attributes, timeout, wait, **kwargs):
+ class Resource(dict):
+ def to_dict(self, *args, **kwargs):
+ return self
+
+ return Resource(attributes)
+
+ def _simulate_update(self, resource, timeout, update, wait, **kwargs):
+ resource_attributes = update.get('resource_attributes')
+ if resource_attributes:
+ for k, v in resource_attributes.items():
+ resource[k] = v
+
+ return resource
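StateMachine is new in this release and has no callers in this hunk, so the following is only a minimal usage sketch: conn is assumed to be an openstack.connection.Connection, sdk the imported openstack package, and the address scope attributes are purely illustrative.

def ensure_address_scope(conn, sdk, name, check_mode=False):
    # Build a state machine around openstacksdk's network proxy; the default CRUD
    # functions resolve to create_address_scope, find_address_scope, and so on.
    sm = StateMachine(connection=conn, sdk=sdk,
                      service_name='network', type_name='address_scope')
    # Finds the resource by name, then creates or updates it as needed and returns
    # (resource, changed), mirroring the result shape of an Ansible module.
    return sm(attributes={'name': name, 'ip_version': '4'},
              check_mode=check_mode, state='present',
              timeout=180, wait=False,
              updateable_attributes=None,
              non_updateable_attributes=['ip_version'])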
diff --git a/ansible_collections/openstack/cloud/plugins/modules/address_scope.py b/ansible_collections/openstack/cloud/plugins/modules/address_scope.py
index eb5b187af..3a0d5ce45 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/address_scope.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/address_scope.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 by Uemit Seren <uemit.seren@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -21,33 +21,32 @@ options:
name:
description:
         - Name to be given to the address scope.
+ - This option cannot be updated.
required: true
type: str
project:
description:
- Unique name or ID of the project.
+ - This option cannot be updated.
type: str
ip_version:
description:
- - The IP version of the subnet 4 or 6
+ - The IP version of the subnet 4 or 6.
+ - This option cannot be updated.
default: '4'
type: str
choices: ['4', '6']
- shared:
+ is_shared:
description:
- Whether this address scope is shared or not.
type: bool
- default: 'no'
+ default: 'false'
+ aliases: ['shared']
extra_specs:
description:
- Dictionary with extra key/value pairs passed to the API
- required: false
default: {}
type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -78,29 +77,33 @@ RETURN = '''
address_scope:
description: Dictionary describing the address scope.
returned: On success when I(state) is 'present'
- type: complex
+ type: dict
contains:
id:
description: Address Scope ID.
type: str
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+ ip_version:
+ description: The IP version of the subnet 4 or 6.
+ type: str
+ sample: "4"
+ is_shared:
+ description: Indicates whether this address scope is shared across
+ all tenants.
+ type: bool
+ sample: false
name:
description: Address Scope name.
type: str
sample: "my_address_scope"
+ project_id:
+ description: The project ID
+ type: str
+ sample: "474acfe5-be34-494c-b339-50f06aa143e4"
tenant_id:
description: The tenant ID.
type: str
sample: "861174b82b43463c9edc5202aadc60ef"
- ip_version:
- description: The IP version of the subnet 4 or 6.
- type: str
- sample: "4"
- is_shared:
- description: Indicates whether this address scope is shared across all tenants.
- type: bool
- sample: false
-
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -110,23 +113,20 @@ class AddressScopeModule(OpenStackModule):
argument_spec = dict(
state=dict(default='present', choices=['absent', 'present']),
name=dict(required=True),
- shared=dict(default=False, type='bool'),
- ip_version=dict(type='str', default='4', choices=['4', '6']),
- project=dict(default=None),
+ is_shared=dict(default=False, type='bool', aliases=['shared']),
+ ip_version=dict(default='4', choices=['4', '6']),
+ project=dict(),
extra_specs=dict(type='dict', default=dict())
)
- def _needs_update(self, address_scope, filters=None):
+ def _needs_update(self, address_scope):
"""Decide if the given address_scope needs an update.
"""
- ip_version = int(self.params['ip_version'])
- if address_scope['is_shared'] != self.params['shared']:
+ if address_scope['is_shared'] != self.params['is_shared']:
return True
- if ip_version and address_scope['ip_version'] != ip_version:
- self.fail_json(msg='Cannot update ip_version in existing address scope')
return False
- def _system_state_change(self, address_scope, filters=None):
+ def _system_state_change(self, address_scope):
"""Check if the system state would be changed."""
state = self.params['state']
if state == 'absent' and address_scope:
@@ -134,27 +134,26 @@ class AddressScopeModule(OpenStackModule):
if state == 'present':
if not address_scope:
return True
- return self._needs_update(address_scope, filters)
+ return self._needs_update(address_scope)
return False
def run(self):
state = self.params['state']
name = self.params['name']
- shared = self.params['shared']
+ is_shared = self.params['is_shared']
ip_version = self.params['ip_version']
- project = self.params['project']
+ project_name_or_id = self.params['project']
extra_specs = self.params['extra_specs']
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail(msg='Project %s could not be found' % project)
- project_id = proj['id']
+ if project_name_or_id is not None:
+ project_id = self.conn.identity.find_project(
+ project_name_or_id, ignore_missing=False)['id']
else:
- project_id = self.conn.current_project_id
+ project_id = self.conn.session.get_project_id()
- address_scope = self.conn.network.find_address_scope(name_or_id=name)
+ address_scope = self.conn.network.find_address_scope(
+ name_or_id=name, project_id=project_id)
if self.ansible.check_mode:
self.exit_json(
changed=self._system_state_change(address_scope)
@@ -167,26 +166,28 @@ class AddressScopeModule(OpenStackModule):
kwargs = dict(
name=name,
ip_version=ip_version,
- is_shared=shared,
- tenant_id=project_id)
+ is_shared=is_shared,
+ project_id=project_id)
dup_args = set(kwargs.keys()) & set(extra_specs.keys())
if dup_args:
raise ValueError('Duplicate key(s) {0} in extra_specs'
.format(list(dup_args)))
kwargs = dict(kwargs, **extra_specs)
- address_scope = self.conn.network.create_address_scope(**kwargs)
+ address_scope = \
+ self.conn.network.create_address_scope(**kwargs)
changed = True
- else:
- if self._needs_update(address_scope):
- address_scope = self.conn.network.update_address_scope(address_scope['id'], is_shared=shared)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed, address_scope=address_scope, id=address_scope['id'])
+
+ elif self._needs_update(address_scope):
+ address_scope = self.conn.network.update_address_scope(
+ address_scope['id'], is_shared=is_shared)
+ changed = True
+
+ self.exit_json(changed=changed,
+ address_scope=address_scope.to_dict(computed=False))
elif state == 'absent':
if not address_scope:
- self.exit(changed=False)
+ self.exit_json(changed=False)
else:
self.conn.network.delete_address_scope(address_scope['id'])
self.exit_json(changed=True)
diff --git a/ansible_collections/openstack/cloud/plugins/modules/auth.py b/ansible_collections/openstack/cloud/plugins/modules/auth.py
index 1f2c516e4..e991209d0 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/auth.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/auth.py
@@ -1,56 +1,40 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: auth
-short_description: Retrieve an auth token
+short_description: Retrieve auth token from OpenStack cloud
author: OpenStack Ansible SIG
description:
- - Retrieve an auth token from an OpenStack Cloud
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
+  - Retrieve an auth token from an OpenStack cloud.
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-- name: Authenticate to the cloud and retrieve the service catalog
+EXAMPLES = r'''
+- name: Authenticate to cloud and return auth token
openstack.cloud.auth:
cloud: rax-dfw
-
-- name: Show service catalog
- debug:
- var: service_catalog
'''
-RETURN = '''
+RETURN = r'''
auth_token:
description: Openstack API Auth Token
returned: success
type: str
-service_catalog:
- description: A dictionary of available API endpoints
- returned: success
- type: dict
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class AuthModule(OpenStackModule):
- argument_spec = dict()
- module_kwargs = dict()
-
def run(self):
- self.exit_json(
- changed=False,
- ansible_facts=dict(
- auth_token=self.conn.auth_token,
- service_catalog=self.conn.service_catalog))
+ self.exit_json(changed=False,
+ auth_token=self.conn.auth_token)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/baremetal_deploy_template.py b/ansible_collections/openstack/cloud/plugins/modules/baremetal_deploy_template.py
new file mode 100644
index 000000000..6eee82767
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/baremetal_deploy_template.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023 StackHPC Ltd.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r'''
+module: baremetal_deploy_template
+short_description: Create/Delete Bare Metal deploy template Resources from OpenStack
+author: OpenStack Ansible SIG
+description:
+ - Create, Update and Remove ironic deploy templates from OpenStack.
+options:
+ extra:
+ description:
+ - A set of one or more arbitrary metadata key and value pairs.
+ type: dict
+ id:
+ description:
+ - ID of the deploy template.
+ - Will be auto-generated if not specified.
+ type: str
+ aliases: ['uuid']
+ name:
+ description:
+ - Name of the deploy template.
+ - Must be formatted as a trait name (see API reference).
+ - Required when the deploy template is created, after which the
+ name or ID may be used.
+ type: str
+ steps:
+ description:
+ - List of deploy steps to apply.
+ - Required when the deploy template is created.
+ type: list
+ elements: dict
+ state:
+ description:
+ - Indicates desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ type: str
+extends_documentation_fragment:
+- openstack.cloud.openstack
+'''
+
+EXAMPLES = r'''
+- name: Create Bare Metal deploy template
+ openstack.cloud.baremetal_deploy_template:
+ cloud: devstack
+ state: present
+ name: CUSTOM_FOO
+ steps:
+ - interface: bios
+ step: apply_configuration
+ args:
+ settings:
+ - name: LogicalProc
+ value: Enabled
+ priority: 110
+ extra:
+ something: extra
+ register: result
+
+- name: Delete Bare Metal deploy template
+ openstack.cloud.baremetal_deploy_template:
+ cloud: devstack
+ state: absent
+ id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
+ register: result
+
+- name: Update Bare Metal deploy template
+ openstack.cloud.baremetal_deploy_template:
+ cloud: devstack
+ state: present
+ id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
+ extra:
+ something: new
+'''
+
+RETURN = r'''
+template:
+ description: A deploy template dictionary, subset of the dictionary keys
+  description: A deploy template dictionary; a subset of the dictionary keys
+ provider.
+ returned: success
+ type: dict
+ contains:
+ created_at:
+ description: Bare Metal deploy template created at timestamp.
+ returned: success
+ type: str
+ extra:
+ description: A set of one or more arbitrary metadata key and value
+ pairs.
+ returned: success
+ type: dict
+ id:
+ description: The UUID for the Baremetal Deploy Template resource.
+ returned: success
+ type: str
+ links:
+ description: A list of relative links, including the self and
+ bookmark links.
+ returned: success
+ type: list
+ location:
+ description: Cloud location of this resource (cloud, project,
+ region, zone)
+ returned: success
+ type: dict
+ name:
+ description: Bare Metal deploy template name.
+ returned: success
+ type: str
+ steps:
+ description: A list of deploy steps.
+ returned: success
+ type: list
+ elements: dict
+ updated_at:
+ description: Bare Metal deploy template updated at timestamp.
+ returned: success
+ type: str
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
+ OpenStackModule
+)
+
+
+class BaremetalDeployTemplateModule(OpenStackModule):
+ argument_spec = dict(
+ extra=dict(type='dict'),
+ id=dict(aliases=['uuid']),
+ name=dict(),
+ steps=dict(type='list', elements='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module_kwargs = dict(
+ required_one_of=[
+ ('id', 'name'),
+ ],
+ )
+
+ def run(self):
+ template = self._find_deploy_template()
+ state = self.params['state']
+ if state == 'present':
+ # create or update deploy template
+
+ kwargs = {}
+ for k in ['extra', 'id', 'name', 'steps']:
+ if self.params[k] is not None:
+ kwargs[k] = self.params[k]
+
+ changed = True
+ if not template:
+ # create deploy template
+ template = self.conn.baremetal.create_deploy_template(**kwargs)
+ else:
+ # update deploy template
+ updates = dict((k, v)
+ for k, v in kwargs.items()
+ if v != template[k])
+
+ if updates:
+ template = \
+ self.conn.baremetal.update_deploy_template(template['id'], **updates)
+ else:
+ changed = False
+
+ self.exit_json(changed=changed, template=template.to_dict(computed=False))
+
+ if state == 'absent':
+ # remove deploy template
+ if not template:
+ self.exit_json(changed=False)
+
+ template = self.conn.baremetal.delete_deploy_template(template['id'])
+ self.exit_json(changed=True)
+
+ def _find_deploy_template(self):
+ id_or_name = self.params['id'] if self.params['id'] else self.params['name']
+ try:
+ return self.conn.baremetal.get_deploy_template(id_or_name)
+ except self.sdk.exceptions.ResourceNotFound:
+ return None
+
+
+def main():
+ module = BaremetalDeployTemplateModule()
+ module()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/baremetal_inspect.py b/ansible_collections/openstack/cloud/plugins/modules/baremetal_inspect.py
index f7d90d1c5..0f9c37354 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/baremetal_inspect.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/baremetal_inspect.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# (c) 2015-2016, Hewlett Packard Enterprise Development Company LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -10,63 +10,280 @@ module: baremetal_inspect
short_description: Explicitly triggers baremetal node introspection in ironic.
author: OpenStack Ansible SIG
description:
- - Requests Ironic to set a node into inspect state in order to collect metadata regarding the node.
- This command may be out of band or in-band depending on the ironic driver configuration.
- This is only possible on nodes in 'manageable' and 'available' state.
+ - Requests Ironic to set a node into inspect state in order to collect
+ metadata regarding the node. This command may be out of band or in-band
+ depending on the ironic driver configuration. This is only possible on
+ nodes in 'manageable' and 'available' state.
options:
mac:
description:
- unique mac address that is used to attempt to identify the host.
type: str
- uuid:
- description:
- - globally unique identifier (UUID) to identify the host.
- type: str
name:
description:
- - unique name identifier to identify the host in Ironic.
- type: str
- ironic_url:
- description:
- - If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API.
- Use with "auth" and "auth_type" settings set to None.
+      - Name or ID of the node to inspect.
+      - Mutually exclusive with I(mac).
type: str
- timeout:
- description:
- - A timeout in seconds to tell the role to wait for the node to complete introspection if wait is set to True.
- default: 1200
- type: int
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ aliases: [id, uuid]
extends_documentation_fragment:
- openstack.cloud.openstack
'''
RETURN = '''
-ansible_facts:
- description: Dictionary of new facts representing discovered properties of the node..
- returned: changed
- type: complex
- contains:
+node:
+ description: A dictionary describing the node after inspection
+ returned: changed
+ type: dict
+ contains:
+ allocation_id:
+ description: The UUID of the allocation associated with the node.
+ type: str
+ bios_interface:
+ description: The bios interface to be used for this node.
+ type: str
+ boot_interface:
+ description: The boot interface for a Node, e.g. "pxe".
+ type: str
+ boot_mode:
+ description: The current boot mode state (uefi/bios).
+ type: str
+ chassis_id:
+ description: UUID of the chassis associated with this Node.
+ type: str
+ clean_step:
+ description: |
+ The current clean step. Introduced with the cleaning feature.
+ type: str
+ conductor:
+ description: The conductor currently servicing a node.
+ type: str
+ conductor_group:
+ description: The conductor group for a node.
+ type: str
+ console_interface:
+ description: Console interface to use when working with serial console.
+ type: str
+ sample: no-console
+ created_at:
+      description: Timestamp at which the node was created.
+ type: str
+ deploy_interface:
+ description: The deploy interface for a node
+ type: str
+ sample: iscsi
+ deploy_step:
+ description: The current deploy step.
+ type: str
+ driver:
+ description: The name of the driver.
+ type: str
+ driver_info:
+ description: |
+ All the metadata required by the driver to manage this Node. List
+ of fields varies between drivers.
+ type: dict
+ driver_internal_info:
+ description: Internal metadata set and stored by the Node's driver.
+ type: dict
+ extra:
+ description: A set of one or more arbitrary metadata key and value pairs.
+ type: dict
+ fault:
+ description: |
+ The fault indicates the active fault detected by ironic, typically the
+ Node is in "maintenance mode". None means no fault has been detected by
+ ironic. "power failure" indicates ironic failed to retrieve power state
+ from this node. There are other possible types, e.g., "clean failure"
+ and "rescue abort failure".
+ type: str
+ id:
+ description: The UUID for the resource.
+ type: str
+ inspect_interface:
+ description: The interface used for node inspection.
+ type: str
+ sample: no-inspect
+ instance_id:
+ description: UUID of the Nova instance associated with this Node.
+ type: str
+ instance_info:
+ description: |
+ Information used to customize the deployed image. May include root
+ partition size, a base 64 encoded config drive, and other metadata.
+ Note that this field is erased automatically when the instance is
+ deleted (this is done by requesting the Node provision state be changed
+ to DELETED).
+ type: dict
+ is_automated_clean_enabled:
+ description: Override enabling of automated cleaning.
+ type: bool
+ is_console_enabled:
+ description: |
+ Indicates whether console access is enabled or disabled on this node.
+ type: bool
+ is_maintenance:
+ description: |
+ Whether or not this Node is currently in "maintenance mode". Setting
+ a Node into maintenance mode removes it from the available resource
+ pool and halts some internal automation. This can happen manually (eg,
+ via an API request) or automatically when Ironic detects a hardware
+ fault that prevents communication with the machine.
+ type: bool
+ is_protected:
+ description: |
+ Whether the node is protected from undeploying, rebuilding and
+ deletion.
+ type: bool
+ is_retired:
+ description: Whether the node is marked for retirement.
+ type: bool
+ is_secure_boot:
+ description: |
+ Whether the node is currently booted with secure boot turned on.
+ type: bool
+ last_error:
+ description: |
+ Any error from the most recent (last) transaction that started but
+ failed to finish.
+ type: str
+ links:
+ description: |
+ A list of relative links. Includes the self and bookmark links.
+ type: list
+ maintenance_reason:
+ description: |
+ User-settable description of the reason why this Node was placed into
+ maintenance mode.
+ type: str
+ management_interface:
+ description: Interface for out-of-band node management.
+ type: str
+ sample: ipmitool
+ name:
+ description: |
+ Human-readable identifier for the Node resource. Certain words are
+ reserved.
+ type: str
+ network_interface:
+ description: |
+ Which Network Interface provider to use when plumbing the network
+ connections for this Node.
+ type: str
+ owner:
+ description: A string or UUID of the tenant who owns the object.
+ type: str
+ port_groups:
+ description: Links to the collection of portgroups on this node.
+ type: list
+ ports:
+ description: Links to the collection of ports on this node
+ type: list
+ power_interface:
+ description: Interface used for performing power actions on the node.
+ type: str
+ sample: ipmitool
+ power_state:
+ description: |
+ The current power state of this Node. Usually, "power on" or "power
+ off", but may be "None" if Ironic is unable to determine the power
+ state (eg, due to hardware failure).
+ type: str
+ properties:
+ description: Properties of the node as found by inspection
+ type: dict
+ contains:
memory_mb:
- description: Amount of node memory as updated in the node properties
- type: str
- sample: "1024"
+ description: Amount of node memory as updated in the node properties
+ type: str
+ sample: "1024"
cpu_arch:
- description: Detected CPU architecture type
- type: str
- sample: "x86_64"
+ description: Detected CPU architecture type
+ type: str
+ sample: "x86_64"
local_gb:
- description: Total size of local disk storage as updated in node properties.
- type: str
- sample: "10"
+ description: |
+ Total size of local disk storage as updated in node properties.
+ type: str
+ sample: "10"
cpus:
- description: Count of cpu cores defined in the updated node properties.
- type: str
- sample: "1"
+ description: |
+ Count of cpu cores defined in the updated node properties.
+ type: str
+ sample: "1"
+ protected_reason:
+ description: The reason the node is marked as protected.
+ type: str
+ provision_state:
+ description: The current provisioning state of this Node.
+ type: str
+ raid_config:
+ description: |
+ Represents the current RAID configuration of the node. Introduced with
+ the cleaning feature.
+ type: dict
+ raid_interface:
+ description: Interface used for configuring RAID on this node.
+ type: str
+ sample: no-raid
+ rescue_interface:
+ description: The interface used for node rescue.
+ type: str
+ sample: no-rescue
+ reservation:
+ description: |
+ The name of an Ironic Conductor host which is holding a lock on this
+ node, if a lock is held. Usually "null", but this field can be useful
+ for debugging.
+ type: str
+ resource_class:
+ description: |
+ A string which can be used by external schedulers to identify this
+ Node as a unit of a specific type of resource.
+ type: str
+ retired_reason:
+ description: TODO
+ type: str
+ states:
+ description: |
+ Links to the collection of states. Note that this resource is also
+ used to request state transitions.
+ type: list
+ storage_interface:
+ description: |
+ Interface used for attaching and detaching volumes on this node, e.g.
+ "cinder".
+ type: str
+ target_power_state:
+ description: |
+ If a power state transition has been requested, this field represents
+ the requested (ie, "target") state, either "power on" or "power off".
+ type: str
+ target_provision_state:
+ description: |
+ If a provisioning action has been requested, this field represents
+ the requested (ie, "target") state. Note that a Node may go through
+ several states during its transition to this target state. For
+ instance, when requesting an instance be deployed to an AVAILABLE
+ Node, the Node may go through the following state change progression:
+ AVAILABLE -> DEPLOYING -> DEPLOYWAIT -> DEPLOYING -> ACTIVE.
+ type: str
+ target_raid_config:
+ description: |
+ Represents the requested RAID configuration of the node, which will
+ be applied when the Node next transitions through the CLEANING state.
+ Introduced with the cleaning feature.
+ type: dict
+ traits:
+ description: List of traits for this node.
+ type: list
+ updated_at:
+ description: TODO
+ type: str
+ vendor_interface:
+ description: |
+ Interface for vendor-specific functionality on this node, e.g.
+ "no-vendor".
+ type: str
'''
EXAMPLES = '''
@@ -75,58 +292,50 @@ EXAMPLES = '''
name: "testnode1"
'''
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
+ OpenStackModule
)
-def _choose_id_value(module):
- if module.params['uuid']:
- return module.params['uuid']
- if module.params['name']:
- return module.params['name']
- return None
-
+class BaremetalInspectModule(OpenStackModule):
+ argument_spec = dict(
+ name=dict(aliases=['uuid', 'id']),
+ mac=dict(),
+ )
-def main():
- argument_spec = ironic_argument_spec(
- uuid=dict(required=False),
- name=dict(required=False),
- mac=dict(required=False),
- timeout=dict(default=1200, type='int', required=False),
+ module_kwargs = dict(
+ mutually_exclusive=[
+ ('name', 'mac'),
+ ],
+ required_one_of=[
+ ('name', 'mac'),
+ ],
)
- module_kwargs = openstack_module_kwargs()
- module = IronicModule(argument_spec, **module_kwargs)
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- if module.params['name'] or module.params['uuid']:
- server = cloud.get_machine(_choose_id_value(module))
- elif module.params['mac']:
- server = cloud.get_machine_by_mac(module.params['mac'])
+ def run(self):
+ node_name_or_id = self.params['name']
+ node = None
+ if node_name_or_id is not None:
+ node = self.conn.baremetal.find_node(node_name_or_id)
else:
- module.fail_json(msg="The worlds did not align, "
- "the host was not found as "
- "no name, uuid, or mac was "
- "defined.")
- if server:
- cloud.inspect_machine(server['uuid'], module.params['wait'])
- # TODO(TheJulia): diff properties, ?and ports? and determine
- # if a change occurred. In theory, the node is always changed
- # if introspection is able to update the record.
- module.exit_json(changed=True,
- ansible_facts=server['properties'])
+ node = self.conn.get_machine_by_mac(self.params['mac'])
- else:
- module.fail_json(msg="node not found.")
+ if node is None:
+ self.fail_json(msg="node not found.")
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
+ node = self.conn.inspect_machine(node['id'],
+ wait=self.params['wait'],
+ timeout=self.params['timeout'])
+ node = node.to_dict(computed=False)
+ # TODO(TheJulia): diff properties, ?and ports? and determine
+ # if a change occurred. In theory, the node is always changed
+ # if introspection is able to update the record.
+ self.exit_json(changed=True, node=node)
+
+
+def main():
+ module = BaremetalInspectModule()
+ module()
if __name__ == "__main__":
diff --git a/ansible_collections/openstack/cloud/plugins/modules/baremetal_node.py b/ansible_collections/openstack/cloud/plugins/modules/baremetal_node.py
index 1adb560db..b8ef84e22 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/baremetal_node.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/baremetal_node.py
@@ -1,10 +1,10 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# (c) 2014, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: baremetal_node
short_description: Create/Delete Bare Metal Resources from OpenStack
@@ -12,105 +12,69 @@ author: OpenStack Ansible SIG
description:
- Create or Remove Ironic nodes from OpenStack.
options:
- state:
- description:
- - Indicates desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- uuid:
- description:
- - globally unique identifier (UUID) to be given to the resource. Will
- be auto-generated if not specified, and name is specified.
- - Definition of a UUID will always take precedence to a name value.
- type: str
- name:
+ bios_interface:
description:
- - unique name identifier to be given to the resource.
+ - The bios interface for this node, e.g. C(no-bios).
type: str
- driver:
+ boot_interface:
description:
- - The name of the Ironic Driver to use with this node.
- - Required when I(state=present)
+ - The boot interface for this node, e.g. C(pxe).
type: str
- chassis_uuid:
+ chassis_id:
description:
- Associate the node with a pre-defined chassis.
type: str
- ironic_url:
- description:
- - If noauth mode is utilized, this is required to be set to the
- endpoint URL for the Ironic API. Use with "auth" and "auth_type"
- settings set to None.
- type: str
- resource_class:
+ aliases: ['chassis_uuid']
+ console_interface:
description:
- - The specific resource type to which this node belongs.
+ - The console interface for this node, e.g. C(no-console).
type: str
- bios_interface:
+ deploy_interface:
description:
- - The bios interface for this node, e.g. "no-bios".
+ - The deploy interface for this node, e.g. C(iscsi).
type: str
- boot_interface:
+ driver:
description:
- - The boot interface for this node, e.g. "pxe".
+ - The name of the Ironic Driver to use with this node.
+ - Required when I(state) is C(present)
type: str
- console_interface:
+ driver_info:
description:
- - The console interface for this node, e.g. "no-console".
- type: str
- deploy_interface:
+ - Information for this node's driver. Will vary based on which
+ driver is in use. Any sub-field which is populated will be validated
+ during creation. For compatibility reasons sub-fields `power`,
+ `deploy`, `management` and `console` are flattened.
+ required: true
+ type: dict
+ id:
description:
- - The deploy interface for this node, e.g. "iscsi".
+ - ID to be given to the baremetal node. Will be auto-generated on
+ creation if only I(name) is specified.
+ - Definition of I(id) will always take precedence over I(name).
type: str
+ aliases: ['uuid']
inspect_interface:
description:
- - The interface used for node inspection, e.g. "no-inspect".
+ - The interface used for node inspection, e.g. C(no-inspect).
type: str
management_interface:
description:
- The interface for out-of-band management of this node, e.g.
"ipmitool".
type: str
+ name:
+ description:
+ - unique name identifier to be given to the resource.
+ type: str
network_interface:
description:
- The network interface provider to use when describing
connections for this node.
type: str
- power_interface:
- description:
- - The interface used to manage power actions on this node, e.g.
- "ipmitool".
- type: str
- raid_interface:
- description:
- - Interface used for configuring raid on this node.
- type: str
- rescue_interface:
- description:
- - Interface used for node rescue, e.g. "no-rescue".
- type: str
- storage_interface:
- description:
- - Interface used for attaching and detaching volumes on this node, e.g.
- "cinder".
- type: str
- vendor_interface:
- description:
- - Interface for all vendor-specific actions on this node, e.g.
- "no-vendor".
- type: str
- driver_info:
- description:
- - Information for this server's driver. Will vary based on which
- driver is in use. Any sub-field which is populated will be validated
- during creation. For compatibility reasons sub-fields `power`,
- `deploy`, `management` and `console` are flattened.
- required: true
- type: dict
nics:
description:
- - 'A list of network interface cards, eg, " - mac: aa:bb:cc:aa:bb:cc"'
+ - 'A list of network interface cards, e.g. C( - mac: aa:bb:cc:aa:bb:cc)'
+ - This node attribute cannot be updated.
required: true
type: list
elements: dict
@@ -119,322 +83,605 @@ options:
description: The MAC address of the network interface card.
type: str
required: true
+ power_interface:
+ description:
+ - The interface used to manage power actions on this node, e.g.
+ C(ipmitool).
+ type: str
properties:
description:
- - Definition of the physical characteristics of this server, used for scheduling purposes
+ - Definition of the physical characteristics of this node
+ - Used for scheduling purposes
type: dict
suboptions:
cpu_arch:
description:
- CPU architecture (x86_64, i686, ...)
- default: x86_64
+ type: str
cpus:
description:
- Number of CPU cores this machine has
- default: 1
- ram:
+ type: str
+ memory_mb:
description:
- - amount of RAM this machine has, in MB
- default: 1
- disk_size:
+ - Amount of RAM in MB this machine has
+ aliases: ['ram']
+ type: str
+ local_gb:
description:
- - size of first storage device in this machine (typically /dev/sda), in GB
- default: 1
+ - Size in GB of first storage device in this machine (typically
+ /dev/sda)
+ aliases: ['disk_size']
+ type: str
capabilities:
description:
- - special capabilities for the node, such as boot_option, node_role etc
- (see U(https://docs.openstack.org/ironic/latest/install/advanced.html)
- for more information)
- default: ""
+ - Special capabilities for this node such as boot_option etc.
+ - For more information refer to
+ U(https://docs.openstack.org/ironic/latest/install/advanced.html).
+ type: str
root_device:
description:
- Root disk device hints for deployment.
- - See U(https://docs.openstack.org/ironic/latest/install/advanced.html#specifying-the-disk-for-deployment-root-device-hints)
- for allowed hints.
- default: ""
+ - For allowed hints refer to
+ U(https://docs.openstack.org/ironic/latest/install/advanced.html).
+ type: dict
+ raid_interface:
+ description:
+ - Interface used for configuring raid on this node.
+ type: str
+ rescue_interface:
+ description:
+ - Interface used for node rescue, e.g. C(no-rescue).
+ type: str
+ resource_class:
+ description:
+ - The specific resource type to which this node belongs.
+ type: str
skip_update_of_masked_password:
description:
- - Allows the code that would assert changes to nodes to skip the
- update if the change is a single line consisting of the password
- field.
- - As of Kilo, by default, passwords are always masked to API
- requests, which means the logic as a result always attempts to
- re-assert the password field.
- - C(skip_update_of_driver_password) is deprecated alias and will be removed in openstack.cloud 2.0.0.
+ - Deprecated, no longer used.
+ - Updating or specifying a password has not been supported for a while.
type: bool
- aliases:
- - skip_update_of_driver_password
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
- - "jsonpatch"
-
+ state:
+ description:
+ - Indicates desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ storage_interface:
+ description:
+ - Interface used for attaching and detaching volumes on this node, e.g.
+ C(cinder).
+ type: str
+ timeout:
+ description:
+ - Number of seconds to wait for the newly created node to reach the
+ available state.
+ type: int
+ default: 1800
+ vendor_interface:
+ description:
+ - Interface for all vendor-specific actions on this node, e.g.
+ C(no-vendor).
+ type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Enroll a node with some basic properties and driver info
-- openstack.cloud.baremetal_node:
+EXAMPLES = r'''
+- name: Enroll a node with some basic properties and driver info
+ openstack.cloud.baremetal_node:
+ chassis_id: "00000000-0000-0000-0000-000000000001"
cloud: "devstack"
driver: "pxe_ipmitool"
- uuid: "00000000-0000-0000-0000-000000000002"
- properties:
- cpus: 2
- cpu_arch: "x86_64"
- ram: 8192
- disk_size: 64
- capabilities: "boot_option:local"
- root_device:
- wwn: "0x4000cca77fc4dba1"
- nics:
- - mac: "aa:bb:cc:aa:bb:cc"
- - mac: "dd:ee:ff:dd:ee:ff"
driver_info:
ipmi_address: "1.2.3.4"
ipmi_username: "admin"
ipmi_password: "adminpass"
- chassis_uuid: "00000000-0000-0000-0000-000000000001"
-
+ id: "00000000-0000-0000-0000-000000000002"
+ nics:
+ - mac: "aa:bb:cc:aa:bb:cc"
+ - mac: "dd:ee:ff:dd:ee:ff"
+ properties:
+ capabilities: "boot_option:local"
+ cpu_arch: "x86_64"
+ cpus: 2
+ local_gb: 64
+ memory_mb: 8192
+ root_device:
+ wwn: "0x4000cca77fc4dba1"
'''
-try:
- import jsonpatch
- HAS_JSONPATCH = True
-except ImportError:
- HAS_JSONPATCH = False
-
+RETURN = r'''
+node:
+ description: Dictionary describing the Bare Metal node.
+ type: dict
+ returned: On success when I(state) is 'present'.
+ contains:
+ allocation_id:
+ description: The UUID of the allocation associated with the node.
+ If not null, will be the same as instance_id (the
+ opposite is not always true). Unlike instance_id,
+ this field is read-only. Please use the Allocation API
+ to remove allocations.
+ returned: success
+ type: str
+ bios_interface:
+ description: The bios interface to be used for this node.
+ returned: success
+ type: str
+ boot_interface:
+ description: The boot interface for a node, e.g. "pxe".
+ returned: success
+ type: str
+ boot_mode:
+ description: The boot mode for a node, either "uefi" or "bios"
+ returned: success
+ type: str
+ chassis_id:
+ description: UUID of the chassis associated with this node. May be
+ empty or None.
+ returned: success
+ type: str
+ clean_step:
+ description: The current clean step.
+ returned: success
+ type: str
+ conductor:
+ description: |
+ The conductor currently servicing a node.
+ returned: success
+ type: str
+ conductor_group:
+ description: The conductor group for a node.
+ returned: success
+ type: str
+ console_interface:
+ description: The console interface for a node, e.g. "no-console".
+ returned: success
+ type: str
+ created_at:
+ description: Bare Metal node created at timestamp.
+ returned: success
+ type: str
+ deploy_interface:
+ description: The deploy interface for a node, e.g. "direct".
+ returned: success
+ type: str
+ deploy_step:
+ description: The current deploy step.
+ returned: success
+ type: str
+ driver:
+ description: The name of the driver.
+ returned: success
+ type: str
+ driver_info:
+ description: All the metadata required by the driver to manage this
+ node. List of fields varies between drivers, and can
+ be retrieved from the
+ /v1/drivers/<DRIVER_NAME>/properties resource.
+ returned: success
+ type: dict
+ driver_internal_info:
+ description: Internal metadata set and stored by the node's driver.
+ returned: success
+ type: dict
+ extra:
+ description: A set of one or more arbitrary metadata key and value
+ pairs.
+ returned: success
+ type: dict
+ fault:
+ description: The fault indicates the active fault detected by
+ ironic, typically the node is in "maintenance mode".
+ None means no fault has been detected by ironic.
+ "power failure" indicates ironic failed to retrieve
+ power state from this node. There are other possible
+ types, e.g., "clean failure" and "rescue abort
+ failure".
+ returned: success
+ type: str
+ id:
+ description: The UUID for the resource.
+ returned: success
+ type: str
+ inspect_interface:
+ description: The interface used for node inspection.
+ returned: success
+ type: str
+ instance_id:
+ description: UUID of the Nova instance associated with this node.
+ returned: success
+ type: str
+ instance_info:
+ description: Information used to customize the deployed image. May
+ include root partition size, a base 64 encoded config
+ drive, and other metadata. Note that this field is
+ erased automatically when the instance is deleted
+ (this is done by requesting the node provision state
+ be changed to DELETED).
+ returned: success
+ type: dict
+ is_automated_clean_enabled:
+ description: Indicates whether the node will perform automated
+ clean or not.
+ returned: success
+ type: bool
+ is_console_enabled:
+ description: Indicates whether console access is enabled or
+ disabled on this node.
+ returned: success
+ type: bool
+ is_maintenance:
+ description: Whether or not this node is currently in "maintenance
+ mode". Setting a node into maintenance mode removes it
+ from the available resource pool and halts some
+ internal automation. This can happen manually (eg, via
+ an API request) or automatically when Ironic detects a
+ hardware fault that prevents communication with the
+ machine.
+ returned: success
+ type: bool
+ is_protected:
+ description: Whether the node is protected from undeploying,
+ rebuilding and deletion.
+ returned: success
+ type: bool
+ is_retired:
+ description: Whether the node is retired and can hence no longer be
+ provided, i.e. move from manageable to available, and
+ will end up in manageable after cleaning (rather than
+ available).
+ returned: success
+ type: bool
+ is_secure_boot:
+ description: Indicates whether node is currently booted with
+ secure_boot turned on.
+ returned: success
+ type: bool
+ last_error:
+ description: Any error from the most recent (last) transaction that
+ started but failed to finish.
+ returned: success
+ type: str
+ links:
+ description: A list of relative links, including self and bookmark
+ links.
+ returned: success
+ type: list
+ maintenance_reason:
+ description: User-settable description of the reason why this node
+ was placed into maintenance mode
+ returned: success
+ type: str
+ management_interface:
+ description: Interface for out-of-band node management.
+ returned: success
+ type: str
+ name:
+ description: Human-readable identifier for the node resource. May
+ be undefined. Certain words are reserved.
+ returned: success
+ type: str
+ network_interface:
+ description: Which Network Interface provider to use when plumbing
+ the network connections for this node.
+ returned: success
+ type: str
+ owner:
+ description: A string or UUID of the tenant who owns the object.
+ returned: success
+ type: str
+ ports:
+ description: List of ironic ports on this node.
+ returned: success
+ type: list
+ port_groups:
+ description: List of ironic port groups on this node.
+ returned: success
+ type: list
+ power_interface:
+ description: Interface used for performing power actions on the
+ node, e.g. "ipmitool".
+ returned: success
+ type: str
+ power_state:
+ description: The current power state of this node. Usually, "power
+ on" or "power off", but may be "None" if Ironic is
+ unable to determine the power state (eg, due to
+ hardware failure).
+ returned: success
+ type: str
+ properties:
+ description: Physical characteristics of this node. Populated by
+ ironic-inspector during inspection. May be edited via
+ the REST API at any time.
+ returned: success
+ type: dict
+ protected_reason:
+ description: The reason the node is marked as protected.
+ returned: success
+ type: str
+ provision_state:
+ description: The current provisioning state of this node.
+ returned: success
+ type: str
+ raid_config:
+ description: Represents the current RAID configuration of the node.
+ Introduced with the cleaning feature.
+ returned: success
+ type: dict
+ raid_interface:
+ description: Interface used for configuring RAID on this node.
+ returned: success
+ type: str
+ rescue_interface:
+ description: The interface used for node rescue, e.g. "no-rescue".
+ returned: success
+ type: str
+ reservation:
+ description: The name of an Ironic Conductor host which is holding
+ a lock on this node, if a lock is held. Usually
+ "null", but this field can be useful for debugging.
+ returned: success
+ type: str
+ resource_class:
+ description: A string which can be used by external schedulers to
+ identify this node as a unit of a specific type of
+ resource. For more details, see
+ https://docs.openstack.org/ironic/latest/install/configure-nova-flavors.html
+ returned: success
+ type: str
+ retired_reason:
+ description: The reason the node is marked as retired.
+ returned: success
+ type: str
+ states:
+ description: Links to the collection of states.
+ returned: success
+ type: list
+ storage_interface:
+ description: Interface used for attaching and detaching volumes on
+ this node, e.g. "cinder".
+ returned: success
+ type: str
+ target_power_state:
+ description: If a power state transition has been requested, this
+ field represents the requested (ie, "target") state,
+ either "power on" or "power off".
+ returned: success
+ type: str
+ target_provision_state:
+ description: If a provisioning action has been requested, this
+ field represents the requested (ie, "target") state.
+ Note that a node may go through several states during
+ its transition to this target state. For instance,
+ when requesting an instance be deployed to an
+ AVAILABLE node, the node may go through the following
+ state change progression, AVAILABLE -> DEPLOYING ->
+ DEPLOYWAIT -> DEPLOYING -> ACTIVE
+ returned: success
+ type: str
+ target_raid_config:
+ description: Represents the requested RAID configuration of the
+ node, which will be applied when the node next
+ transitions through the CLEANING state. Introduced
+ with the cleaning feature.
+ returned: success
+ type: dict
+ traits:
+ description: List of traits for this node.
+ returned: success
+ type: list
+ updated_at:
+ description: Bare Metal node updated at timestamp.
+ returned: success
+ type: str
+ vendor_interface:
+ description: Interface for vendor-specific functionality on this
+ node, e.g. "no-vendor".
+ returned: success
+ type: str
+'''
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
+ OpenStackModule
)
-_PROPERTIES = {
- 'cpu_arch': 'cpu_arch',
- 'cpus': 'cpus',
- 'ram': 'memory_mb',
- 'disk_size': 'local_gb',
- 'capabilities': 'capabilities',
- 'root_device': 'root_device',
-}
-
-
-def _parse_properties(module):
- """Convert ansible properties into native ironic values.
-
- Also filter out any properties that are not set.
- """
- p = module.params['properties']
- return {to_key: p[from_key] for (from_key, to_key) in _PROPERTIES.items()
- if p.get(from_key) is not None}
-
-
-def _parse_driver_info(sdk, module):
- info = module.params['driver_info'].copy()
- for deprecated in ('power', 'console', 'management', 'deploy'):
- if deprecated in info:
- info.update(info.pop(deprecated))
- module.deprecate("Suboption %s of the driver_info parameter of "
- "'openstack.cloud.baremetal_node' is deprecated"
- % deprecated, version='2.0.0',
- collection_name='openstack.cloud')
- return info
-
-
-def _choose_id_value(module):
- if module.params['uuid']:
- return module.params['uuid']
- if module.params['name']:
- return module.params['name']
- return None
+class BaremetalNodeModule(OpenStackModule):
+ argument_spec = dict(
+ bios_interface=dict(),
+ boot_interface=dict(),
+ chassis_id=dict(aliases=['chassis_uuid']),
+ console_interface=dict(),
+ deploy_interface=dict(),
+ driver=dict(),
+ driver_info=dict(type='dict', required=True),
+ id=dict(aliases=['uuid']),
+ inspect_interface=dict(),
+ management_interface=dict(),
+ name=dict(),
+ network_interface=dict(),
+ nics=dict(type='list', required=True, elements='dict'),
+ power_interface=dict(),
+ properties=dict(
+ type='dict',
+ options=dict(
+ cpu_arch=dict(),
+ cpus=dict(),
+ memory_mb=dict(aliases=['ram']),
+ local_gb=dict(aliases=['disk_size']),
+ capabilities=dict(),
+ root_device=dict(type='dict'),
+ ),
+ ),
+ raid_interface=dict(),
+ rescue_interface=dict(),
+ resource_class=dict(),
+ skip_update_of_masked_password=dict(
+ type='bool',
+ removed_in_version='3.0.0',
+ removed_from_collection='openstack.cloud',
+ ),
+ state=dict(default='present', choices=['present', 'absent']),
+ storage_interface=dict(),
+ timeout=dict(default=1800, type='int'), # increased default value
+ vendor_interface=dict(),
+ )
+ module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('driver',)),
+ ],
+ required_one_of=[
+ ('id', 'name'),
+ ],
+ supports_check_mode=True,
+ )
-def _choose_if_password_only(module, patch):
- if len(patch) == 1:
- if 'password' in patch[0]['path'] and module.params['skip_update_of_masked_password']:
- # Return false to abort update as the password appears
- # to be the only element in the patch.
+ def run(self):
+ name_or_id = \
+ self.params['id'] if self.params['id'] else self.params['name']
+ node = self.conn.baremetal.find_node(name_or_id)
+ state = self.params['state']
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, node))
+
+ if state == 'present' and not node:
+ node = self._create()
+ self.exit_json(changed=True,
+ node=node.to_dict(computed=False))
+
+ elif state == 'present' and node:
+ update = self._build_update(node)
+ if update:
+ node = self._update(node, update)
+ self.exit_json(changed=bool(update),
+ node=node.to_dict(computed=False))
+
+ elif state == 'absent' and node:
+ self._delete(node)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not node:
+ self.exit_json(changed=False)
+
+ def _build_update(self, node):
+ update = {}
+ # TODO(TheJulia): Presently this does not support updating nics.
+ # Support needs to be added.
+
+ # Update all known updateable attributes
+ node_attributes = dict(
+ (k, self.params[k])
+ for k in [
+ 'bios_interface',
+ 'boot_interface',
+ 'chassis_id',
+ 'console_interface',
+ 'deploy_interface',
+ 'driver',
+ 'driver_info',
+ 'inspect_interface',
+ 'management_interface',
+ 'name',
+ 'network_interface',
+ 'power_interface',
+ 'raid_interface',
+ 'rescue_interface',
+ 'resource_class',
+ 'storage_interface',
+ 'vendor_interface',
+ ]
+ if k in self.params and self.params[k] is not None
+ and self.params[k] != node[k])
+
+ properties = self.params['properties']
+ if properties is not None:
+ properties = dict(
+ (k, v) for k, v in properties.items() if v is not None)
+ if properties and properties != node['properties']:
+ node_attributes['properties'] = properties
+
+ # name can only be updated if id is given
+ if self.params['id'] is None and 'name' in node_attributes:
+ self.fail_json(msg='The name of a node cannot be updated without'
+ ' specifying an id')
+
+ if node_attributes:
+ update['node_attributes'] = node_attributes
+
+ return update
+
+ def _create(self):
+ kwargs = {}
+
+ for k in ('bios_interface',
+ 'boot_interface',
+ 'chassis_id',
+ 'console_interface',
+ 'deploy_interface',
+ 'driver',
+ 'driver_info',
+ 'id',
+ 'inspect_interface',
+ 'management_interface',
+ 'name',
+ 'network_interface',
+ 'power_interface',
+ 'raid_interface',
+ 'rescue_interface',
+ 'resource_class',
+ 'storage_interface',
+ 'vendor_interface'):
+ if self.params[k] is not None:
+ kwargs[k] = self.params[k]
+
+ properties = self.params['properties']
+ if properties is not None:
+ properties = dict(
+ (k, v) for k, v in properties.items() if v is not None)
+ if properties:
+ kwargs['properties'] = properties
+
+ node = self.conn.register_machine(
+ nics=self.params['nics'],
+ wait=self.params['wait'],
+ timeout=self.params['timeout'],
+ **kwargs)
+
+ self.exit_json(changed=True, node=node.to_dict(computed=False))
+
+ def _delete(self, node):
+ self.conn.unregister_machine(
+ nics=self.params['nics'], uuid=node['id'])
+
+ def _update(self, node, update):
+ node_attributes = update.get('node_attributes')
+ if node_attributes:
+ node = self.conn.baremetal.update_node(
+ node['id'], **node_attributes)
+
+ return node
+
+ def _will_change(self, state, node):
+ if state == 'present' and not node:
+ return True
+ elif state == 'present' and node:
+ return bool(self._build_update(node))
+ elif state == 'absent' and node:
+ return True
+ else:
+ # state == 'absent' and not node:
return False
- return True
-
-
-def _exit_node_not_updated(module, server):
- module.exit_json(
- changed=False,
- result="Node not updated",
- uuid=server['uuid'],
- provision_state=server['provision_state']
- )
def main():
- argument_spec = ironic_argument_spec(
- uuid=dict(required=False),
- name=dict(required=False),
- driver=dict(required=False),
- resource_class=dict(required=False),
- bios_interface=dict(required=False),
- boot_interface=dict(required=False),
- console_interface=dict(required=False),
- deploy_interface=dict(required=False),
- inspect_interface=dict(required=False),
- management_interface=dict(required=False),
- network_interface=dict(required=False),
- power_interface=dict(required=False),
- raid_interface=dict(required=False),
- rescue_interface=dict(required=False),
- storage_interface=dict(required=False),
- vendor_interface=dict(required=False),
- driver_info=dict(type='dict', required=True),
- nics=dict(type='list', required=True, elements="dict"),
- properties=dict(type='dict', default={}),
- chassis_uuid=dict(required=False),
- skip_update_of_masked_password=dict(
- required=False,
- type='bool',
- aliases=['skip_update_of_driver_password'],
- deprecated_aliases=[dict(
- name='skip_update_of_driver_password',
- version='2.0.0',
- collection_name='openstack.cloud')]
- ),
- state=dict(required=False, default='present', choices=['present', 'absent'])
- )
- module_kwargs = openstack_module_kwargs()
- module = IronicModule(argument_spec, **module_kwargs)
-
- if not HAS_JSONPATCH:
- module.fail_json(msg='jsonpatch is required for this module')
-
- node_id = _choose_id_value(module)
-
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- server = cloud.get_machine(node_id)
- if module.params['state'] == 'present':
- if module.params['driver'] is None:
- module.fail_json(msg="A driver must be defined in order "
- "to set a node to present.")
-
- properties = _parse_properties(module)
- driver_info = _parse_driver_info(sdk, module)
- kwargs = dict(
- driver=module.params['driver'],
- properties=properties,
- driver_info=driver_info,
- name=module.params['name'],
- )
- optional_field_names = ('resource_class',
- 'bios_interface',
- 'boot_interface',
- 'console_interface',
- 'deploy_interface',
- 'inspect_interface',
- 'management_interface',
- 'network_interface',
- 'power_interface',
- 'raid_interface',
- 'rescue_interface',
- 'storage_interface',
- 'vendor_interface')
- for i in optional_field_names:
- if module.params[i]:
- kwargs[i] = module.params[i]
-
- if module.params['chassis_uuid']:
- kwargs['chassis_uuid'] = module.params['chassis_uuid']
-
- if server is None:
- # Note(TheJulia): Add a specific UUID to the request if
- # present in order to be able to re-use kwargs for if
- # the node already exists logic, since uuid cannot be
- # updated.
- if module.params['uuid']:
- kwargs['uuid'] = module.params['uuid']
-
- server = cloud.register_machine(module.params['nics'],
- **kwargs)
- module.exit_json(changed=True, uuid=server['uuid'],
- provision_state=server['provision_state'])
- else:
- # TODO(TheJulia): Presently this does not support updating
- # nics. Support needs to be added.
- #
- # Note(TheJulia): This message should never get logged
- # however we cannot realistically proceed if neither a
- # name or uuid was supplied to begin with.
- if not node_id:
- module.fail_json(msg="A uuid or name value "
- "must be defined")
-
- # Note(TheJulia): Constructing the configuration to compare
- # against. The items listed in the server_config block can
- # be updated via the API.
-
- server_config = dict(
- driver=server['driver'],
- properties=server['properties'],
- driver_info=server['driver_info'],
- name=server['name'],
- )
-
- # Add the pre-existing chassis_uuid only if
- # it is present in the server configuration.
- if hasattr(server, 'chassis_uuid'):
- server_config['chassis_uuid'] = server['chassis_uuid']
-
- # Note(TheJulia): If a password is defined and concealed, a
- # patch will always be generated and re-asserted.
- patch = jsonpatch.JsonPatch.from_diff(server_config, kwargs)
-
- if not patch:
- _exit_node_not_updated(module, server)
- elif _choose_if_password_only(module, list(patch)):
- # Note(TheJulia): Normally we would allow the general
- # exception catch below, however this allows a specific
- # message.
- try:
- server = cloud.patch_machine(
- server['uuid'],
- list(patch))
- except Exception as e:
- module.fail_json(msg="Failed to update node, "
- "Error: %s" % e.message)
-
- # Enumerate out a list of changed paths.
- change_list = []
- for change in list(patch):
- change_list.append(change['path'])
- module.exit_json(changed=True,
- result="Node Updated",
- changes=change_list,
- uuid=server['uuid'],
- provision_state=server['provision_state'])
-
- # Return not updated by default as the conditions were not met
- # to update.
- _exit_node_not_updated(module, server)
-
- if module.params['state'] == 'absent':
- if not node_id:
- module.fail_json(msg="A uuid or name value must be defined "
- "in order to remove a node.")
-
- if server is not None:
- cloud.unregister_machine(module.params['nics'],
- server['uuid'])
- module.exit_json(changed=True, result="deleted")
- else:
- module.exit_json(changed=False, result="Server not found")
-
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
+ module = BaremetalNodeModule()
+ module()
if __name__ == "__main__":
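Editor's note: a rough sketch of the update path above (not one of the module's shipped examples): when a node matching I(id) already exists, _build_update() collects only the attributes that differ and hands them to update_node(). The UUID and IPMI values below are placeholders.

- name: Adjust driver settings on an existing node (illustrative sketch)
  openstack.cloud.baremetal_node:
    cloud: "devstack"
    id: "00000000-0000-0000-0000-000000000002"
    driver: "pxe_ipmitool"
    driver_info:
      ipmi_address: "1.2.3.4"
      ipmi_username: "admin"
    nics:
      - mac: "aa:bb:cc:aa:bb:cc"
    state: present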
diff --git a/ansible_collections/openstack/cloud/plugins/modules/baremetal_node_action.py b/ansible_collections/openstack/cloud/plugins/modules/baremetal_node_action.py
index 267e43088..a74c65ea3 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/baremetal_node_action.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/baremetal_node_action.py
@@ -1,44 +1,24 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# (c) 2015, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: baremetal_node_action
-short_description: Activate/Deactivate Bare Metal Resources from OpenStack
+short_description: Activate/Deactivate Bare Metal nodes from OpenStack
author: OpenStack Ansible SIG
description:
- - Deploy to nodes controlled by Ironic.
+ - Deploy to Bare Metal nodes controlled by Ironic.
options:
- name:
- description:
- - Name of the node to create.
- type: str
- state:
- description:
- - Indicates desired state of the resource.
- - I(state) can be C('present'), C('absent'), C('maintenance') or C('off').
- default: present
- type: str
deploy:
description:
- Indicates if the resource should be deployed. Allows for deployment
logic to be disengaged and control of the node power or maintenance
state to be changed.
- type: str
- default: 'yes'
- uuid:
- description:
- - globally unique identifier (UUID) to be given to the resource.
- type: str
- ironic_url:
- description:
- - If noauth mode is utilized, this is required to be set to the
- endpoint URL for the Ironic API. Use with "auth" and "auth_type"
- settings set to None.
- type: str
+ type: bool
+ default: true
config_drive:
description:
- A configdrive file or HTTP(S) URL that will be passed along to the
@@ -47,8 +27,8 @@ options:
instance_info:
description:
- Definition of the instance information which is used to deploy
- the node. This information is only required when an instance is
- set to present.
+ the node. This information is only required when I(state) is
+ set to C(present) or C(on).
type: dict
suboptions:
image_source:
@@ -60,302 +40,228 @@ options:
image_disk_format:
description:
- The type of image that has been requested to be deployed.
+ maintenance:
+ description:
+ - Set node into maintenance mode.
+ - The power state as controlled with I(power) will not be changed
+ when maintenance mode of a node is changed.
+ type: bool
+ maintenance_reason:
+ description:
+ - A string expression regarding the reason a node is in a
+ maintenance mode.
+ type: str
+ name:
+ description:
+ - Name or ID of the Bare Metal node.
+ type: str
+ required: true
+ aliases: ['id', 'uuid']
power:
description:
- A setting to allow power state to be asserted allowing nodes
that are not yet deployed to be powered on, and nodes that
are deployed to be powered off.
- - I(power) can be C('present'), C('absent'), C('maintenance') or C('off').
+ - I(power) can be C(present), C(absent), C(maintenance), C(on) or
+ C(off).
+ choices: ['present', 'absent', 'maintenance', 'on', 'off']
default: present
type: str
- maintenance:
- description:
- - A setting to allow the direct control if a node is in
- maintenance mode.
- - I(maintenance) can be C('yes'), C('no'), C('True'), or C('False').
- type: str
- maintenance_reason:
+ state:
description:
- - A string expression regarding the reason a node is in a
- maintenance mode.
+ - Indicates desired state of the resource.
+ - I(state) can be C(present), C(absent), C(maintenance), C(on) or
+ C(off).
+ choices: ['present', 'absent', 'maintenance', 'on', 'off']
+ default: present
type: str
- wait:
- description:
- - A boolean value instructing the module to wait for node
- activation or deactivation to complete before returning.
- type: bool
- default: 'no'
timeout:
description:
- - An integer value representing the number of seconds to
- wait for the node activation or deactivation to complete.
- default: 1800
+ - Number of seconds to wait for the node activation or deactivation
+ to complete.
type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ default: 1800
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
+EXAMPLES = r'''
# Activate a node by booting an image with a configdrive attached
- openstack.cloud.baremetal_node_action:
- cloud: "openstack"
- uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
- state: present
- power: present
- deploy: True
- maintenance: False
- config_drive: "http://192.168.1.1/host-configdrive.iso"
instance_info:
image_source: "http://192.168.1.1/deploy_image.img"
image_checksum: "356a6b55ecc511a20c33c946c4e678af"
image_disk_format: "qcow"
delegate_to: localhost
+ deploy: true
+ cloud: "openstack"
+ config_drive: "http://192.168.1.1/host-configdrive.iso"
+ maintenance: false
+ power: present
+ uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
+ state: present
# Activate a node by booting an image with a configdrive json object
- openstack.cloud.baremetal_node_action:
- uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
auth_type: None
- ironic_url: "http://192.168.1.1:6385/"
+ auth:
+ endpoint: "http://192.168.1.1:6385/"
+ id: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
config_drive:
meta_data:
hostname: node1
public_keys:
default: ssh-rsa AAA...BBB==
+ delegate_to: localhost
instance_info:
image_source: "http://192.168.1.1/deploy_image.img"
image_checksum: "356a6b55ecc511a20c33c946c4e678af"
image_disk_format: "qcow"
- delegate_to: localhost
'''
+RETURN = r'''
+'''
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
+ OpenStackModule
)
-def _choose_id_value(module):
- if module.params['uuid']:
- return module.params['uuid']
- if module.params['name']:
- return module.params['name']
- return None
-
-
-def _is_true(value):
- true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']
- if value in true_values:
- return True
- return False
-
-
-def _is_false(value):
- false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off']
- if value in false_values:
- return True
- return False
-
-
-def _check_set_maintenance(module, cloud, node):
- if _is_true(module.params['maintenance']):
- if _is_false(node['maintenance']):
- cloud.set_machine_maintenance_state(
- node['uuid'],
- True,
- reason=module.params['maintenance_reason'])
- module.exit_json(changed=True, msg="Node has been set into "
- "maintenance mode")
- else:
- # User has requested maintenance state, node is already in the
- # desired state, checking to see if the reason has changed.
- if (str(node['maintenance_reason']) not in
- str(module.params['maintenance_reason'])):
- cloud.set_machine_maintenance_state(
- node['uuid'],
- True,
- reason=module.params['maintenance_reason'])
- module.exit_json(changed=True, msg="Node maintenance reason "
- "updated, cannot take any "
- "additional action.")
- elif _is_false(module.params['maintenance']):
- if node['maintenance'] is True:
- cloud.remove_machine_from_maintenance(node['uuid'])
- return True
- else:
- module.fail_json(msg="maintenance parameter was set but a valid "
- "the value was not recognized.")
- return False
-
-
-def _check_set_power_state(module, cloud, node):
- if 'power on' in str(node['power_state']):
- if _is_false(module.params['power']):
- # User has requested the node be powered off.
- cloud.set_machine_power_off(node['uuid'])
- module.exit_json(changed=True, msg="Power requested off")
- if 'power off' in str(node['power_state']):
- if (
- _is_false(module.params['power'])
- and _is_false(module.params['state'])
- ):
- return False
- if (
- _is_false(module.params['power'])
- and _is_false(module.params['state'])
- ):
- module.exit_json(
- changed=False,
- msg="Power for node is %s, node must be reactivated "
- "OR set to state absent"
- )
- # In the event the power has been toggled on and
- # deployment has been requested, we need to skip this
- # step.
- if (
- _is_true(module.params['power'])
- and _is_false(module.params['deploy'])
- ):
- # Node is powered down when it is not awaiting to be provisioned
- cloud.set_machine_power_on(node['uuid'])
- return True
- # Default False if no action has been taken.
- return False
-
-
-def main():
- argument_spec = ironic_argument_spec(
- uuid=dict(required=False),
- name=dict(required=False),
- instance_info=dict(type='dict', required=False),
- config_drive=dict(type='raw', required=False),
- state=dict(required=False, default='present'),
- maintenance=dict(required=False),
- maintenance_reason=dict(required=False),
- power=dict(required=False, default='present'),
- deploy=dict(required=False, default='yes'),
- wait=dict(type='bool', required=False, default=False),
- timeout=dict(required=False, type='int', default=1800),
+class BaremetalNodeActionModule(OpenStackModule):
+
+ argument_spec = dict(
+ config_drive=dict(type='raw'),
+ deploy=dict(type='bool', default=True),
+ instance_info=dict(type='dict'),
+ maintenance=dict(type='bool'),
+ maintenance_reason=dict(),
+ name=dict(required=True, aliases=['id', 'uuid']),
+ power=dict(default='present',
+ choices=['present', 'absent', 'maintenance', 'on', 'off']),
+ state=dict(default='present',
+ choices=['present', 'absent', 'maintenance', 'on', 'off']),
+ timeout=dict(type='int', default=1800), # increased default value
)
- module_kwargs = openstack_module_kwargs()
- module = IronicModule(argument_spec, **module_kwargs)
-
- if (
- module.params['config_drive']
- and not isinstance(module.params['config_drive'], (str, dict))
- ):
- config_drive_type = type(module.params['config_drive'])
- msg = ('argument config_drive is of type %s and we expected'
- ' str or dict') % config_drive_type
- module.fail_json(msg=msg)
-
- node_id = _choose_id_value(module)
- if not node_id:
- module.fail_json(msg="A uuid or name value must be defined "
- "to use this module.")
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- node = cloud.get_machine(node_id)
-
- if node is None:
- module.fail_json(msg="node not found")
+ module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('instance_info',)),
+ ],
+ )
- uuid = node['uuid']
- instance_info = module.params['instance_info']
- changed = False
- wait = module.params['wait']
- timeout = module.params['timeout']
+ def run(self):
+ # Fail early on invalid arguments
+ config_drive = self.params['config_drive']
+ if config_drive and not isinstance(config_drive, (str, dict)):
+ self.fail_json(msg='config_drive must be of type str or dict,'
+ ' not {0}'.format(type(config_drive)))
# User has requested desired state to be in maintenance state.
- if module.params['state'] == 'maintenance':
- module.params['maintenance'] = True
+ if self.params['state'] == 'maintenance':
+ if self.params['maintenance'] is False:
+ self.fail_json(
+ msg='state=maintenance contradicts maintenance=false')
+ self.params['maintenance'] = True
+
+ name_or_id = self.params['name']
+ node = self.conn.baremetal.find_node(name_or_id, ignore_missing=False)
+
+ if node['provision_state'] in ['cleaning',
+ 'deleting',
+ 'wait call-back']:
+ self.fail_json(msg='Node is in {0} state, cannot act upon the'
+ ' request as the node is in a transition'
+ ' state'.format(node['provision_state']))
- if node['provision_state'] in [
- 'cleaning',
- 'deleting',
- 'wait call-back']:
- module.fail_json(msg="Node is in %s state, cannot act upon the "
- "request as the node is in a transition "
- "state" % node['provision_state'])
- # TODO(TheJulia) This is in-development code, that requires
- # code in the shade library that is still in development.
- if _check_set_maintenance(module, cloud, node):
- if node['provision_state'] in 'active':
- module.exit_json(changed=True,
- result="Maintenance state changed")
- changed = True
- node = cloud.get_machine(node_id)
+ changed = False
- if _check_set_power_state(module, cloud, node):
- changed = True
- node = cloud.get_machine(node_id)
+ # Update maintenance state
+ if self.params['maintenance']:
+ maintenance_reason = self.params['maintenance_reason']
+ if not node['maintenance'] \
+ or node['maintenance_reason'] != maintenance_reason:
+ self.conn.baremetal.set_node_maintenance(
+ node['id'], reason=maintenance_reason)
+ self.exit_json(changed=True)
+ else: # self.params['maintenance'] is False
+ if node['maintenance']:
+ self.conn.baremetal.unset_node_maintenance(node['id'])
+ if node['provision_state'] in 'active':
+ # Maintenance state changed
+ self.exit_json(changed=True)
+ changed = True
+ node = self.conn.baremetal.get_node(node['id'])
+
+ # Update power state
+ if node['power_state'] == 'power on':
+ if self.params['power'] in ['absent', 'off']:
+ # User has requested the node be powered off.
+ self.conn.baremetal.set_node_power_state(node['id'],
+ 'power off')
+ self.exit_json(changed=True)
+ elif node['power_state'] == 'power off':
+ if self.params['power'] not in ['absent', 'off'] \
+ or self.params['state'] not in ['absent', 'off']:
+ # In the event the power has been toggled on and
+ # deployment has been requested, we need to skip this
+ # step.
+ if self.params['power'] == 'present' \
+ and not self.params['deploy']:
+ # Node is powered down when it is not awaiting to be
+ # provisioned
+ self.conn.baremetal.set_node_power_state(node['id'],
+ 'power on')
+ changed = True
+ node = self.conn.baremetal.get_node(node['id'])
+ else:
+ self.fail_json(msg='Node has unknown power state {0}'
+ .format(node['power_state']))
- if _is_true(module.params['state']):
- if _is_false(module.params['deploy']):
- module.exit_json(
- changed=changed,
- result="User request has explicitly disabled "
- "deployment logic"
- )
+ if self.params['state'] in ['present', 'on']:
+ if not self.params['deploy']:
+ # User request has explicitly disabled deployment logic
+ self.exit_json(changed=changed)
if 'active' in node['provision_state']:
- module.exit_json(
- changed=changed,
- result="Node already in an active state."
- )
-
- if instance_info is None:
- module.fail_json(
- changed=changed,
- msg="When setting an instance to present, "
- "instance_info is a required variable.")
+ # Node already in an active state
+ self.exit_json(changed=changed)
# TODO(TheJulia): Update instance info, however info is
# deployment specific. Perhaps consider adding rebuild
# support, although there is a known desire to remove
# rebuild support from Ironic at some point in the future.
- cloud.update_machine(uuid, instance_info=instance_info)
- cloud.validate_node(uuid)
- if not wait:
- cloud.activate_node(uuid, module.params['config_drive'])
- else:
- cloud.activate_node(
- uuid,
- configdrive=module.params['config_drive'],
- wait=wait,
- timeout=timeout)
+ self.conn.baremetal.update_node(
+ node['id'],
+ instance_info=self.params['instance_info'])
+ self.conn.baremetal.validate_node(node['id'])
+ self.conn.baremetal.set_node_provision_state(
+ node['id'],
+ target='active',
+ config_drive=self.params['config_drive'],
+ wait=self.params['wait'],
+ timeout=self.params['timeout'])
+
# TODO(TheJulia): Add more error checking..
- module.exit_json(changed=changed, result="node activated")
+ self.exit_json(changed=True)
- elif _is_false(module.params['state']):
- if node['provision_state'] not in "deleted":
- cloud.update_machine(uuid, instance_info={})
- if not wait:
- cloud.deactivate_node(uuid)
- else:
- cloud.deactivate_node(
- uuid,
- wait=wait,
- timeout=timeout)
+ elif node['provision_state'] not in 'deleted':
+ self.conn.baremetal.update_node(node['id'], instance_info={})
+ self.conn.baremetal.set_node_provision_state(
+ node['id'],
+ target='deleted',
+ wait=self.params['wait'],
+ timeout=self.params['timeout'])
+ self.exit_json(changed=True)
- module.exit_json(changed=True, result="deleted")
- else:
- module.exit_json(changed=False, result="node not found")
else:
- module.fail_json(msg="State must be present, absent, "
- "maintenance, off")
+ # self.params['state'] in ['absent', 'off']
+ # and node['provision_state'] in 'deleted'
+ self.exit_json(changed=changed)
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
+
+def main():
+ module = BaremetalNodeActionModule()
+ module()
if __name__ == "__main__":
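Editor's note: a short sketch of the new boolean and choice-based parameters introduced above (maintenance as a bool, state/power with explicit choices). This is not part of the module's shipped examples; the node name and reason are placeholders.

- name: Place a node into maintenance mode (illustrative sketch)
  openstack.cloud.baremetal_node_action:
    cloud: "devstack"
    name: "testnode1"
    state: maintenance
    maintenance_reason: "Scheduled hardware check"
  delegate_to: localhost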
diff --git a/ansible_collections/openstack/cloud/plugins/modules/baremetal_node_info.py b/ansible_collections/openstack/cloud/plugins/modules/baremetal_node_info.py
index 8141fcdfa..457611718 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/baremetal_node_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/baremetal_node_info.py
@@ -1,90 +1,78 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright (c) 2021 by Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
module: baremetal_node_info
short_description: Retrieve information about Bare Metal nodes from OpenStack
author: OpenStack Ansible SIG
description:
- Retrieve information about Bare Metal nodes from OpenStack.
options:
- node:
- description:
- - Name or globally unique identifier (UUID) to identify the host.
- type: str
mac:
description:
- - Unique mac address that is used to attempt to identify the host.
+ - MAC address that is used to attempt to identify the host.
type: str
- ironic_url:
+ name:
description:
- - If noauth mode is utilized, this is required to be set to the
- endpoint URL for the Ironic API. Use with "auth" and "auth_type"
- settings set to None.
+ - Name or ID of the baremetal node.
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ aliases: ['node']
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Gather information about all baremeal nodes
-- openstack.cloud.baremetal_node_info:
+EXAMPLES = r'''
+- name: Gather information about all baremetal nodes
+ openstack.cloud.baremetal_node_info:
cloud: "devstack"
- register: result
-- debug:
- msg: "{{ result.baremetal_nodes }}"
-# Gather information about a baremeal node
-- openstack.cloud.baremetal_node_info:
+ register: nodes
+
+- debug: var=nodes
+
+- name: Gather information about a baremetal node
+ openstack.cloud.baremetal_node_info:
cloud: "devstack"
- node: "00000000-0000-0000-0000-000000000002"
- register: result
-- debug:
- msg: "{{ result.baremetal_nodes }}"
+ name: "00000000-0000-0000-0000-000000000002"
+ register: nodes
+
+- debug: var=nodes
'''
-RETURN = '''
-baremetal_nodes:
- description: Bare Metal node list. A subset of the dictionary keys
- listed below may be returned, depending on your cloud
- provider.
- returned: always, but can be null
- type: complex
+RETURN = r'''
+nodes:
+ description: |
+ Bare Metal node list. A subset of the dictionary keys listed below may
+ be returned, depending on your cloud provider.
+ returned: always
+ type: list
+ elements: dict
contains:
- allocation_uuid:
+ allocation_id:
description: The UUID of the allocation associated with the node.
- If not null, will be the same as instance_uuid (the
- opposite is not always true). Unlike instance_uuid,
+ If not null, will be the same as instance_id (the
+ opposite is not always true). Unlike instance_id,
this field is read-only. Please use the Allocation API
to remove allocations.
returned: success
type: str
- automated_clean:
- description: Indicates whether the node will perform automated
- clean or not.
- returned: success
- type: bool
bios_interface:
description: The bios interface to be used for this node.
returned: success
type: str
boot_interface:
- description: The boot interface for a Node, e.g. "pxe".
+ description: The boot interface for a node, e.g. "pxe".
returned: success
type: str
boot_mode:
description: The boot mode for a node, either "uefi" or "bios"
returned: success
type: str
- chassis_uuid:
- description: UUID of the chassis associated with this Node. May be
+ chassis_id:
+ description: UUID of the chassis associated with this node. May be
empty or None.
returned: success
type: str
@@ -93,21 +81,13 @@ baremetal_nodes:
returned: success
type: str
conductor:
- description: The conductor currently servicing a node. This field
- is read-only.
+ description: The conductor currently servicing a node.
returned: success
type: str
conductor_group:
- description: The conductor group for a node. Case-insensitive
- string up to 255 characters, containing a-z, 0-9, _,
- -, and ..
+ description: The conductor group for a node.
returned: success
type: str
- console_enabled:
- description: Indicates whether console access is enabled or
- disabled on this node.
- returned: success
- type: bool
console_interface:
description: The console interface for a node, e.g. "no-console".
returned: success
@@ -130,13 +110,13 @@ baremetal_nodes:
type: str
driver_info:
description: All the metadata required by the driver to manage this
- Node. List of fields varies between drivers, and can
+ node. List of fields varies between drivers, and can
be retrieved from the
/v1/drivers/<DRIVER_NAME>/properties resource.
returned: success
type: dict
driver_internal_info:
- description: Internal metadata set and stored by the Node's driver.
+ description: Internal metadata set and stored by the node's driver.
returned: success
type: dict
extra:
@@ -146,7 +126,7 @@ baremetal_nodes:
type: dict
fault:
description: The fault indicates the active fault detected by
- ironic, typically the Node is in "maintenance mode".
+ ironic, typically the node is in "maintenance mode".
None means no fault has been detected by ironic.
"power failure" indicates ironic failed to retrieve
power state from this node. There are other possible
@@ -162,27 +142,32 @@ baremetal_nodes:
description: The interface used for node inspection.
returned: success
type: str
+ instance_id:
+ description: UUID of the Nova instance associated with this node.
+ returned: success
+ type: str
instance_info:
description: Information used to customize the deployed image. May
include root partition size, a base 64 encoded config
drive, and other metadata. Note that this field is
erased automatically when the instance is deleted
- (this is done by requesting the Node provision state
+ (this is done by requesting the node provision state
be changed to DELETED).
returned: success
type: dict
- instance_uuid:
- description: UUID of the Nova instance associated with this Node.
+ is_automated_clean_enabled:
+ description: Indicates whether the node will perform automated
+ clean or not.
returned: success
- type: str
- last_error:
- description: Any error from the most recent (last) transaction that
- started but failed to finish.
+ type: bool
+ is_console_enabled:
+ description: Indicates whether console access is enabled or
+ disabled on this node.
returned: success
- type: str
- maintenance:
- description: Whether or not this Node is currently in "maintenance
- mode". Setting a Node into maintenance mode removes it
+ type: bool
+ is_maintenance:
+ description: Whether or not this node is currently in "maintenance
+ mode". Setting a node into maintenance mode removes it
from the available resource pool and halts some
internal automation. This can happen manually (eg, via
an API request) or automatically when Ironic detects a
@@ -190,8 +175,35 @@ baremetal_nodes:
machine.
returned: success
type: bool
+ is_protected:
+ description: Whether the node is protected from undeploying,
+ rebuilding and deletion.
+ returned: success
+ type: bool
+ is_retired:
+ description: Whether the node is retired and can hence no longer be
+ provided, i.e. move from manageable to available, and
+ will end up in manageable after cleaning (rather than
+ available).
+ returned: success
+ type: bool
+ is_secure_boot:
+ description: Indicates whether node is currently booted with
+ secure_boot turned on.
+ returned: success
+ type: bool
+ last_error:
+ description: Any error from the most recent (last) transaction that
+ started but failed to finish.
+ returned: success
+ type: str
+ links:
+ description: A list of relative links, including self and bookmark
+ links.
+ returned: success
+ type: list
maintenance_reason:
- description: User-settable description of the reason why this Node
+ description: User-settable description of the reason why this node
was placed into maintenance mode
returned: success
type: str
@@ -200,189 +212,51 @@ baremetal_nodes:
returned: success
type: str
name:
- description: Human-readable identifier for the Node resource. May
+ description: Human-readable identifier for the node resource. May
be undefined. Certain words are reserved.
returned: success
type: str
network_interface:
description: Which Network Interface provider to use when plumbing
- the network connections for this Node.
+ the network connections for this node.
returned: success
type: str
owner:
description: A string or UUID of the tenant who owns the object.
returned: success
type: str
- portgroups:
- description: List of ironic portgroups on this node.
- returned: success
- type: list
- elements: dict
- contains:
- address:
- description: Physical hardware address of this Portgroup,
- typically the hardware MAC address.
- returned: success
- type: str
- created_at:
- description: The UTC date and time when the resource was
- created, ISO 8601 format.
- returned: success
- type: str
- extra:
- description: A set of one or more arbitrary metadata key and
- value pairs.
- returned: success
- type: dict
- id:
- description: The UUID for the resource.
- returned: success
- type: str
- internal_info:
- description: Internal metadata set and stored by the Portgroup.
- This field is read-only.
- returned: success
- type: dict
- is_standalone_ports_supported:
- description: Indicates whether ports that are members of this
- portgroup can be used as stand-alone ports.
- returned: success
- type: bool
- mode:
- description: Mode of the port group. For possible values, refer
- to https://www.kernel.org/doc/Documentation/networking/bonding.txt.
- If not specified in a request to create a port
- group, it will be set to the value of the
- [DEFAULT]default_portgroup_mode configuration
- option. When set, can not be removed from the port
- group.
- returned: success
- type: str
- name:
- description: Human-readable identifier for the Portgroup
- resource. May be undefined.
- returned: success
- type: str
- node_id:
- description: UUID of the Node this resource belongs to.
- returned: success
- type: str
- ports:
- description: List of port UUID's of ports belonging to this
- portgroup.
- returned: success
- type: list
- properties:
- description: Key/value properties related to the port group's
- configuration.
- returned: success
- type: dict
- updated_at:
- description: The UTC date and time when the resource was
- updated, ISO 8601 format. May be "null".
- returned: success
- type: str
ports:
description: List of ironic ports on this node.
returned: success
type: list
- elements: dict
- contains:
- address:
- description: Physical hardware address of this network Port,
- typically the hardware MAC address.
- returned: success
- type: str
- created_at:
- description: The UTC date and time when the resource was
- created, ISO 8601 format.
- returned: success
- type: str
- extra:
- description: A set of one or more arbitrary metadata key and
- value pairs.
- returned: success
- type: dict
- id:
- description: The UUID for the resource.
- returned: success
- type: str
- internal_info:
- description: Internal metadata set and stored by the Port. This
- field is read-only.
- returned: success
- type: dict
- local_link_connection:
- description: The Port binding profile. If specified, must
- contain switch_id (only a MAC address or an
- OpenFlow based datapath_id of the switch are
- accepted in this field) and port_id (identifier of
- the physical port on the switch to which node's
- port is connected to) fields. switch_info is an
- optional string field to be used to store any
- vendor-specific information.
- returned: success
- type: dict
- name:
- description: The name of the resource.
- returned: success
- type: str
- node_uuid:
- description: UUID of the Node this resource belongs to.
- returned: success
- type: str
- physical_network:
- description: The name of the physical network to which a port
- is connected. May be empty.
- returned: success
- type: str
- portgroup_uuid:
- description: UUID of the Portgroup this resource belongs to.
- returned: success
- type: str
- pxe_enabled:
- description: Indicates whether PXE is enabled or disabled on
- the Port.
- returned: success
- type: str
- updated_at:
- description: The UTC date and time when the resource was
- updated, ISO 8601 format. May be "null".
- returned: success
- type: str
- uuid:
- description: The UUID for the resource.
- returned: success
- type: str
+ port_groups:
+ description: List of ironic port groups on this node.
+ returned: success
+ type: list
power_interface:
description: Interface used for performing power actions on the
node, e.g. "ipmitool".
returned: success
type: str
power_state:
- description: The current power state of this Node. Usually, "power
+ description: The current power state of this node. Usually, "power
on" or "power off", but may be "None" if Ironic is
unable to determine the power state (eg, due to
hardware failure).
returned: success
type: str
properties:
- description: Physical characteristics of this Node. Populated by
+ description: Physical characteristics of this node. Populated by
ironic-inspector during inspection. May be edited via
the REST API at any time.
returned: success
type: dict
- protected:
- description: Whether the node is protected from undeploying,
- rebuilding and deletion.
- returned: success
- type: bool
protected_reason:
description: The reason the node is marked as protected.
returned: success
type: str
provision_state:
- description: The current provisioning state of this Node.
+ description: The current provisioning state of this node.
returned: success
type: str
raid_config:
@@ -406,27 +280,19 @@ baremetal_nodes:
type: str
resource_class:
description: A string which can be used by external schedulers to
- identify this Node as a unit of a specific type of
+ identify this node as a unit of a specific type of
resource. For more details, see
https://docs.openstack.org/ironic/latest/install/configure-nova-flavors.html
returned: success
type: str
- retired:
- description: Whether the node is retired and can hence no longer be
- provided, i.e. move from manageable to available, and
- will end up in manageable after cleaning (rather than
- available).
- returned: success
- type: bool
retired_reason:
description: The reason the node is marked as retired.
returned: success
type: str
- secure_boot:
- description: Indicates whether node is currently booted with
- secure_boot turned on.
+ states:
+ description: Links to the collection of states.
returned: success
- type: bool
+ type: list
storage_interface:
description: Interface used for attaching and detaching volumes on
this node, e.g. "cinder".
@@ -441,17 +307,17 @@ baremetal_nodes:
target_provision_state:
description: If a provisioning action has been requested, this
field represents the requested (ie, "target") state.
- Note that a Node may go through several states during
+ Note that a node may go through several states during
its transition to this target state. For instance,
when requesting an instance be deployed to an
- AVAILABLE Node, the Node may go through the following
+ AVAILABLE node, the node may go through the following
state change progression, AVAILABLE -> DEPLOYING ->
DEPLOYWAIT -> DEPLOYING -> ACTIVE
returned: success
type: str
target_raid_config:
description: Represents the requested RAID configuration of the
- node, which will be applied when the Node next
+ node, which will be applied when the node next
transitions through the CLEANING state. Introduced
with the cleaning feature.
returned: success
@@ -464,91 +330,79 @@ baremetal_nodes:
description: Bare Metal node updated at timestamp.
returned: success
type: str
- uuid:
- description: The UUID for the resource.
- returned: success
- type: str
vendor_interface:
description: Interface for vendor-specific functionality on this
node, e.g. "no-vendor".
returned: success
type: str
+baremetal_nodes:
+ description: Same as C(nodes), kept for backward compatibility.
+ returned: always
+ type: list
+ elements: dict
'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
+ OpenStackModule
)
-def cleanup_node_properties(machine, cloud):
- # states are links, not useful
- machine.pop('states', None)
-
- for port in machine.ports:
- # links are not useful
- port.pop('links', None)
- # redundant, location is in on machine as well
- port.pop('location', None)
-
- for portgroup in machine.portgroups:
- # links are not useful
- portgroup.pop('links', None)
- # redundant, location is in on machine as well
- portgroup.pop('location', None)
- # links to ports are not useful, replace with list of port uuid's
- portgroup['ports'] = [x.id for x in list(
- cloud.baremetal.ports(portgroup=portgroup['id']))]
-
-
-def get_ports_and_portgroups(cloud, machine):
- machine.ports = cloud.list_nics_for_machine(machine.uuid)
- machine.portgroups = [dict(x) for x in
- list(cloud.baremetal.port_groups(node=machine.uuid,
- details=True))]
-
-
-def main():
- argument_spec = ironic_argument_spec(
- node=dict(required=False),
- mac=dict(required=False),
+class BaremetalNodeInfoModule(OpenStackModule):
+ argument_spec = dict(
+ mac=dict(),
+ name=dict(aliases=['node']),
)
- module_kwargs = openstack_module_kwargs()
- module_kwargs['supports_check_mode'] = True
- module = IronicModule(argument_spec, **module_kwargs)
+ module_kwargs = dict(
+ mutually_exclusive=[
+ ('mac', 'name'),
+ ],
+ supports_check_mode=True,
+ )
- machine = None
- machines = list()
+ def run(self):
+ name_or_id = self.params['name']
+ mac = self.params['mac']
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- if module.params['node']:
- machine = cloud.get_machine(module.params['node'])
- elif module.params['mac']:
- machine = cloud.get_machine_by_mac(module.params['mac'])
+ node_id = None
+ if name_or_id:
+ # self.conn.baremetal.nodes() does not support searching by name or
+ # id which we want to provide for backward compatibility
+ node = self.conn.baremetal.find_node(name_or_id)
+ if node:
+ node_id = node['id']
+ elif mac:
+ # self.conn.get_machine_by_mac(mac) is not necessary
+ # because nodes can be filtered by instance_id
+ baremetal_port = self.conn.get_nic_by_mac(mac)
+ if baremetal_port:
+ node_id = baremetal_port['node_id']
- # Fail if node not found
- if (module.params['node'] or module.params['mac']) and not machine:
- module.fail_json(msg='The baremetal node was not found')
+ if name_or_id or mac:
+ if node_id:
+ # fetch node details with self.conn.baremetal.get_node()
+ # because self.conn.baremetal.nodes() does not provide a
+ # query parameter to filter by a node's id
+ node = self.conn.baremetal.get_node(node_id)
+ nodes = [node.to_dict(computed=False)]
+ else: # not node_id
+ # return empty list when no matching node could be found
+ # because *_info modules do not raise errors on missing
+ # resources
+ nodes = []
+ else: # not name_or_id and not mac
+ nodes = [node.to_dict(computed=False) for node in
+ self.conn.baremetal.nodes(details=True)]
- if machine:
- machines.append(machine)
- else:
- machines = cloud.list_machines()
+ self.exit_json(changed=False,
+ nodes=nodes,
+ # keep for backward compatibility
+ baremetal_nodes=nodes)
- for machine in machines:
- get_ports_and_portgroups(cloud, machine)
- cleanup_node_properties(machine, cloud)
- module.exit_json(changed=False, baremetal_nodes=machines)
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
+def main():
+ module = BaremetalNodeInfoModule()
+ module()
if __name__ == "__main__":
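
The rewritten info module looks a node up by name/ID or by MAC address and then fetches full details with get_node(). A minimal standalone sketch of that flow against openstacksdk, assuming a clouds.yaml entry named "devstack" and a node named "bm-0" (both placeholders):

    # Minimal sketch of the lookup flow in baremetal_node_info above.
    # Cloud name and node name are placeholders.
    import openstack

    conn = openstack.connect(cloud='devstack')

    def find_node_id(conn, name_or_id=None, mac=None):
        if name_or_id:
            # nodes() cannot filter by name or id, so find_node() is used.
            node = conn.baremetal.find_node(name_or_id)
            return node['id'] if node else None
        if mac:
            # A baremetal port carries the id of the node it belongs to.
            port = conn.get_nic_by_mac(mac)
            return port['node_id'] if port else None
        return None

    node_id = find_node_id(conn, name_or_id='bm-0')
    if node_id:
        node = conn.baremetal.get_node(node_id)
        print(node.to_dict(computed=False))
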
diff --git a/ansible_collections/openstack/cloud/plugins/modules/baremetal_port.py b/ansible_collections/openstack/cloud/plugins/modules/baremetal_port.py
index a72c1da6c..9a83d247a 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/baremetal_port.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/baremetal_port.py
@@ -1,39 +1,36 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 by Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
module: baremetal_port
short_description: Create/Delete Bare Metal port Resources from OpenStack
author: OpenStack Ansible SIG
description:
- Create, Update and Remove ironic ports from OpenStack.
options:
- state:
- description:
- - Indicates desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- uuid:
- description:
- - globally unique identifier (UUID) to be given to the resource. Will
- be auto-generated if not specified.
- type: str
- node:
- description:
- - UUID or Name of the Node this resource belongs to.
- type: str
address:
description:
- Physical hardware address of this network Port, typically the
hardware MAC address.
type: str
- portgroup:
+ extra:
+ description:
+ - A set of one or more arbitrary metadata key and value pairs.
+ type: dict
+ id:
description:
- - UUID or Name of the Portgroup this resource belongs to.
+ - ID of the Port.
+ - Will be auto-generated if not specified.
type: str
+ aliases: ['uuid']
+ is_pxe_enabled:
+ description:
+ - Whether PXE should be enabled or disabled on the Port.
+ type: bool
+ aliases: ['pxe_enabled']
local_link_connection:
description:
- The Port binding profile.
@@ -53,41 +50,37 @@ options:
- An optional string field to be used to store any vendor-specific
information.
type: str
- is_pxe_enabled:
+ node:
description:
- - Whether PXE should be enabled or disabled on the Port.
- type: bool
+ - ID or Name of the Node this resource belongs to.
+ type: str
physical_network:
description:
- The name of the physical network to which a port is connected.
type: str
- extra:
+ port_group:
description:
- - A set of one or more arbitrary metadata key and value pairs.
- type: dict
- ironic_url:
+ - ID or Name of the portgroup this resource belongs to.
+ type: str
+ aliases: ['portgroup']
+ state:
description:
- - If noauth mode is utilized, this is required to be set to the
- endpoint URL for the Ironic API. Use with "auth" and "auth_type"
- settings set to None.
+ - Indicates desired state of the resource
+ choices: ['present', 'absent']
+ default: present
type: str
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create Bare Metal port
+EXAMPLES = r'''
- name: Create Bare Metal port
openstack.cloud.baremetal_port:
cloud: devstack
state: present
node: bm-0
address: fa:16:3e:aa:aa:aa
- pxe_enabled: True
+ is_pxe_enabled: True
local_link_connection:
switch_id: 0a:1b:2c:3d:4e:5f
port_id: Ethernet3/1
@@ -96,45 +89,32 @@ EXAMPLES = '''
something: extra
physical_network: datacenter
register: result
-# Delete Bare Metal port
+
- name: Delete Bare Metal port
openstack.cloud.baremetal_port:
cloud: devstack
state: absent
address: fa:16:3e:aa:aa:aa
register: result
-# Update Bare Metal port
+
- name: Update Bare Metal port
openstack.cloud.baremetal_port:
cloud: devstack
state: present
- uuid: 1a85ebca-22bf-42eb-ad9e-f640789b8098
- pxe_enabled: False
+ id: 1a85ebca-22bf-42eb-ad9e-f640789b8098
+ is_pxe_enabled: False
local_link_connection:
switch_id: a0:b1:c2:d3:e4:f5
port_id: Ethernet4/12
switch_info: switch2
'''
-RETURN = '''
-id:
- description: Unique UUID of the port.
- returned: always, but can be null
- type: str
-result:
- description: A short text describing the result.
- returned: success
- type: str
-changes:
- description: Map showing from -> to values for properties that was changed
- after port update.
- returned: success
- type: dict
+RETURN = r'''
port:
description: A port dictionary, subset of the dictionary keys listed below
may be returned, depending on your cloud provider.
returned: success
- type: complex
+ type: dict
contains:
address:
description: Physical hardware address of this network Port,
@@ -163,6 +143,11 @@ port:
description: Whether PXE is enabled or disabled on the Port.
returned: success
type: bool
+ links:
+ description: A list of relative links, including the self and
+ bookmark links.
+ returned: success
+ type: list
local_link_connection:
description: The Port binding profile. If specified, must contain
switch_id (only a MAC address or an OpenFlow based
@@ -201,172 +186,113 @@ port:
type: str
'''
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
+ OpenStackModule
)
-_PROP_TO_ATTR_MAP = {
- 'pxe_enabled': 'is_pxe_enabled',
- 'address': 'address',
- 'extra': 'extra',
- 'local_link_connection': 'local_link_connection',
- 'physical_network': 'physical_network',
- 'node_uuid': 'node_id',
- 'portgroup_uuid': 'port_group_id',
- 'uuid': 'id',
-}
-
-
-def find_port(module, cloud):
- port = None
- if module.params['uuid']:
- port = cloud.baremetal.find_port(module.params['uuid'])
- elif module.params['address']:
- ports = list(cloud.baremetal.ports(address=module.params['address'],
- details=True))
- if ports and len(ports) == 1:
- port = ports[0]
- elif len(ports) > 1:
- module.fail_json(
- msg="Multiple ports with address {address} found. A uuid must "
- "be defined in order to identify the correct port"
- .format(address=module.params['address']))
-
- return port
-
-
-def add_port(module, cloud):
- port = find_port(module, cloud)
- if port:
- update_port(module, cloud, port=port)
-
- if not module.params['node'] or not module.params['address']:
- module.fail_json(
- msg="A Bare Metal node (name or uuid) and an address is required "
- "to create a port")
- machine = cloud.get_machine(module.params['node'])
- if not machine:
- module.fail_json(
- msg="Bare Metal node {node} could not be found".format(
- node=module.params['node']))
-
- module.params['node_uuid'] = machine.id
- props = {k: module.params[k] for k in _PROP_TO_ATTR_MAP.keys()
- if k in module.params}
- port = cloud.baremetal.create_port(**props)
- port_dict = port.to_dict()
- port_dict.pop('links', None)
- module.exit_json(
- changed=True,
- result="Port successfully created",
- changes=None,
- port=port_dict,
- id=port_dict['id'])
-
-
-def update_port(module, cloud, port=None):
- if not port:
- port = find_port(module, cloud)
-
- if module.params['node']:
- machine = cloud.get_machine(module.params['node'])
- if machine:
- module.params['node_uuid'] = machine.id
-
- old_props = {k: port[v] for k, v in _PROP_TO_ATTR_MAP.items()}
- new_props = {k: module.params[k] for k in _PROP_TO_ATTR_MAP.keys()
- if k in module.params and module.params[k] is not None}
- prop_diff = {k: new_props[k] for k in _PROP_TO_ATTR_MAP.keys()
- if k in new_props and old_props[k] != new_props[k]}
-
- if not prop_diff:
- port_dict = port.to_dict()
- port_dict.pop('links', None)
- module.exit_json(
- changed=False,
- result="No port update required",
- changes=None,
- port=port_dict,
- id=port_dict['id'])
-
- port = cloud.baremetal.update_port(port.id, **prop_diff)
- port_dict = port.to_dict()
- port_dict.pop('links', None)
- module.exit_json(
- changed=True,
- result="Port successfully updated",
- changes={k: {'to': new_props[k], 'from': old_props[k]}
- for k in prop_diff},
- port=port_dict,
- id=port_dict['id'])
-
-
-def remove_port(module, cloud):
- if not module.params['uuid'] and not module.params['address']:
- module.fail_json(
- msg="A uuid or an address value must be defined in order to "
- "remove a port.")
- if module.params['uuid']:
- port = cloud.baremetal.delete_port(module.params['uuid'])
- if not port:
- module.exit_json(
- changed=False,
- result="Port not found",
- changes=None,
- id=module.params['uuid'])
- else:
- port = find_port(module, cloud)
- if not port:
- module.exit_json(
- changed=False,
- result="Port not found",
- changes=None,
- id=None)
- port = cloud.baremetal.delete_port(port.id)
-
- module.exit_json(
- changed=True,
- result="Port successfully removed",
- changes=None,
- id=port.id)
-
-
-def main():
- argument_spec = ironic_argument_spec(
- uuid=dict(required=False),
- node=dict(required=False),
- address=dict(required=False),
- portgroup=dict(required=False),
- local_link_connection=dict(required=False, type='dict'),
- is_pxe_enabled=dict(required=False, type='bool'),
- physical_network=dict(required=False),
- extra=dict(required=False, type='dict'),
- state=dict(required=False,
- default='present',
- choices=['present', 'absent'])
+class BaremetalPortModule(OpenStackModule):
+ argument_spec = dict(
+ address=dict(),
+ extra=dict(type='dict'),
+ id=dict(aliases=['uuid']),
+ is_pxe_enabled=dict(type='bool', aliases=['pxe_enabled']),
+ local_link_connection=dict(type='dict'),
+ node=dict(),
+ physical_network=dict(),
+ port_group=dict(aliases=['portgroup']),
+ state=dict(default='present', choices=['present', 'absent']),
)
- module_kwargs = openstack_module_kwargs()
- module = IronicModule(argument_spec, **module_kwargs)
-
- module.params['pxe_enabled'] = module.params.pop('is_pxe_enabled', None)
+ module_kwargs = dict(
+ required_one_of=[
+ ('id', 'address'),
+ ],
+ required_if=[
+ ('state', 'present', ('node', 'address',), False),
+ ],
+ )
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- if module.params['state'] == 'present':
- add_port(module, cloud)
+ def run(self):
+ port = self._find_port()
+ state = self.params['state']
+ if state == 'present':
+ # create or update port
+
+ kwargs = {}
+ id = self.params['id']
+ if id:
+ kwargs['id'] = id
+
+ node_name_or_id = self.params['node']
+ # assert node_name_or_id
+ node = self.conn.baremetal.find_node(node_name_or_id,
+ ignore_missing=False)
+ kwargs['node_id'] = node['id']
+
+ port_group_name_or_id = self.params['port_group']
+ if port_group_name_or_id:
+ port_group = self.conn.baremetal.find_port_group(
+ port_group_name_or_id, ignore_missing=False)
+ kwargs['port_group_id'] = port_group['id']
+
+ for k in ['address', 'extra', 'is_pxe_enabled',
+ 'local_link_connection', 'physical_network']:
+ if self.params[k] is not None:
+ kwargs[k] = self.params[k]
+
+ changed = True
+ if not port:
+ # create port
+ port = self.conn.baremetal.create_port(**kwargs)
+ else:
+ # update port
+ updates = dict((k, v)
+ for k, v in kwargs.items()
+ if v != port[k])
+
+ if updates:
+ port = \
+ self.conn.baremetal.update_port(port['id'], **updates)
+ else:
+ changed = False
+
+ self.exit_json(changed=changed, port=port.to_dict(computed=False))
+
+ if state == 'absent':
+ # remove port
+ if not port:
+ self.exit_json(changed=False)
+
+ port = self.conn.baremetal.delete_port(port['id'])
+ self.exit_json(changed=True)
+
+ def _find_port(self):
+ id = self.params['id']
+ if id:
+ return self.conn.baremetal.get_port(id)
+
+ address = self.params['address']
+ if address:
+ ports = list(self.conn.baremetal.ports(address=address,
+ details=True))
+
+ if len(ports) == 1:
+ return ports[0]
+ elif len(ports) > 1:
+ raise ValueError(
+                    'Multiple ports with address {address} found. An ID'
+ ' must be defined in order to identify a unique'
+ ' port.'.format(address=address))
+ else:
+ return None
+
+ raise AssertionError("id or address must be specified")
- if module.params['state'] == 'absent':
- remove_port(module, cloud)
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
+def main():
+ module = BaremetalPortModule()
+ module()
if __name__ == "__main__":
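
For the update path above, only attributes that differ from the existing port are sent to the API. A rough standalone sketch of that diffing step, reusing the port UUID from the examples with made-up desired values:

    # Sketch of the "send only what changed" update step used in
    # baremetal_port above. The UUID comes from the module examples;
    # the desired values are made up.
    import openstack

    conn = openstack.connect(cloud='devstack')
    port = conn.baremetal.get_port('1a85ebca-22bf-42eb-ad9e-f640789b8098')

    desired = {
        'is_pxe_enabled': False,
        'physical_network': 'datacenter',
    }

    # Keep only the attributes whose desired value differs from the
    # current one; an empty dict means the port is already up to date.
    updates = {k: v for k, v in desired.items() if v != port[k]}

    if updates:
        port = conn.baremetal.update_port(port['id'], **updates)
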
diff --git a/ansible_collections/openstack/cloud/plugins/modules/baremetal_port_info.py b/ansible_collections/openstack/cloud/plugins/modules/baremetal_port_info.py
index d70c284dd..bf923ea40 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/baremetal_port_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/baremetal_port_info.py
@@ -1,70 +1,59 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 by Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
module: baremetal_port_info
short_description: Retrieve information about Bare Metal ports from OpenStack
author: OpenStack Ansible SIG
description:
- Retrieve information about Bare Metal ports from OpenStack.
options:
- uuid:
- description:
- - Name or globally unique identifier (UUID) to identify the port.
- type: str
address:
description:
- Physical hardware address of this network Port, typically the
hardware MAC address.
type: str
- node:
+ name:
description:
- - Name or globally unique identifier (UUID) to identify a Baremetal
- Node.
+ - Name or ID of the Bare Metal port.
type: str
- ironic_url:
+ aliases: ['uuid']
+ node:
description:
- - If noauth mode is utilized, this is required to be set to the
- endpoint URL for the Ironic API. Use with "auth" and "auth_type"
- settings set to None.
+ - Name or ID of a Bare Metal node.
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Gather information about all baremetal ports
-- openstack.cloud.baremetal_port_info:
+EXAMPLES = r'''
+- name: Gather information about all baremetal ports
+ openstack.cloud.baremetal_port_info:
cloud: devstack
- register: result
-# Gather information about a baremetal port by address
-- openstack.cloud.baremetal_port_info:
+
+- name: Gather information about a baremetal port by address
+ openstack.cloud.baremetal_port_info:
cloud: devstack
address: fa:16:3e:aa:aa:aa
- register: result
-# Gather information about a baremetal port by address
-- openstack.cloud.baremetal_port_info:
+
+- name: Gather information about a baremetal port by name or ID
+ openstack.cloud.baremetal_port_info:
cloud: devstack
- uuid: a2b6bd99-77b9-43f0-9ddc-826568e68dec
- register: result
-# Gather information about a baremetal ports associated with a baremetal node
-- openstack.cloud.baremetal_port_info:
+ name: a2b6bd99-77b9-43f0-9ddc-826568e68dec
+
+- name: Gather information about baremetal ports associated with a node
+ openstack.cloud.baremetal_port_info:
cloud: devstack
node: bm-0
- register: result
'''
-RETURN = '''
-baremetal_ports:
- description: Bare Metal port list. A subset of the dictionary keys
- listed below may be returned, depending on your cloud
- provider.
- returned: always, but can be null
+RETURN = r'''
+ports:
+ description: Bare Metal port list.
+ returned: always
type: list
elements: dict
contains:
@@ -95,6 +84,11 @@ baremetal_ports:
description: Whether PXE is enabled or disabled on the Port.
returned: success
type: bool
+ links:
+ description: A list of relative links, including the self and
+ bookmark links.
+ returned: success
+ type: list
local_link_connection:
description: The Port binding profile.
returned: success
@@ -140,68 +134,58 @@ baremetal_ports:
type: str
'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
+ OpenStackModule
)
-def main():
- argument_spec = ironic_argument_spec(
- uuid=dict(required=False),
- address=dict(required=False),
- node=dict(required=False),
+class BaremetalPortInfoModule(OpenStackModule):
+ argument_spec = dict(
+ address=dict(),
+ name=dict(aliases=['uuid']),
+ node=dict(),
)
- module_kwargs = openstack_module_kwargs()
- module_kwargs['supports_check_mode'] = True
- module = IronicModule(argument_spec, **module_kwargs)
-
- ports = list()
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- if module.params['uuid']:
- port = cloud.baremetal.find_port(module.params['uuid'])
- if not port:
- module.fail_json(
- msg='Baremetal port with uuid {uuid} was not found'
- .format(uuid=module.params['uuid']))
- ports.append(port)
-
- elif module.params['address']:
- ports = list(
- cloud.baremetal.ports(address=module.params['address'],
- details=True))
- if not ports:
- module.fail_json(
- msg='Baremetal port with address {address} was not found'
- .format(address=module.params['address']))
-
- elif module.params['node']:
- machine = cloud.get_machine(module.params['node'])
- if not machine:
- module.fail_json(
- msg='Baremetal node {node} was not found'
- .format(node=module.params['node']))
- ports = list(
- cloud.baremetal.ports(node_uuid=machine.uuid, details=True))
-
- else:
- ports = list(cloud.baremetal.ports(details=True))
-
- # Convert ports to dictionaries and cleanup properties
- ports = [port.to_dict() for port in ports]
- for port in ports:
- # links are not useful
- port.pop('links', None)
-
- module.exit_json(changed=False, baremetal_ports=ports)
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
+
+ module_kwargs = dict(
+ supports_check_mode=True,
+ )
+
+ def _fetch_ports(self):
+ name_or_id = self.params['name']
+
+ if name_or_id:
+ port = self.conn.baremetal.find_port(name_or_id)
+ return [port] if port else []
+
+ kwargs = {}
+ address = self.params['address']
+ if address:
+ kwargs['address'] = address
+
+ node_name_or_id = self.params['node']
+ if node_name_or_id:
+ node = self.conn.baremetal.find_node(node_name_or_id)
+ if node:
+ kwargs['node_uuid'] = node['id']
+ else:
+ # node does not exist so no port could possibly be found
+ return []
+
+ return self.conn.baremetal.ports(details=True, **kwargs)
+
+ def run(self):
+ ports = [port.to_dict(computed=False)
+ for port in self._fetch_ports()]
+
+ self.exit_json(changed=False,
+ ports=ports,
+ # keep for backward compatibility
+ baremetal_ports=ports)
+
+
+def main():
+ module = BaremetalPortInfoModule()
+ module()
if __name__ == "__main__":
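
The _fetch_ports() helper above resolves an optional node name to its UUID and then filters ports server-side. A short standalone sketch of the same query, assuming cloud "devstack" and node "bm-0" from the examples:

    # Sketch of listing the ports of a single node, mirroring _fetch_ports().
    import openstack

    conn = openstack.connect(cloud='devstack')

    node = conn.baremetal.find_node('bm-0')
    if node:
        # ports() accepts node_uuid as a query filter, as used above.
        ports = [p.to_dict(computed=False)
                 for p in conn.baremetal.ports(details=True,
                                               node_uuid=node['id'])]
    else:
        # A missing node means no port can match.
        ports = []
    print(ports)
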
diff --git a/ansible_collections/openstack/cloud/plugins/modules/catalog_service.py b/ansible_collections/openstack/cloud/plugins/modules/catalog_service.py
index 6d1962f3e..2587169f9 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/catalog_service.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/catalog_service.py
@@ -1,78 +1,86 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright 2016 Sam Yaple
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: catalog_service
-short_description: Manage OpenStack Identity services
+short_description: Manage OpenStack services
author: OpenStack Ansible SIG
description:
- - Create, update, or delete OpenStack Identity service. If a service
- with the supplied name already exists, it will be updated with the
- new description and enabled attributes.
+  - Create, update or delete an OpenStack service.
options:
name:
description:
- - Name of the service
+ - Name of the service.
required: true
type: str
description:
description:
- - Description of the service
+ - Description of the service.
type: str
- enabled:
+ is_enabled:
description:
- - Is the service enabled
+ - Whether this service is enabled or not.
type: bool
- default: 'yes'
- aliases: ['is_enabled']
+ aliases: ['enabled']
type:
description:
- - The type of service
+ - The type of service.
required: true
type: str
aliases: ['service_type']
state:
description:
- - Should the resource be present or absent.
- choices: [present, absent]
+ - Whether the service should be C(present) or C(absent).
+ choices: ['present', 'absent']
default: present
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create a service for glance
-- openstack.cloud.catalog_service:
+EXAMPLES = r'''
+- name: Create a service for glance
+ openstack.cloud.catalog_service:
cloud: mycloud
state: present
name: glance
type: image
description: OpenStack Image Service
-# Delete a service
-- openstack.cloud.catalog_service:
+
+- name: Delete a service
+ openstack.cloud.catalog_service:
cloud: mycloud
state: absent
name: glance
type: image
'''
-RETURN = '''
+RETURN = r'''
service:
description: Dictionary describing the service.
returned: On success when I(state) is 'present'
- type: complex
+ type: dict
contains:
+ description:
+ description: Service description.
+ type: str
+ sample: "OpenStack Image Service"
id:
description: Service ID.
type: str
sample: "3292f020780b4d5baf27ff7e1d224c44"
+ is_enabled:
+ description: Service status.
+ type: bool
+ sample: True
+ links:
+ description: Link of the service
+ type: str
+ sample: http://10.0.0.1/identity/v3/services/0ae87
name:
description: Service name.
type: str
@@ -81,32 +89,15 @@ service:
description: Service type.
type: str
sample: "image"
- service_type:
- description: Service type.
- type: str
- sample: "image"
- description:
- description: Service description.
- type: str
- sample: "OpenStack Image Service"
- enabled:
- description: Service status.
- type: bool
- sample: True
-id:
- description: The service ID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "3292f020780b4d5baf27ff7e1d224c44"
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class IdentityCatalogServiceModule(OpenStackModule):
+class CatalogServiceModule(OpenStackModule):
argument_spec = dict(
- description=dict(default=None),
- enabled=dict(default=True, aliases=['is_enabled'], type='bool'),
+ description=dict(),
+ is_enabled=dict(aliases=['enabled'], type='bool'),
name=dict(required=True),
type=dict(required=True, aliases=['service_type']),
state=dict(default='present', choices=['absent', 'present']),
@@ -116,73 +107,103 @@ class IdentityCatalogServiceModule(OpenStackModule):
supports_check_mode=True
)
- def _needs_update(self, service):
- for parameter in ('enabled', 'description', 'type'):
- if service[parameter] != self.params[parameter]:
- return True
- return False
-
- def _system_state_change(self, service):
+ def run(self):
state = self.params['state']
- if state == 'absent' and service:
- return True
- if state == 'present':
- if service is None:
- return True
- return self._needs_update(service)
+ service = self._find()
- return False
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, service))
- def run(self):
- description = self.params['description']
- enabled = self.params['enabled']
- name = self.params['name']
- state = self.params['state']
- type = self.params['type']
+ if state == 'present' and not service:
+ # Create service
+ service = self._create()
+ self.exit_json(changed=True,
+ service=service.to_dict(computed=False))
- services = self.conn.search_services(
- name_or_id=name, filters=(dict(type=type) if type else None))
+ elif state == 'present' and service:
+ # Update service
+ update = self._build_update(service)
+ if update:
+ service = self._update(service, update)
- service = None
- if len(services) > 1:
- self.fail_json(
- msg='Service name %s and type %s are not unique'
- % (name, type))
- elif len(services) == 1:
- service = services[0]
+ self.exit_json(changed=bool(update),
+ service=service.to_dict(computed=False))
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(service))
-
- args = {'name': name, 'enabled': enabled, 'type': type}
- if description:
- args['description'] = description
-
- if state == 'present':
- if service is None:
- service = self.conn.create_service(**args)
- changed = True
- else:
- if self._needs_update(service):
- service = self.conn.update_service(service,
- **args)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed, service=service, id=service.id)
-
- elif state == 'absent':
- if service is None:
- changed = False
- else:
- self.conn.identity.delete_service(service.id)
- changed = True
- self.exit_json(changed=changed)
+ elif state == 'absent' and service:
+ # Delete service
+ self._delete(service)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not service:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, service):
+ update = {}
+
+ non_updateable_keys = [k for k in ['name']
+ if self.params[k] is not None
+ and self.params[k] != service[k]]
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in ['description', 'is_enabled', 'type']
+ if self.params[k] is not None
+ and self.params[k] != service[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['description', 'is_enabled', 'name', 'type']
+ if self.params[k] is not None)
+
+ return self.conn.identity.create_service(**kwargs)
+
+ def _delete(self, service):
+ self.conn.identity.delete_service(service.id)
+
+ def _find(self):
+ kwargs = dict((k, self.params[k]) for k in ['name', 'type'])
+ matches = list(self.conn.identity.services(**kwargs))
+
+ if len(matches) > 1:
+            self.fail_json(msg='Found more than a single service'
+ ' matching the given parameters.')
+ elif len(matches) == 1:
+ return matches[0]
+ else: # len(matches) == 0
+ return None
+
+ def _update(self, service, update):
+ attributes = update.get('attributes')
+ if attributes:
+ service = self.conn.identity.update_service(service.id,
+ **attributes)
+
+ return service
+
+ def _will_change(self, state, service):
+ if state == 'present' and not service:
+ return True
+ elif state == 'present' and service:
+ return bool(self._build_update(service))
+ elif state == 'absent' and service:
+ return True
+ else:
+ # state == 'absent' and not service:
+ return False
def main():
- module = IdentityCatalogServiceModule()
+ module = CatalogServiceModule()
module()
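
The rewritten catalog_service module follows a find-then-create/update/delete cycle against the identity proxy and only sends changed attributes. A condensed standalone sketch of that cycle, reusing the "glance"/"image" values from the examples:

    # Condensed sketch of the find/create/update cycle in catalog_service.
    # Service name, type and description come from the module examples.
    import openstack

    conn = openstack.connect(cloud='mycloud')

    matches = list(conn.identity.services(name='glance', type='image'))
    # The module fails when more than one service matches; the sketch
    # simply assumes at most one.
    service = matches[0] if matches else None

    if service is None:
        service = conn.identity.create_service(
            name='glance', type='image',
            description='OpenStack Image Service', is_enabled=True)
    elif not service['is_enabled']:
        # Only changed attributes are sent, as in _build_update()/_update().
        service = conn.identity.update_service(service.id, is_enabled=True)
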
diff --git a/ansible_collections/openstack/cloud/plugins/modules/catalog_service_info.py b/ansible_collections/openstack/cloud/plugins/modules/catalog_service_info.py
new file mode 100644
index 000000000..f48d9545e
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/catalog_service_info.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022 by Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+DOCUMENTATION = r'''
+module: catalog_service_info
+short_description: Retrieve information about services from OpenStack
+author: OpenStack Ansible SIG
+description:
+ - Retrieve information about services from OpenStack.
+options:
+ name:
+ description:
+ - Name or ID of the service.
+ type: str
+extends_documentation_fragment:
+- openstack.cloud.openstack
+'''
+
+EXAMPLES = r'''
+- name: Fetch all services
+ openstack.cloud.catalog_service_info:
+ cloud: devstack
+
+- name: Fetch a single service
+ openstack.cloud.catalog_service_info:
+ cloud: devstack
+ name: heat
+'''
+
+RETURN = r'''
+services:
+  description: List of dictionaries describing the services.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: Service ID.
+ type: str
+ sample: "3292f020780b4d5baf27ff7e1d224c44"
+ name:
+ description: Service name.
+ type: str
+ sample: "glance"
+ type:
+ description: Service type.
+ type: str
+ sample: "image"
+ description:
+ description: Service description.
+ type: str
+ sample: "OpenStack Image Service"
+ is_enabled:
+ description: Service status.
+ type: bool
+ sample: True
+ links:
+ description: Link of the service
+ type: str
+ sample: http://10.0.0.1/identity/v3/services/0ae87
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
+ OpenStackModule
+)
+
+
+class CatalogServiceInfoModule(OpenStackModule):
+ argument_spec = dict(
+ name=dict(),
+ )
+
+ module_kwargs = dict(
+ supports_check_mode=True,
+ )
+
+ def run(self):
+ name_or_id = self.params['name']
+
+ if name_or_id:
+ service = self.conn.identity.find_service(name_or_id)
+ services = [service] if service else []
+ else:
+ services = self.conn.identity.services()
+
+ self.exit_json(changed=False,
+ services=[s.to_dict(computed=False) for s in services])
+
+
+def main():
+ module = CatalogServiceInfoModule()
+ module()
+
+
+if __name__ == "__main__":
+ main()
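
Like the other *_info modules in this patch, catalog_service_info returns an empty list instead of failing when a lookup misses. A tiny sketch of that convention, using the "heat" example above:

    # Sketch of the *_info lookup convention: a missing service yields an
    # empty list rather than an error.
    import openstack

    conn = openstack.connect(cloud='devstack')

    service = conn.identity.find_service('heat')
    services = [service.to_dict(computed=False)] if service else []
    print(services)
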
diff --git a/ansible_collections/openstack/cloud/plugins/modules/coe_cluster.py b/ansible_collections/openstack/cloud/plugins/modules/coe_cluster.py
index feb202a3b..3234a574d 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/coe_cluster.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/coe_cluster.py
@@ -1,290 +1,390 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2018 Catalyst IT Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: coe_cluster
-short_description: Add/Remove COE cluster from OpenStack Cloud
+short_description: Manage COE cluster in OpenStack Cloud
author: OpenStack Ansible SIG
description:
- - Add or Remove COE cluster from the OpenStack Container Infra service.
+ - Add or remove a COE (Container Orchestration Engine) cluster
+ via OpenStack's Magnum aka Container Infrastructure Management API.
options:
- cluster_template_id:
- description:
- - The template ID of cluster template.
- required: true
+ cluster_template_id:
+ description:
+ - The template ID of cluster template.
+ - Required if I(state) is C(present).
+ type: str
+ discovery_url:
+ description:
+ - URL used for cluster node discovery.
+ type: str
+ flavor_id:
+ description:
+ - The flavor of the minion node for this cluster template.
+ type: str
+ is_floating_ip_enabled:
+ description:
+      - Indicates whether the created cluster should have a floating IP.
+      - Whether or not to use the cloud provider's floating IP. Some cloud
+        providers use floating IPs, others use public IPs, so Magnum provides
+        this option to let you choose whether to use a floating IP.
+ - If not set, the value of I(is_floating_ip_enabled) of the cluster template
+ specified with I(cluster_template_id) will be used.
+ - When I(is_floating_ip_enabled) is set to C(true), then
+ I(external_network_id) in cluster template must be defined.
+ type: bool
+ aliases: ['floating_ip_enabled']
+ keypair:
+ description:
+ - Name of the keypair to use.
+ type: str
+ labels:
+ description:
+ - One or more key/value pairs.
+ type: raw
+ master_count:
+ description:
+ - The number of master nodes for this cluster.
+ - Magnum's default value for I(master_count) is 1.
+ type: int
+ master_flavor_id:
+ description:
+ - The flavor of the master node for this cluster template.
+ type: str
+ name:
+ description:
+      - Name that has to be given to the cluster.
+ required: true
+ type: str
+ node_count:
+ description:
+ - The number of nodes for this cluster.
+ - Magnum's default value for I(node_count) is 1.
+ type: int
+ state:
+ description:
+ - Indicate desired state of the resource.
+ choices: [present, absent]
+ default: present
+ type: str
+extends_documentation_fragment:
+ - openstack.cloud.openstack
+'''
+
+RETURN = r'''
+cluster:
+ description: Dictionary describing the cluster.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ api_address:
+ description: The endpoint URL of COE API exposed to end-users.
+ type: str
+ sample: https://172.24.4.30:6443
+ cluster_template_id:
+ description: The UUID of the cluster template.
type: str
- discovery_url:
- description:
- - Url used for cluster node discovery
+ sample: '7b1418c8-cea8-48fc-995d-52b66af9a9aa'
+ coe_version:
+      description: Version info of the chosen COE in the bay/cluster, which
+                   helps the client pick the right client version.
type: str
- docker_volume_size:
- description:
- - The size in GB of the docker volume
+ sample: v1.11.1
+ create_timeout:
+ description: Timeout for creating the cluster in minutes.
+                   Defaults to 60 if not set.
type: int
- flavor_id:
- description:
- - The flavor of the minion node for this ClusterTemplate
+ sample: 60
+ created_at:
+ description: The date and time in UTC at which the cluster is created.
type: str
- keypair:
- description:
- - Name of the keypair to use.
+ sample: "2018-08-16T10:29:45+00:00"
+ discovery_url:
+ description: The custom discovery url for node discovery. This is used
+ by the COE to discover the servers that have been created
+ to host the containers. The actual discovery mechanism
+ varies with the COE. In some cases, the service fills in
+ the server info in the discovery service. In other cases,
+ if the discovery_url is not specified, the service will
+ use the public discovery service at
+ U(https://discovery.etcd.io). In this case, the service
+ will generate a unique url here for each bay and store the
+ info for the servers.
type: str
- labels:
- description:
- - One or more key/value pairs
- type: raw
- master_flavor_id:
- description:
- - The flavor of the master node for this ClusterTemplate
+ sample: https://discovery.etcd.io/a42ee38e7113f31f4d6324f24367aae5
+ fixed_network:
+ description: The name or ID of the network to provide connectivity to the
+ internal network for the bay/cluster.
type: str
- master_count:
- description:
- - The number of master nodes for this cluster
- default: 1
- type: int
- name:
- description:
- - Name that has to be given to the cluster template
- required: true
+ fixed_subnet:
+ description: The fixed subnet to use when allocating network addresses
+ for nodes in bay/cluster.
+ type: str
+ flavor_id:
+ description: The flavor name or ID to use when booting the node servers.
+ Defaults to m1.small.
+ type: str
+ id:
+ description: Unique UUID for this cluster.
type: str
- node_count:
- description:
- - The number of nodes for this cluster
- default: 1
+ sample: '86246a4d-a16c-4a58-9e96ad7719fe0f9d'
+ is_floating_ip_enabled:
+ description: Indicates whether created clusters should have a
+ floating ip or not.
+ type: bool
+ sample: true
+ is_master_lb_enabled:
+ description: Indicates whether created clusters should have a load
+ balancer for master nodes or not.
+ type: bool
+ sample: true
+ keypair:
+ description: Name of the keypair to use.
+ type: str
+ sample: mykey
+ labels:
+ description: One or more key/value pairs.
+ type: dict
+ sample: {'key1': 'value1', 'key2': 'value2'}
+ master_addresses:
+ description: A list of floating IPs of all master nodes.
+ type: list
+ sample: ['172.24.4.5']
+ master_count:
+ description: The number of servers that will serve as master for the
+ bay/cluster. Set to more than 1 master to enable High
+ Availability. If the option master-lb-enabled is specified
+ in the baymodel/cluster template, the master servers will
+ be placed in a load balancer pool. Defaults to 1.
type: int
- state:
- description:
- - Indicate desired state of the resource.
- choices: [present, absent]
- default: present
+ sample: 1
+ master_flavor_id:
+ description: The flavor of the master node for this baymodel/cluster
+ template.
+ type: str
+ sample: c1.c1r1
+ name:
+ description: Name that has to be given to the cluster.
type: str
- timeout:
- description:
- - Timeout for creating the cluster in minutes. Default to 60 mins
- if not set
- default: 60
+ sample: k8scluster
+ node_addresses:
+ description: A list of floating IPs of all servers that serve as nodes.
+ type: list
+ sample: ['172.24.4.8']
+ node_count:
+      description: The number of nodes for this cluster.
type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-id:
- description: The cluster UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-cluster:
- description: Dictionary describing the cluster.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- api_address:
- description:
- - Api address of cluster master node
- type: str
- sample: https://172.24.4.30:6443
- cluster_template_id:
- description: The cluster_template UUID
- type: str
- sample: '7b1418c8-cea8-48fc-995d-52b66af9a9aa'
- coe_version:
- description:
- - Version of the COE software currently running in this cluster
- type: str
- sample: v1.11.1
- container_version:
- description:
- - "Version of the container software. Example: docker version."
- type: str
- sample: 1.12.6
- created_at:
- description:
- - The time in UTC at which the cluster is created
- type: str
- sample: "2018-08-16T10:29:45+00:00"
- create_timeout:
- description:
- - Timeout for creating the cluster in minutes. Default to 60 if
- not set.
- type: int
- sample: 60
- discovery_url:
- description:
- - Url used for cluster node discovery
- type: str
- sample: https://discovery.etcd.io/a42ee38e7113f31f4d6324f24367aae5
- faults:
- description:
- - Fault info collected from the Heat resources of this cluster
- type: dict
- sample: {'0': 'ResourceInError: resources[0].resources...'}
- flavor_id:
- description:
- - The flavor of the minion node for this cluster
- type: str
- sample: c1.c1r1
- keypair:
- description:
- - Name of the keypair to use.
- type: str
- sample: mykey
- labels:
- description: One or more key/value pairs
- type: dict
- sample: {'key1': 'value1', 'key2': 'value2'}
- master_addresses:
- description:
- - IP addresses of cluster master nodes
- type: list
- sample: ['172.24.4.5']
- master_count:
- description:
- - The number of master nodes for this cluster.
- type: int
- sample: 1
- master_flavor_id:
- description:
- - The flavor of the master node for this cluster
- type: str
- sample: c1.c1r1
- name:
- description:
- - Name that has to be given to the cluster
- type: str
- sample: k8scluster
- node_addresses:
- description:
- - IP addresses of cluster slave nodes
- type: list
- sample: ['172.24.4.8']
- node_count:
- description:
- - The number of master nodes for this cluster.
- type: int
- sample: 1
- stack_id:
- description:
- - Stack id of the Heat stack
- type: str
- sample: '07767ec6-85f5-44cb-bd63-242a8e7f0d9d'
- status:
- description: Status of the cluster from the heat stack
- type: str
- sample: 'CREATE_COMLETE'
- status_reason:
- description:
- - Status reason of the cluster from the heat stack
- type: str
- sample: 'Stack CREATE completed successfully'
- updated_at:
- description:
- - The time in UTC at which the cluster is updated
- type: str
- sample: '2018-08-16T10:39:25+00:00'
- id:
- description:
- - Unique UUID for this cluster
- type: str
- sample: '86246a4d-a16c-4a58-9e96ad7719fe0f9d'
+ sample: 1
+ stack_id:
+ description: The reference UUID of orchestration stack from Heat
+ orchestration service.
+ type: str
+ sample: '07767ec6-85f5-44cb-bd63-242a8e7f0d9d'
+ status:
+ description: Status of the cluster from the heat stack.
+ type: str
+      sample: 'CREATE_COMPLETE'
+    status_reason:
+      description: Status reason of the cluster from the heat stack.
+ type: str
+ sample: 'Stack CREATE completed successfully'
+ updated_at:
+ description: The date and time in UTC at which the cluster was updated.
+ type: str
+ sample: '2018-08-16T10:39:25+00:00'
+ uuid:
+ description: Unique UUID for this cluster.
+ type: str
+ sample: '86246a4d-a16c-4a58-9e96ad7719fe0f9d'
'''
-EXAMPLES = '''
-# Create a new Kubernetes cluster
-- openstack.cloud.coe_cluster:
- name: k8s
+EXAMPLES = r'''
+- name: Create a new Kubernetes cluster
+ openstack.cloud.coe_cluster:
+ cloud: devstack
cluster_template_id: k8s-ha
keypair: mykey
master_count: 3
+ name: k8s
node_count: 5
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class CoeClusterModule(OpenStackModule):
+class COEClusterModule(OpenStackModule):
argument_spec = dict(
- cluster_template_id=dict(required=True),
- discovery_url=dict(default=None),
- docker_volume_size=dict(type='int'),
- flavor_id=dict(default=None),
- keypair=dict(default=None, no_log=False),
- labels=dict(default=None, type='raw'),
- master_count=dict(type='int', default=1),
- master_flavor_id=dict(default=None),
+ cluster_template_id=dict(),
+ discovery_url=dict(),
+ flavor_id=dict(),
+ is_floating_ip_enabled=dict(type='bool',
+ aliases=['floating_ip_enabled']),
+        keypair=dict(no_log=False),  # noqa: no-log-needed
+ labels=dict(type='raw'),
+ master_count=dict(type='int'),
+ master_flavor_id=dict(),
name=dict(required=True),
- node_count=dict(type='int', default=1),
+ node_count=dict(type='int'),
state=dict(default='present', choices=['absent', 'present']),
- timeout=dict(type='int', default=60),
)
- module_kwargs = dict()
-
- def _parse_labels(self, labels):
- if isinstance(labels, str):
- labels_dict = {}
- for kv_str in labels.split(","):
- k, v = kv_str.split("=")
- labels_dict[k] = v
- return labels_dict
- if not labels:
- return {}
- return labels
+ module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('cluster_template_id',))
+ ],
+ supports_check_mode=True,
+ )
def run(self):
- params = self.params.copy()
-
state = self.params['state']
+
+ cluster = self._find()
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, cluster))
+
+ if state == 'present' and not cluster:
+ # Create cluster
+ cluster = self._create()
+ self.exit_json(changed=True,
+ cluster=cluster.to_dict(computed=False))
+
+ elif state == 'present' and cluster:
+ # Update cluster
+ update = self._build_update(cluster)
+ if update:
+ cluster = self._update(cluster, update)
+
+ self.exit_json(changed=bool(update),
+ cluster=cluster.to_dict(computed=False))
+
+ elif state == 'absent' and cluster:
+ # Delete cluster
+ self._delete(cluster)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not cluster:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, cluster):
+ update = {}
+
+ # TODO: Implement support for updates.
+ non_updateable_keys = [k for k in ['cluster_template_id',
+ 'discovery_url', 'flavor_id',
+ 'is_floating_ip_enabled', 'keypair',
+ 'master_count', 'master_flavor_id',
+ 'name', 'node_count']
+ if self.params[k] is not None
+ and self.params[k] != cluster[k]]
+
+ labels = self.params['labels']
+ if labels is not None:
+ if isinstance(labels, str):
+ labels = dict([tuple(kv.split(":"))
+ for kv in labels.split(",")])
+ if labels != cluster['labels']:
+ non_updateable_keys.append('labels')
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in []
+ if self.params[k] is not None
+ and self.params[k] != cluster[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ # TODO: Complement *_id parameters with find_* functions to allow
+ # specifying names in addition to IDs.
+ kwargs = dict((k, self.params[k])
+ for k in ['cluster_template_id', 'discovery_url',
+ 'flavor_id', 'is_floating_ip_enabled',
+ 'keypair', 'master_count', 'master_flavor_id',
+ 'name', 'node_count']
+ if self.params[k] is not None)
+
+ labels = self.params['labels']
+ if labels is not None:
+ if isinstance(labels, str):
+ labels = dict([tuple(kv.split(":"))
+ for kv in labels.split(",")])
+ kwargs['labels'] = labels
+
+ kwargs['create_timeout'] = self.params['timeout']
+
+ cluster = self.conn.container_infrastructure_management.\
+ create_cluster(**kwargs)
+
+ if not self.params['wait']:
+ # openstacksdk's create_cluster() returns a cluster's id only
+ # but we cannot use self.conn.container_infrastructure_management.\
+ # get_cluster(cluster_id) because it might return None as long as
+ # the cluster is being set up.
+ return cluster
+
+ if self.params['wait']:
+ cluster = self.sdk.resource.wait_for_status(
+ self.conn.container_infrastructure_management, cluster,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'])
+
+ return cluster
+
+ def _delete(self, cluster):
+ self.conn.container_infrastructure_management.\
+ delete_cluster(cluster['id'])
+
+ if self.params['wait']:
+ self.sdk.resource.wait_for_delete(
+ self.conn.container_infrastructure_management, cluster,
+ interval=None, wait=self.params['timeout'])
+
+ def _find(self):
name = self.params['name']
+ filters = {}
+
cluster_template_id = self.params['cluster_template_id']
+ if cluster_template_id is not None:
+ filters['cluster_template_id'] = cluster_template_id
+
+ return self.conn.get_coe_cluster(name_or_id=name, filters=filters)
+
+ def _update(self, cluster, update):
+ attributes = update.get('attributes')
+ if attributes:
+ # TODO: Implement support for updates.
+ # cluster = self.conn.container_infrastructure_management.\
+ # update_cluster(...)
+ pass
+
+ return cluster
- kwargs = dict(
- discovery_url=self.params['discovery_url'],
- docker_volume_size=self.params['docker_volume_size'],
- flavor_id=self.params['flavor_id'],
- keypair=self.params['keypair'],
- labels=self._parse_labels(params['labels']),
- master_count=self.params['master_count'],
- master_flavor_id=self.params['master_flavor_id'],
- node_count=self.params['node_count'],
- create_timeout=self.params['timeout'],
- )
-
- changed = False
- cluster = self.conn.get_coe_cluster(
- name_or_id=name, filters={'cluster_template_id': cluster_template_id})
-
- if state == 'present':
- if not cluster:
- cluster = self.conn.create_coe_cluster(
- name, cluster_template_id=cluster_template_id, **kwargs)
- changed = True
- else:
- changed = False
-
- # NOTE (brtknr): At present, create_coe_cluster request returns
- # cluster_id as `uuid` whereas get_coe_cluster request returns the
- # same field as `id`. This behaviour may change in the future
- # therefore try `id` first then `uuid`.
- cluster_id = cluster.get('id', cluster.get('uuid'))
- cluster['id'] = cluster['uuid'] = cluster_id
- self.exit_json(changed=changed, cluster=cluster, id=cluster_id)
- elif state == 'absent':
- if not cluster:
- self.exit_json(changed=False)
- else:
- self.conn.delete_coe_cluster(name)
- self.exit_json(changed=True)
+ def _will_change(self, state, cluster):
+ if state == 'present' and not cluster:
+ return True
+ elif state == 'present' and cluster:
+ return bool(self._build_update(cluster))
+ elif state == 'absent' and cluster:
+ return True
+ else:
+ # state == 'absent' and not cluster:
+ return False
def main():
- module = CoeClusterModule()
+ module = COEClusterModule()
module()
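
Cluster creation in _create() above can wait for the cluster to reach the "active" status via openstacksdk's resource helpers. A rough standalone sketch of create-and-wait, reusing the example values and an assumed one-hour timeout:

    # Rough sketch of creating a COE cluster and waiting for it to become
    # active, mirroring _create(). Values come from the module examples;
    # the 3600 second timeout is an assumption.
    import openstack
    from openstack import resource

    conn = openstack.connect(cloud='devstack')

    cluster = conn.container_infrastructure_management.create_cluster(
        name='k8s', cluster_template_id='k8s-ha',
        keypair='mykey', master_count=3, node_count=5)

    # wait_for_status() polls until the cluster is "active" or errors out.
    cluster = resource.wait_for_status(
        conn.container_infrastructure_management, cluster,
        status='active', failures=['error'], wait=3600)
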
diff --git a/ansible_collections/openstack/cloud/plugins/modules/coe_cluster_template.py b/ansible_collections/openstack/cloud/plugins/modules/coe_cluster_template.py
index 0596f39b7..ecc3a6889 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/coe_cluster_template.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/coe_cluster_template.py
@@ -1,386 +1,524 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2018 Catalyst IT Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: coe_cluster_template
-short_description: Add/Remove COE cluster template from OpenStack Cloud
+short_description: Manage COE cluster template in OpenStack Cloud
author: OpenStack Ansible SIG
description:
- - Add or Remove COE cluster template from the OpenStack Container Infra
- service.
+ - Add or remove a COE (Container Orchestration Engine) cluster template
+ via OpenStack's Magnum aka Container Infrastructure Management API.
options:
- coe:
- description:
- - The Container Orchestration Engine for this clustertemplate
- choices: [kubernetes, swarm, mesos]
+ coe:
+ description:
+ - The Container Orchestration Engine for this cluster template
+ - Required if I(state) is C(present).
+ choices: [kubernetes, swarm, mesos]
+ type: str
+ dns_nameserver:
+ description:
+ - The DNS nameserver address.
+ - Magnum's default value for I(dns_nameserver) is C(8.8.8.8).
+ type: str
+ docker_storage_driver:
+ description:
+ - Docker storage driver.
+ choices: [devicemapper, overlay, overlay2]
+ type: str
+ docker_volume_size:
+ description:
+ - The size in GB of the docker volume.
+ type: int
+ external_network_id:
+ description:
+ - The external network to attach to the cluster.
+ - When I(is_floating_ip_enabled) is set to C(true), then
+ I(external_network_id) must be defined.
+ type: str
+ fixed_network:
+ description:
+ - The fixed network name or id to attach to the cluster.
+ type: str
+ fixed_subnet:
+ description:
+ - The fixed subnet name or id to attach to the cluster.
+ type: str
+ flavor_id:
+ description:
+ - The flavor of the minion node for this cluster template.
+ type: str
+ is_floating_ip_enabled:
+ description:
+ - Indicates whether created clusters should have a floating ip or not.
+ - When I(is_floating_ip_enabled) is set to C(true), then
+ I(external_network_id) must be defined.
+ type: bool
+ default: true
+ aliases: ['floating_ip_enabled']
+ is_master_lb_enabled:
+ description:
+ - Indicates whether created clusters should have a load balancer
+ for master nodes or not.
+ - Magnum's default value for I(is_master_lb_enabled) is C(true),
+ ours is C(false).
+ type: bool
+ default: false
+ aliases: ['master_lb_enabled']
+ is_public:
+ description:
+ - Indicates whether the cluster template is public or not.
+ - Magnum's default value for I(is_public) is C(false).
+ type: bool
+ aliases: ['public']
+ is_registry_enabled:
+ description:
+ - Indicates whether the docker registry is enabled.
+ - Magnum's default value for I(is_registry_enabled) is C(false).
+ type: bool
+ aliases: ['registry_enabled']
+ is_tls_disabled:
+ description:
+ - Indicates whether the TLS should be disabled.
+ - Magnum's default value for I(is_tls_disabled) is C(false).
+ type: bool
+ aliases: ['tls_disabled']
+ keypair_id:
+ description:
+ - Name or ID of the keypair to use.
+ type: str
+ image_id:
+ description:
+ - Image id the cluster will be based on.
+ - Required if I(state) is C(present).
+ type: str
+ labels:
+ description:
+ - One or more key/value pairs.
+ type: raw
+ http_proxy:
+ description:
+ - Address of a proxy that will receive all HTTP requests and relay them.
+ - The format is a URL including a port number.
+ type: str
+ https_proxy:
+ description:
+ - Address of a proxy that will receive all HTTPS requests and relay them.
+ - The format is a URL including a port number.
+ type: str
+ master_flavor_id:
+ description:
+ - The flavor of the master node for this cluster template.
+ type: str
+ name:
+ description:
+ - Name that has to be given to the cluster template.
+ required: true
+ type: str
+ network_driver:
+ description:
+ - The name of the driver used for instantiating container networks.
+ choices: [flannel, calico, docker]
+ type: str
+ no_proxy:
+ description:
+ - A comma separated list of IPs for which proxies should not be
+ used in the cluster.
+ type: str
+ server_type:
+ description:
+ - Server type for this cluster template.
+ - Magnum's default value for I(server_type) is C(vm).
+ choices: [vm, bm]
+ type: str
+ state:
+ description:
+ - Indicate desired state of the resource.
+ choices: [present, absent]
+ default: present
+ type: str
+ volume_driver:
+ description:
+ - The name of the driver used for instantiating container volumes.
+ choices: [cinder, rexray]
+ type: str
+extends_documentation_fragment:
+ - openstack.cloud.openstack
+'''
+
+RETURN = r'''
+cluster_template:
+ description: Dictionary describing the template.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ apiserver_port:
+ description: The exposed port of COE API server.
+ type: int
+ cluster_distro:
+ description: Display the attribute os_distro defined as appropriate
+ metadata in image for the bay/cluster driver.
+ type: str
+ coe:
+ description: The Container Orchestration Engine for this cluster
+ template. Supported COEs include kubernetes, swarm, mesos.
+ type: str
+ sample: kubernetes
+ created_at:
+ description: The date and time when the resource was created.
type: str
- required: true
- dns_nameserver:
- description:
- - The DNS nameserver address
- default: '8.8.8.8'
+ dns_nameserver:
+ description: The DNS nameserver for the servers and containers in the
+ bay/cluster to use.
type: str
- docker_storage_driver:
- description:
- - Docker storage driver
- choices: [devicemapper, overlay, overlay2]
+ sample: '8.8.8.8'
+ docker_storage_driver:
+ description: "The name of a driver to manage the storage for the images
+ and the container's writable layer."
type: str
- docker_volume_size:
- description:
- - The size in GB of the docker volume
+ docker_volume_size:
+ description: The size in GB for the local storage on each server for the
+ Docker daemon to cache the images and host the containers.
type: int
- external_network_id:
- description:
- - The external network to attach to the Cluster
+ sample: 5
+ external_network_id:
+ description: The name or network ID of a Neutron network to provide
+ connectivity to the external internet for the bay/cluster.
type: str
- fixed_network:
- description:
- - The fixed network name to attach to the Cluster
+ sample: public
+ fixed_network:
+ description: The fixed network name to attach to the cluster.
type: str
- fixed_subnet:
- description:
- - The fixed subnet name to attach to the Cluster
+ sample: 07767ec6-85f5-44cb-bd63-242a8e7f0d9d
+ fixed_subnet:
+ description: The fixed subnet name to attach to the cluster.
type: str
- flavor_id:
- description:
- - The flavor of the minion node for this ClusterTemplate
+ sample: 05567ec6-85f5-44cb-bd63-242a8e7f0d9d
+ flavor_id:
+ description: The nova flavor ID or name for booting the node servers.
type: str
- floating_ip_enabled:
- description:
- - Indicates whether created clusters should have a floating ip or not
- type: bool
- default: true
- keypair_id:
- description:
- - Name or ID of the keypair to use.
+ sample: c1.c1r1
+ http_proxy:
+ description: Address of a proxy that will receive all HTTP requests
+ and relay them. The format is a URL including a port
+ number.
type: str
- image_id:
- description:
- - Image id the cluster will be based on
+ sample: http://10.0.0.11:9090
+ https_proxy:
+ description: Address of a proxy that will receive all HTTPS requests
+ and relay them. The format is a URL including a port
+ number.
type: str
- required: true
- labels:
- description:
- - One or more key/value pairs
- type: raw
- http_proxy:
- description:
- - Address of a proxy that will receive all HTTP requests and relay them
- The format is a URL including a port number
+ sample: https://10.0.0.10:8443
+ id:
+ description: The UUID of the cluster template.
type: str
- https_proxy:
- description:
- - Address of a proxy that will receive all HTTPS requests and relay
- them. The format is a URL including a port number
+ image_id:
+ description: The name or UUID of the base image in Glance to boot the
+ servers for the bay/cluster.
type: str
- master_flavor_id:
- description:
- - The flavor of the master node for this ClusterTemplate
+ sample: 05567ec6-85f5-44cb-bd63-242a8e7f0e9d
+ insecure_registry:
+      description: "The URL pointing to the user's own private insecure docker
+ registry to deploy and run docker containers."
type: str
- master_lb_enabled:
- description:
- - Indicates whether created clusters should have a load balancer
- for master nodes or not
+ is_floating_ip_enabled:
+ description: Indicates whether created clusters should have a
+ floating ip or not.
+ type: bool
+ sample: true
+ is_hidden:
+ description: Indicates whether the cluster template is hidden or not.
+ type: bool
+ sample: false
+ is_master_lb_enabled:
+ description: Indicates whether created clusters should have a load
+ balancer for master nodes or not.
+ type: bool
+ sample: true
+ is_public:
+ description: Access to a baymodel/cluster template is normally limited to
+ the admin, owner or users within the same tenant as the
+ owners. Setting this flag makes the baymodel/cluster
+ template public and accessible by other users. The default
+ is not public.
+ type: bool
+ sample: false
+ is_registry_enabled:
+ description: "Docker images by default are pulled from the public Docker
+ registry, but in some cases, users may want to use a
+ private registry. This option provides an alternative
+ registry based on the Registry V2: Magnum will create a
+ local registry in the bay/cluster backed by swift to host
+ the images. The default is to use the public registry."
type: bool
- default: 'no'
- name:
- description:
- - Name that has to be given to the cluster template
- required: true
+ sample: false
+ is_tls_disabled:
+ description: Transport Layer Security (TLS) is normally enabled to secure
+ the bay/cluster. In some cases, users may want to disable
+ TLS in the bay/cluster, for instance during development or
+ to troubleshoot certain problems. Specifying this parameter
+ will disable TLS so that users can access the COE endpoints
+ without a certificate. The default is TLS enabled.
+ type: bool
+ sample: false
+ keypair_id:
+ description: Name of the SSH keypair to configure in the bay/cluster
+ servers for ssh access.
type: str
- network_driver:
- description:
- - The name of the driver used for instantiating container networks
- choices: [flannel, calico, docker]
+ sample: mykey
+ labels:
+ description: One or more key/value pairs.
+ type: dict
+ sample: {'key1': 'value1', 'key2': 'value2'}
+ master_flavor_id:
+ description: The flavor of the master node for this cluster template.
type: str
- no_proxy:
- description:
- - A comma separated list of IPs for which proxies should not be
- used in the cluster
+ sample: c1.c1r1
+ name:
+ description: Name that has to be given to the cluster template.
type: str
- public:
- description:
- - Indicates whether the ClusterTemplate is public or not
- type: bool
- default: 'no'
- registry_enabled:
- description:
- - Indicates whether the docker registry is enabled
- type: bool
- default: 'no'
- server_type:
- description:
- - Server type for this ClusterTemplate
- choices: [vm, bm]
- default: vm
+ sample: k8scluster
+ network_driver:
+ description: The name of a network driver for providing the networks for
+ the containers
type: str
- state:
- description:
- - Indicate desired state of the resource.
- choices: [present, absent]
- default: present
+ sample: calico
+ no_proxy:
+ description: A comma separated list of IPs for which proxies should
+ not be used in the cluster.
type: str
- tls_disabled:
- description:
- - Indicates whether the TLS should be disabled
- type: bool
- default: 'no'
- volume_driver:
- description:
- - The name of the driver used for instantiating container volumes
- choices: [cinder, rexray]
+ sample: 10.0.0.4,10.0.0.5
+ server_type:
+ description: The servers in the bay/cluster can be vm or baremetal.
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-id:
- description: The cluster UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-cluster_template:
- description: Dictionary describing the template.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- coe:
- description: The Container Orchestration Engine for this clustertemplate
- type: str
- sample: kubernetes
- dns_nameserver:
- description: The DNS nameserver address
- type: str
- sample: '8.8.8.8'
- docker_storage_driver:
- description: Docker storage driver
- type: str
- sample: devicemapper
- docker_volume_size:
- description: The size in GB of the docker volume
- type: int
- sample: 5
- external_network_id:
- description: The external network to attach to the Cluster
- type: str
- sample: public
- fixed_network:
- description: The fixed network name to attach to the Cluster
- type: str
- sample: 07767ec6-85f5-44cb-bd63-242a8e7f0d9d
- fixed_subnet:
- description:
- - The fixed subnet name to attach to the Cluster
- type: str
- sample: 05567ec6-85f5-44cb-bd63-242a8e7f0d9d
- flavor_id:
- description:
- - The flavor of the minion node for this ClusterTemplate
- type: str
- sample: c1.c1r1
- floating_ip_enabled:
- description:
- - Indicates whether created clusters should have a floating ip or not
- type: bool
- sample: true
- keypair_id:
- description:
- - Name or ID of the keypair to use.
- type: str
- sample: mykey
- image_id:
- description:
- - Image id the cluster will be based on
- type: str
- sample: 05567ec6-85f5-44cb-bd63-242a8e7f0e9d
- labels:
- description: One or more key/value pairs
- type: dict
- sample: {'key1': 'value1', 'key2': 'value2'}
- http_proxy:
- description:
- - Address of a proxy that will receive all HTTP requests and relay them
- The format is a URL including a port number
- type: str
- sample: http://10.0.0.11:9090
- https_proxy:
- description:
- - Address of a proxy that will receive all HTTPS requests and relay
- them. The format is a URL including a port number
- type: str
- sample: https://10.0.0.10:8443
- master_flavor_id:
- description:
- - The flavor of the master node for this ClusterTemplate
- type: str
- sample: c1.c1r1
- master_lb_enabled:
- description:
- - Indicates whether created clusters should have a load balancer
- for master nodes or not
- type: bool
- sample: true
- name:
- description:
- - Name that has to be given to the cluster template
- type: str
- sample: k8scluster
- network_driver:
- description:
- - The name of the driver used for instantiating container networks
- type: str
- sample: calico
- no_proxy:
- description:
- - A comma separated list of IPs for which proxies should not be
- used in the cluster
- type: str
- sample: 10.0.0.4,10.0.0.5
- public:
- description:
- - Indicates whether the ClusterTemplate is public or not
- type: bool
- sample: false
- registry_enabled:
- description:
- - Indicates whether the docker registry is enabled
- type: bool
- sample: false
- server_type:
- description:
- - Server type for this ClusterTemplate
- type: str
- sample: vm
- tls_disabled:
- description:
- - Indicates whether the TLS should be disabled
- type: bool
- sample: false
- volume_driver:
- description:
- - The name of the driver used for instantiating container volumes
- type: str
- sample: cinder
+ sample: vm
+ updated_at:
+ description: The date and time when the resource was updated.
+ type: str
+ uuid:
+ description: The UUID of the cluster template.
+ type: str
+ volume_driver:
+ description: The name of a volume driver for managing the persistent
+ storage for the containers.
+ type: str
+ sample: cinder
'''
-EXAMPLES = '''
-# Create a new Kubernetes cluster template
-- openstack.cloud.coe_cluster_template:
- name: k8s
+EXAMPLES = r'''
+- name: Create a new Kubernetes cluster template
+ openstack.cloud.coe_cluster_template:
+ cloud: devstack
coe: kubernetes
- keypair_id: mykey
image_id: 2a8c9888-9054-4b06-a1ca-2bb61f9adb72
- public: no
+ keypair_id: mykey
+ name: k8s
+ is_public: false
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class CoeClusterTemplateModule(OpenStackModule):
+class COEClusterTemplateModule(OpenStackModule):
argument_spec = dict(
- coe=dict(required=True, choices=['kubernetes', 'swarm', 'mesos']),
- dns_nameserver=dict(default='8.8.8.8'),
- docker_storage_driver=dict(choices=['devicemapper', 'overlay', 'overlay2']),
+ coe=dict(choices=['kubernetes', 'swarm', 'mesos']),
+ dns_nameserver=dict(),
+ docker_storage_driver=dict(choices=['devicemapper', 'overlay',
+ 'overlay2']),
docker_volume_size=dict(type='int'),
- external_network_id=dict(default=None),
- fixed_network=dict(default=None),
- fixed_subnet=dict(default=None),
- flavor_id=dict(default=None),
- floating_ip_enabled=dict(type='bool', default=True),
- keypair_id=dict(default=None),
- image_id=dict(required=True),
- labels=dict(default=None, type='raw'),
- http_proxy=dict(default=None),
- https_proxy=dict(default=None),
- master_lb_enabled=dict(type='bool', default=False),
- master_flavor_id=dict(default=None),
+ external_network_id=dict(),
+ fixed_network=dict(),
+ fixed_subnet=dict(),
+ flavor_id=dict(),
+ http_proxy=dict(),
+ https_proxy=dict(),
+ image_id=dict(),
+ is_floating_ip_enabled=dict(type='bool', default=True,
+ aliases=['floating_ip_enabled']),
+ keypair_id=dict(),
+ labels=dict(type='raw'),
+ master_flavor_id=dict(),
+ is_master_lb_enabled=dict(type='bool', default=False,
+ aliases=['master_lb_enabled']),
+ is_public=dict(type='bool', aliases=['public']),
+ is_registry_enabled=dict(type='bool', aliases=['registry_enabled']),
+ is_tls_disabled=dict(type='bool', aliases=['tls_disabled']),
name=dict(required=True),
network_driver=dict(choices=['flannel', 'calico', 'docker']),
- no_proxy=dict(default=None),
- public=dict(type='bool', default=False),
- registry_enabled=dict(type='bool', default=False),
- server_type=dict(default="vm", choices=['vm', 'bm']),
+ no_proxy=dict(),
+ server_type=dict(choices=['vm', 'bm']),
state=dict(default='present', choices=['absent', 'present']),
- tls_disabled=dict(type='bool', default=False),
volume_driver=dict(choices=['cinder', 'rexray']),
)
- module_kwargs = dict()
-
- def _parse_labels(self, labels):
- if isinstance(labels, str):
- labels_dict = {}
- for kv_str in labels.split(","):
- k, v = kv_str.split("=")
- labels_dict[k] = v
- return labels_dict
- if not labels:
- return {}
- return labels
+ module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('coe', 'image_id')),
+ ],
+ supports_check_mode=True,
+ )
def run(self):
- params = self.params.copy()
-
state = self.params['state']
+
+ cluster_template = self._find()
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, cluster_template))
+
+ if state == 'present' and not cluster_template:
+ # Create cluster_template
+ cluster_template = self._create()
+ self.exit_json(
+ changed=True,
+ cluster_template=cluster_template.to_dict(computed=False))
+
+ elif state == 'present' and cluster_template:
+ # Update cluster_template
+ update = self._build_update(cluster_template)
+ if update:
+ cluster_template = self._update(cluster_template, update)
+
+ self.exit_json(
+ changed=bool(update),
+ cluster_template=cluster_template.to_dict(computed=False))
+
+ elif state == 'absent' and cluster_template:
+ # Delete cluster_template
+ self._delete(cluster_template)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not cluster_template:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, cluster_template):
+ update = {}
+
+ if self.params['is_floating_ip_enabled'] \
+ and self.params['external_network_id'] is None:
+ raise ValueError('is_floating_ip_enabled is True'
+ ' but external_network_id is missing')
+
+ # TODO: Implement support for updates.
+ non_updateable_keys = [k for k in ['coe', 'dns_nameserver',
+ 'docker_storage_driver',
+ 'docker_volume_size',
+ 'external_network_id',
+ 'fixed_network',
+ 'fixed_subnet', 'flavor_id',
+ 'http_proxy', 'https_proxy',
+ 'image_id',
+ 'is_floating_ip_enabled',
+ 'is_master_lb_enabled',
+ 'is_public', 'is_registry_enabled',
+ 'is_tls_disabled', 'keypair_id',
+ 'master_flavor_id', 'name',
+ 'network_driver', 'no_proxy',
+ 'server_type', 'volume_driver']
+ if self.params[k] is not None
+ and self.params[k] != cluster_template[k]]
+
+ labels = self.params['labels']
+ if labels is not None:
+ if isinstance(labels, str):
+ labels = dict([tuple(kv.split(":"))
+ for kv in labels.split(",")])
+ if labels != cluster_template['labels']:
+ non_updateable_keys.append('labels')
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in []
+ if self.params[k] is not None
+ and self.params[k] != cluster_template[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ if self.params['is_floating_ip_enabled'] \
+ and self.params['external_network_id'] is None:
+ raise ValueError('is_floating_ip_enabled is True'
+ ' but external_network_id is missing')
+
+ # TODO: Complement *_id parameters with find_* functions to allow
+ # specifying names in addition to IDs.
+ kwargs = dict((k, self.params[k])
+ for k in ['coe', 'dns_nameserver',
+ 'docker_storage_driver', 'docker_volume_size',
+ 'external_network_id', 'fixed_network',
+ 'fixed_subnet', 'flavor_id', 'http_proxy',
+ 'https_proxy', 'image_id',
+ 'is_floating_ip_enabled',
+ 'is_master_lb_enabled', 'is_public',
+ 'is_registry_enabled', 'is_tls_disabled',
+ 'keypair_id', 'master_flavor_id', 'name',
+ 'network_driver', 'no_proxy', 'server_type',
+ 'volume_driver']
+ if self.params[k] is not None)
+
+ labels = self.params['labels']
+ if labels is not None:
+ if isinstance(labels, str):
+ labels = dict([tuple(kv.split(":"))
+ for kv in labels.split(",")])
+ kwargs['labels'] = labels
+
+ return self.conn.container_infrastructure_management.\
+ create_cluster_template(**kwargs)
+
+ def _delete(self, cluster_template):
+ self.conn.container_infrastructure_management.\
+ delete_cluster_template(cluster_template['id'])
+
+ def _find(self):
name = self.params['name']
- coe = self.params['coe']
+ filters = {}
+
image_id = self.params['image_id']
+ if image_id is not None:
+ filters['image_id'] = image_id
- kwargs = dict(
- dns_nameserver=self.params['dns_nameserver'],
- docker_storage_driver=self.params['docker_storage_driver'],
- docker_volume_size=self.params['docker_volume_size'],
- external_network_id=self.params['external_network_id'],
- fixed_network=self.params['fixed_network'],
- fixed_subnet=self.params['fixed_subnet'],
- flavor_id=self.params['flavor_id'],
- floating_ip_enabled=self.params['floating_ip_enabled'],
- keypair_id=self.params['keypair_id'],
- labels=self._parse_labels(params['labels']),
- http_proxy=self.params['http_proxy'],
- https_proxy=self.params['https_proxy'],
- master_lb_enabled=self.params['master_lb_enabled'],
- master_flavor_id=self.params['master_flavor_id'],
- network_driver=self.params['network_driver'],
- no_proxy=self.params['no_proxy'],
- public=self.params['public'],
- registry_enabled=self.params['registry_enabled'],
- server_type=self.params['server_type'],
- tls_disabled=self.params['tls_disabled'],
- volume_driver=self.params['volume_driver'],
- )
-
- changed = False
- template = self.conn.get_coe_cluster_template(
- name_or_id=name, filters={'coe': coe, 'image_id': image_id})
-
- if state == 'present':
- if not template:
- template = self.conn.create_coe_cluster_template(
- name, coe=coe, image_id=image_id, **kwargs)
- changed = True
- else:
- changed = False
+ coe = self.params['coe']
+ if coe is not None:
+ filters['coe'] = coe
- self.exit_json(
- changed=changed, cluster_template=template, id=template['uuid'])
- elif state == 'absent':
- if not template:
- self.exit_json(changed=False)
- else:
- self.conn.delete_coe_cluster_template(name)
- self.exit_json(changed=True)
+ return self.conn.get_cluster_template(name_or_id=name,
+ filters=filters)
+
+ def _update(self, cluster_template, update):
+ attributes = update.get('attributes')
+ if attributes:
+ # TODO: Implement support for updates.
+ # cluster_template = self.conn.\
+ # container_infrastructure_management.update_cluster_template(...)
+ pass
+
+ return cluster_template
+
+ def _will_change(self, state, cluster_template):
+ if state == 'present' and not cluster_template:
+ return True
+ elif state == 'present' and cluster_template:
+ return bool(self._build_update(cluster_template))
+ elif state == 'absent' and cluster_template:
+ return True
+ else:
+ # state == 'absent' and not cluster_template:
+ return False
def main():
- module = CoeClusterTemplateModule()
+ module = COEClusterTemplateModule()
module()
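The labels option above accepts either a dict or a "key:value,key:value" string; _create() and _build_update() normalize the string form into a dict before sending it to Magnum or comparing it with the existing template. A small sketch of that normalization (splitting on the first colon only is my relaxation so that values may contain colons; the module itself splits on every colon):

def normalize_labels(labels):
    # Accept None, a dict, or a "key:value,key:value" string and return a dict,
    # mirroring the label handling in _create()/_build_update() above.
    if labels is None:
        return {}
    if isinstance(labels, str):
        return dict(kv.split(':', 1) for kv in labels.split(','))
    return dict(labels)


assert normalize_labels('kube_tag:v1.21.1,monitoring_enabled:true') == {
    'kube_tag': 'v1.21.1', 'monitoring_enabled': 'true'}
assert normalize_labels(None) == {}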
diff --git a/ansible_collections/openstack/cloud/plugins/modules/compute_flavor.py b/ansible_collections/openstack/cloud/plugins/modules/compute_flavor.py
index 8a993ca51..7a2ee25e5 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/compute_flavor.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/compute_flavor.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -9,74 +10,78 @@ module: compute_flavor
short_description: Manage OpenStack compute flavors
author: OpenStack Ansible SIG
description:
- - Add or remove flavors from OpenStack.
+ - Add or remove compute flavors from OpenStack.
+ - Updating a flavor consists of deleting and (re)creating a flavor.
options:
- state:
- description:
- - Indicate desired state of the resource. When I(state) is 'present',
- then I(ram), I(vcpus), and I(disk) are all required. There are no
- default values for those parameters.
- choices: ['present', 'absent']
- default: present
- type: str
- name:
- description:
- - Flavor name.
- required: true
- type: str
- ram:
- description:
- - Amount of memory, in MB.
- type: int
- vcpus:
- description:
- - Number of virtual CPUs.
- type: int
- disk:
- description:
- - Size of local disk, in GB.
- default: 0
- type: int
- ephemeral:
- description:
- - Ephemeral space size, in GB.
- default: 0
- type: int
- swap:
- description:
- - Swap space size, in MB.
- default: 0
- type: int
- rxtx_factor:
- description:
- - RX/TX factor.
- default: 1.0
- type: float
- is_public:
- description:
- - Make flavor accessible to the public.
- type: bool
- default: 'yes'
- flavorid:
- description:
- - ID for the flavor. This is optional as a unique UUID will be
- assigned if a value is not specified.
- default: "auto"
- type: str
- extra_specs:
- description:
- - Metadata dictionary
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - Description of the flavor.
+ type: str
+ disk:
+ description:
+ - Size of local disk, in GB.
+ - Required when I(state) is C(present).
+ type: int
+ ephemeral:
+ description:
+ - Ephemeral space size, in GB.
+ type: int
+ extra_specs:
+ description:
+ - Metadata dictionary
+ type: dict
+ id:
+ description:
+ - ID for the flavor. This is optional as a unique UUID will be
+ assigned if a value is not specified.
+ - Note that this ID will only be used when first creating the flavor.
+ - The ID of an existing flavor cannot be changed.
+ - When I(id) is set to C(auto), a new id will be autogenerated.
+ C(auto) is kept for backward compatibility and
+ will be dropped in the next major release.
+ type: str
+ aliases: ['flavorid']
+ is_public:
+ description:
+ - Make flavor accessible to the public.
+ type: bool
+ name:
+ description:
+ - Flavor name.
+ required: true
+ type: str
+ ram:
+ description:
+ - Amount of memory, in MB.
+ - Required when I(state) is C(present).
+ type: int
+ rxtx_factor:
+ description:
+ - RX/TX factor.
+ type: float
+ state:
+ description:
+ - Indicate desired state of the resource.
+ - When I(state) is C(present), then I(ram), I(vcpus), and I(disk) are
+ required. There are no default values for those parameters.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ swap:
+ description:
+ - Swap space size, in MB.
+ type: int
+ vcpus:
+ description:
+ - Number of virtual CPUs.
+ - Required when I(state) is C(present).
+ type: int
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
-- name: "Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of local disk, and 10GB of ephemeral."
+- name: Create tiny flavor with 1024MB RAM, 1 vCPU, 10GB disk, 10GB ephemeral
openstack.cloud.compute_flavor:
cloud: mycloud
state: present
@@ -85,8 +90,9 @@ EXAMPLES = '''
vcpus: 1
disk: 10
ephemeral: 10
+ description: "I am flavor mycloud"
-- name: "Delete 'tiny' flavor"
+- name: Delete tiny flavor
openstack.cloud.compute_flavor:
cloud: mycloud
state: absent
@@ -107,57 +113,77 @@ EXAMPLES = '''
RETURN = '''
flavor:
- description: Dictionary describing the flavor.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Flavor ID.
- returned: success
- type: str
- sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
- name:
- description: Flavor name.
- returned: success
- type: str
- sample: "tiny"
- disk:
- description: Size of local disk, in GB.
- returned: success
- type: int
- sample: 10
- ephemeral:
- description: Ephemeral space size, in GB.
- returned: success
- type: int
- sample: 10
- ram:
- description: Amount of memory, in MB.
- returned: success
- type: int
- sample: 1024
- swap:
- description: Swap space size, in MB.
- returned: success
- type: int
- sample: 100
- vcpus:
- description: Number of virtual CPUs.
- returned: success
- type: int
- sample: 2
- is_public:
- description: Make flavor accessible to the public.
- returned: success
- type: bool
- sample: true
- extra_specs:
- description: Flavor metadata
- returned: success
- type: dict
- sample:
- "quota:disk_read_iops_sec": 5000
- "aggregate_instance_extra_specs:pinned": false
+ description: Dictionary describing the flavor.
+ returned: On success when I(state) is 'present'
+ type: dict
+ contains:
+ description:
+ description: Description attached to flavor
+ returned: success
+ type: str
+ sample: Example description
+ disk:
+ description: Size of local disk, in GB.
+ returned: success
+ type: int
+ sample: 10
+ ephemeral:
+ description: Ephemeral space size, in GB.
+ returned: success
+ type: int
+ sample: 10
+ extra_specs:
+ description: Flavor metadata
+ returned: success
+ type: dict
+ sample:
+ "quota:disk_read_iops_sec": 5000
+ "aggregate_instance_extra_specs:pinned": false
+ id:
+ description: Flavor ID.
+ returned: success
+ type: str
+ sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
+ is_disabled:
+ description: Whether the flavor is disabled
+ returned: success
+ type: bool
+ sample: true
+ is_public:
+ description: Make flavor accessible to the public.
+ returned: success
+ type: bool
+ sample: true
+ name:
+ description: Flavor name.
+ returned: success
+ type: str
+ sample: "tiny"
+ original_name:
+ description: The name of this flavor when returned by server list/show
+ type: str
+ returned: success
+ ram:
+ description: Amount of memory, in MB.
+ returned: success
+ type: int
+ sample: 1024
+ rxtx_factor:
+ description: |
+ The bandwidth scaling factor this flavor receives on the network
+ returned: success
+ type: int
+ sample: 100
+ swap:
+ description: Swap space size, in MB.
+ returned: success
+ type: int
+ sample: 100
+ vcpus:
+ description: Number of virtual CPUs.
+ returned: success
+ type: int
+ sample: 2
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -165,21 +191,18 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ComputeFlavorModule(OpenStackModule):
argument_spec = dict(
- state=dict(required=False, default='present',
- choices=['absent', 'present']),
+ description=dict(),
+ disk=dict(type='int'),
+ ephemeral=dict(type='int'),
+ extra_specs=dict(type='dict'),
+ id=dict(aliases=['flavorid']),
+ is_public=dict(type='bool'),
name=dict(required=True),
-
- # required when state is 'present'
- ram=dict(required=False, type='int'),
- vcpus=dict(required=False, type='int'),
-
- disk=dict(required=False, default=0, type='int'),
- ephemeral=dict(required=False, default=0, type='int'),
- swap=dict(required=False, default=0, type='int'),
- rxtx_factor=dict(required=False, default=1.0, type='float'),
- is_public=dict(required=False, default=True, type='bool'),
- flavorid=dict(required=False, default="auto"),
- extra_specs=dict(required=False, default=None, type='dict'),
+ ram=dict(type='int'),
+ rxtx_factor=dict(type='float'),
+ state=dict(default='present', choices=['absent', 'present']),
+ swap=dict(type='int'),
+ vcpus=dict(type='int'),
)
module_kwargs = dict(
@@ -189,81 +212,151 @@ class ComputeFlavorModule(OpenStackModule):
supports_check_mode=True
)
- def _system_state_change(self, flavor):
- state = self.params['state']
- if state == 'present' and not flavor:
- return True
- if state == 'absent' and flavor:
- return True
- return False
-
def run(self):
state = self.params['state']
+ id = self.params['id']
name = self.params['name']
- extra_specs = self.params['extra_specs'] or {}
-
- flavor = self.conn.get_flavor(name)
+ name_or_id = id if id and id != 'auto' else name
+ flavor = self.conn.compute.find_flavor(name_or_id,
+ get_extra_specs=True)
if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(flavor))
-
- if state == 'present':
- old_extra_specs = {}
- require_update = False
-
- if flavor:
- old_extra_specs = flavor['extra_specs']
- if flavor['swap'] == "":
- flavor['swap'] = 0
- for param_key in ['ram', 'vcpus', 'disk', 'ephemeral',
- 'swap', 'rxtx_factor', 'is_public']:
- if self.params[param_key] != flavor[param_key]:
- require_update = True
- break
- flavorid = self.params['flavorid']
- if flavor and require_update:
- self.conn.delete_flavor(name)
- old_extra_specs = {}
- if flavorid == 'auto':
- flavorid = flavor['id']
- flavor = None
-
- if not flavor:
- flavor = self.conn.create_flavor(
- name=name,
- ram=self.params['ram'],
- vcpus=self.params['vcpus'],
- disk=self.params['disk'],
- flavorid=flavorid,
- ephemeral=self.params['ephemeral'],
- swap=self.params['swap'],
- rxtx_factor=self.params['rxtx_factor'],
- is_public=self.params['is_public']
- )
- changed = True
- else:
- changed = False
-
- new_extra_specs = dict([(k, str(v)) for k, v in extra_specs.items()])
- unset_keys = set(old_extra_specs.keys()) - set(extra_specs.keys())
-
- if unset_keys and not require_update:
- self.conn.unset_flavor_specs(flavor['id'], unset_keys)
-
- if old_extra_specs != new_extra_specs:
- self.conn.set_flavor_specs(flavor['id'], extra_specs)
-
- changed = (changed or old_extra_specs != new_extra_specs)
-
- self.exit_json(
- changed=changed, flavor=flavor, id=flavor['id'])
-
- elif state == 'absent':
- if flavor:
- self.conn.delete_flavor(name)
- self.exit_json(changed=True)
+ self.exit_json(changed=self._will_change(state, flavor))
+
+ if state == 'present' and not flavor:
+ # Create flavor
+ flavor = self._create()
+ self.exit_json(changed=True,
+ flavor=flavor.to_dict(computed=False))
+
+ elif state == 'present' and flavor:
+ # Update flavor
+ update = self._build_update(flavor)
+ if update:
+ flavor = self._update(flavor, update)
+
+ self.exit_json(changed=bool(update),
+ flavor=flavor.to_dict(computed=False))
+
+ elif state == 'absent' and flavor:
+ # Delete flavor
+ self._delete(flavor)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not flavor:
+ # Do nothing
self.exit_json(changed=False)
+ def _build_update(self, flavor):
+ return {
+ **self._build_update_extra_specs(flavor),
+ **self._build_update_flavor(flavor)}
+
+ def _build_update_extra_specs(self, flavor):
+ update = {}
+
+ old_extra_specs = flavor['extra_specs']
+ new_extra_specs = self.params['extra_specs'] or {}
+ if flavor['swap'] == '':
+ flavor['swap'] = 0
+
+ delete_extra_specs_keys = \
+ set(old_extra_specs.keys()) - set(new_extra_specs.keys())
+
+ if delete_extra_specs_keys:
+ update['delete_extra_specs_keys'] = delete_extra_specs_keys
+
+ stringified = dict([(k, str(v))
+ for k, v in new_extra_specs.items()])
+
+ if old_extra_specs != stringified:
+ update['create_extra_specs'] = new_extra_specs
+
+ return update
+
+ def _build_update_flavor(self, flavor):
+ update = {}
+
+ flavor_attributes = dict(
+ (k, self.params[k])
+ for k in ['ram', 'vcpus', 'disk', 'ephemeral', 'swap',
+ 'rxtx_factor', 'is_public', 'description']
+ if k in self.params and self.params[k] is not None
+ and self.params[k] != flavor[k])
+
+ if flavor_attributes:
+ update['flavor_attributes'] = flavor_attributes
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['name', 'ram', 'vcpus', 'disk', 'ephemeral',
+ 'swap', 'rxtx_factor', 'is_public',
+ 'description']
+ if self.params[k] is not None)
+
+ # Keep for backward compatibility
+ id = self.params['id']
+ if id is not None and id != 'auto':
+ kwargs['id'] = id
+
+ flavor = self.conn.compute.create_flavor(**kwargs)
+
+ extra_specs = self.params['extra_specs']
+ if extra_specs:
+ flavor = self.conn.compute.create_flavor_extra_specs(flavor.id,
+ extra_specs)
+
+ return flavor
+
+ def _delete(self, flavor):
+ self.conn.compute.delete_flavor(flavor)
+
+ def _update(self, flavor, update):
+ flavor = self._update_flavor(flavor, update)
+ flavor = self._update_extra_specs(flavor, update)
+ return flavor
+
+ def _update_extra_specs(self, flavor, update):
+ if update.get('flavor_attributes'):
+ # No need to update extra_specs since flavor will be recreated
+ return flavor
+
+ delete_extra_specs_keys = update.get('delete_extra_specs_keys')
+ if delete_extra_specs_keys:
+ self.conn.unset_flavor_specs(flavor.id, delete_extra_specs_keys)
+ # Update flavor after extra_specs removal
+ flavor = self.conn.compute.fetch_flavor_extra_specs(flavor)
+
+ create_extra_specs = update.get('create_extra_specs')
+ if create_extra_specs:
+ flavor = self.conn.compute.create_flavor_extra_specs(
+ flavor.id, create_extra_specs)
+
+ return flavor
+
+ def _update_flavor(self, flavor, update):
+ flavor_attributes = update.get('flavor_attributes')
+ if flavor_attributes:
+ # Because only flavor descriptions are updateable,
+ # flavor has to be recreated to "update" it
+ self._delete(flavor)
+ flavor = self._create()
+
+ return flavor
+
+ def _will_change(self, state, flavor):
+ if state == 'present' and not flavor:
+ return True
+ elif state == 'present' and flavor:
+ return bool(self._build_update(flavor))
+ elif state == 'absent' and flavor:
+ return True
+ else:
+ # state == 'absent' and not flavor:
+ return False
+
def main():
module = ComputeFlavorModule()
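compute_flavor now splits change detection in two: flavor attributes (which can only be "updated" by deleting and recreating the flavor, since only descriptions are mutable in Nova) and extra_specs, which are diffed against the stringified values Nova stores. A standalone sketch of the extra-specs diff performed by _build_update_extra_specs() above:

def diff_extra_specs(old_specs, new_specs):
    # Report which keys must be removed and whether the desired specs need to
    # be (re)written; values are stringified because Nova stores them as strings.
    new_specs = new_specs or {}
    update = {}
    stale_keys = set(old_specs) - set(new_specs)
    if stale_keys:
        update['delete_extra_specs_keys'] = stale_keys
    if old_specs != {k: str(v) for k, v in new_specs.items()}:
        update['create_extra_specs'] = new_specs
    return update


assert diff_extra_specs({'hw:cpu_policy': 'shared'},
                        {'hw:cpu_policy': 'shared'}) == {}
assert diff_extra_specs({'a': '1', 'b': '2'}, {'a': 1}) == {
    'delete_extra_specs_keys': {'b'}, 'create_extra_specs': {'a': 1}}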
diff --git a/ansible_collections/openstack/cloud/plugins/modules/compute_flavor_access.py b/ansible_collections/openstack/cloud/plugins/modules/compute_flavor_access.py
new file mode 100644
index 000000000..20abc3e16
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/compute_flavor_access.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r'''
+---
+module: compute_flavor_access
+short_description: Manage access to OpenStack compute flavors
+author: OpenStack Ansible SIG
+description:
+ - Add or remove access to OpenStack compute flavor
+options:
+ name:
+ description:
+ - Name or ID of the compute flavor.
+ required: true
+ type: str
+ project:
+ description:
+ - ID or Name of project to grant.
+ - Allow I(project) to access private flavor (name or ID).
+ type: str
+ required: true
+ project_domain:
+ description:
+ - Domain the project belongs to (name or ID).
+ - This can be used in case collisions between project names exist.
+ type: str
+ state:
+ description:
+ - Indicate whether project should have access to compute flavor or not.
+ default: present
+ type: str
+ choices: ['present', 'absent']
+notes:
+ - A compute flavor must be private to manage project access.
+extends_documentation_fragment:
+- openstack.cloud.openstack
+'''
+
+EXAMPLES = r'''
+- name: Grant access to tiny flavor
+ openstack.cloud.compute_flavor_access:
+ cloud: devstack
+ name: tiny
+ project: demo
+ state: present
+
+- name: Revoke access to compute flavor
+ openstack.cloud.compute_flavor_access:
+ cloud: devstack
+ name: tiny
+ project: demo
+ state: absent
+'''
+
+RETURN = '''
+flavor:
+ description: Dictionary describing the flavor.
+ returned: On success when I(state) is 'present'
+ type: dict
+ contains:
+ description:
+ description: Description attached to flavor
+ returned: success
+ type: str
+ sample: Example description
+ disk:
+ description: Size of local disk, in GB.
+ returned: success
+ type: int
+ sample: 10
+ ephemeral:
+ description: Ephemeral space size, in GB.
+ returned: success
+ type: int
+ sample: 10
+ extra_specs:
+ description: Flavor metadata
+ returned: success
+ type: dict
+ sample:
+ "quota:disk_read_iops_sec": 5000
+ "aggregate_instance_extra_specs:pinned": false
+ id:
+ description: Flavor ID.
+ returned: success
+ type: str
+ sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
+ is_disabled:
+ description: Whether the flavor is disabled
+ returned: success
+ type: bool
+ sample: true
+ is_public:
+ description: Make flavor accessible to the public.
+ returned: success
+ type: bool
+ sample: true
+ name:
+ description: Flavor name.
+ returned: success
+ type: str
+ sample: "tiny"
+ original_name:
+ description: The name of this flavor when returned by server list/show
+ type: str
+ returned: success
+ ram:
+ description: Amount of memory, in MB.
+ returned: success
+ type: int
+ sample: 1024
+ rxtx_factor:
+ description: |
+ The bandwidth scaling factor this flavor receives on the network
+ returned: success
+ type: int
+ sample: 100
+ swap:
+ description: Swap space size, in MB.
+ returned: success
+ type: int
+ sample: 100
+ vcpus:
+ description: Number of virtual CPUs.
+ returned: success
+ type: int
+ sample: 2
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+class ComputeFlavorAccess(OpenStackModule):
+ argument_spec = dict(
+ name=dict(required=True),
+ project=dict(required=True),
+ project_domain=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = dict(
+ supports_check_mode=True,
+ )
+
+ # TODO: Merge with equal function from volume_type_access module.
+ def _project_and_project_domain(self):
+ project_name_or_id = self.params['project']
+ project_domain_name_or_id = self.params['project_domain']
+
+ if project_domain_name_or_id:
+ domain_id = self.conn.identity.find_domain(
+ project_domain_name_or_id, ignore_missing=False).id
+ else:
+ domain_id = None
+
+ kwargs = dict() if domain_id is None else dict(domain_id=domain_id)
+
+ if project_name_or_id:
+ project_id = self.conn.identity.find_project(
+                project_name_or_id, ignore_missing=False, **kwargs).id
+ else:
+ project_id = None
+
+ return project_id, domain_id
+
+ def run(self):
+ name_or_id = self.params['name']
+ flavor = self.conn.compute.find_flavor(name_or_id,
+ ignore_missing=False)
+
+ state = self.params['state']
+ if state == 'present' and flavor.is_public:
+ raise ValueError('access can only be granted to private flavors')
+
+ project_id, domain_id = self._project_and_project_domain()
+
+ flavor_access = self.conn.compute.get_flavor_access(flavor.id)
+ project_ids = [access.get('tenant_id') for access in flavor_access]
+
+ if (project_id in project_ids and state == 'present') \
+ or (project_id not in project_ids and state == 'absent'):
+ self.exit_json(changed=False,
+ flavor=flavor.to_dict(computed=False))
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=True, flavor=flavor.to_dict(computed=False))
+
+ if project_id in project_ids: # and state == 'absent'
+ self.conn.compute.flavor_remove_tenant_access(flavor.id,
+ project_id)
+ else: # project_id not in project_ids and state == 'present'
+ self.conn.compute.flavor_add_tenant_access(flavor.id, project_id)
+
+ self.exit_json(changed=True, flavor=flavor.to_dict(computed=False))
+
+
+def main():
+ module = ComputeFlavorAccess()
+ module()
+
+
+if __name__ == '__main__':
+ main()
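The new compute_flavor_access module reduces to an idempotent add/remove of a project on a private flavor's access list. Roughly the same flow with the SDK used directly; conn, the cloud name and the identifiers below are assumptions for illustration, not values from the patch:

import openstack


def ensure_flavor_access(conn, flavor_name_or_id, project_id, present=True):
    # Same idempotency check as ComputeFlavorAccess.run() above: only touch the
    # access list when the project is not already in the desired state.
    flavor = conn.compute.find_flavor(flavor_name_or_id, ignore_missing=False)
    access = conn.compute.get_flavor_access(flavor.id)
    project_ids = [entry.get('tenant_id') for entry in access]

    if present and project_id not in project_ids:
        conn.compute.flavor_add_tenant_access(flavor.id, project_id)
        return True
    if not present and project_id in project_ids:
        conn.compute.flavor_remove_tenant_access(flavor.id, project_id)
        return True
    return False  # already in the desired state


if __name__ == '__main__':
    conn = openstack.connect(cloud='devstack')  # cloud name is illustrative
    ensure_flavor_access(conn, 'tiny', 'deadbeef-project-id')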
diff --git a/ansible_collections/openstack/cloud/plugins/modules/compute_flavor_info.py b/ansible_collections/openstack/cloud/plugins/modules/compute_flavor_info.py
index 61ee7a5b7..0fc240542 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/compute_flavor_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/compute_flavor_info.py
@@ -1,184 +1,155 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: compute_flavor_info
-short_description: Retrieve information about one or more flavors
+short_description: Fetch compute flavors from OpenStack cloud
author: OpenStack Ansible SIG
description:
- - Retrieve information about available OpenStack instance flavors. By default,
- information about ALL flavors are retrieved. Filters can be applied to get
- information for only matching flavors. For example, you can filter on the
- amount of RAM available to the flavor, or the number of virtual CPUs
- available to the flavor, or both. When specifying multiple filters,
- *ALL* filters must match on a flavor before that flavor is returned as
- a fact.
- - This module was called C(openstack.cloud.compute_flavor_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.compute_flavor_info) module no longer returns C(ansible_facts)!
-notes:
- - The result contains a list of unsorted flavors.
+ - Fetch OpenStack compute flavors.
options:
- name:
- description:
- - A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral).
- type: str
- ram:
- description:
- - "A string used for filtering flavors based on the amount of RAM
+ ephemeral:
+ description:
+ - Filter flavors based on the amount of ephemeral storage.
+      - I(ephemeral) supports the same format as the I(ram) option.
+ type: str
+ limit:
+ description:
+      - Limits the number of flavors returned to I(limit) results.
+ - By default all matching flavors are returned.
+ type: int
+ name:
+ description:
+ - Flavor name.
+ type: str
+ ram:
+ description:
+ - "A string used for filtering flavors based on the amount of RAM
(in MB) desired. This string accepts the following special values:
'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
(return flavors with the maximum amount of RAM)."
-
- - "A specific amount of RAM may also be specified. Any flavors with this
+ - "A specific amount of RAM may also be specified. Any flavors with this
exact amount of RAM will be returned."
-
- - "A range of acceptable RAM may be given using a special syntax. Simply
+ - "A range of acceptable RAM may be given using a special syntax. Simply
prefix the amount of RAM with one of these acceptable range values:
'<', '>', '<=', '>='. These values represent less than, greater than,
less than or equal to, and greater than or equal to, respectively."
- type: str
- vcpus:
- description:
- - A string used for filtering flavors based on the number of virtual
- CPUs desired. Format is the same as the I(ram) parameter.
- type: str
- limit:
- description:
- - Limits the number of flavors returned. All matching flavors are
- returned by default.
- type: int
- ephemeral:
- description:
- - A string used for filtering flavors based on the amount of ephemeral
- storage. Format is the same as the I(ram) parameter
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ type: str
+ vcpus:
+ description:
+ - Filter flavors based on the number of virtual CPUs.
+      - I(vcpus) supports the same format as the I(ram) option.
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Gather information about all available flavors
-- openstack.cloud.compute_flavor_info:
+EXAMPLES = r'''
+- name: Gather information about all available flavors
+ openstack.cloud.compute_flavor_info:
cloud: mycloud
- register: result
-- debug:
- msg: "{{ result.openstack_flavors }}"
-
-# Gather information for the flavor named "xlarge-flavor"
-- openstack.cloud.compute_flavor_info:
+- name: Gather information for the flavor named "xlarge-flavor"
+ openstack.cloud.compute_flavor_info:
cloud: mycloud
name: "xlarge-flavor"
-# Get all flavors that have exactly 512 MB of RAM.
-- openstack.cloud.compute_flavor_info:
+- name: Get all flavors with 512 MB of RAM
+ openstack.cloud.compute_flavor_info:
cloud: mycloud
ram: "512"
-# Get all flavors that have 1024 MB or more of RAM.
-- openstack.cloud.compute_flavor_info:
+- name: Get all flavors with >= 1024 MB RAM
+ openstack.cloud.compute_flavor_info:
cloud: mycloud
ram: ">=1024"
-# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
-# option will guarantee only a single flavor is returned.
-- openstack.cloud.compute_flavor_info:
+- name: Get a single flavor with minimum amount of RAM
+ openstack.cloud.compute_flavor_info:
cloud: mycloud
ram: "MIN"
limit: 1
-# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
-- openstack.cloud.compute_flavor_info:
+- name: Get all flavors with >=1024 MB RAM and 2 vCPUs
+ openstack.cloud.compute_flavor_info:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
-# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and
-# less than 30gb of ephemeral storage.
-- openstack.cloud.compute_flavor_info:
+- name: Get flavors with >= 1024 MB RAM 2 vCPUs and < 30gb ephemeral storage
+ openstack.cloud.compute_flavor_info:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
ephemeral: "<30"
'''
-
-RETURN = '''
-openstack_flavors:
- description: Dictionary describing the flavors.
- returned: On success.
- type: complex
- contains:
- id:
- description: Flavor ID.
- returned: success
- type: str
- sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
- name:
- description: Flavor name.
- returned: success
- type: str
- sample: "tiny"
- description:
- description: Description of the flavor
- returned: success
- type: str
- sample: "Small flavor"
- is_disabled:
- description: Wether the flavor is enabled or not
- returned: success
- type: bool
- sample: False
- rxtx_factor:
- description: Factor to be multiplied by the rxtx_base property of
- the network it is attached to in order to have a
- different bandwidth cap.
- returned: success
- type: float
- sample: 1.0
- extra_specs:
- description: Optional parameters to configure different flavors
- options.
- returned: success
- type: dict
- sample: "{'hw_rng:allowed': True}"
- disk:
- description: Size of local disk, in GB.
- returned: success
- type: int
- sample: 10
- ephemeral:
- description: Ephemeral space size, in GB.
- returned: success
- type: int
- sample: 10
- ram:
- description: Amount of memory, in MB.
- returned: success
- type: int
- sample: 1024
- swap:
- description: Swap space size, in MB.
- returned: success
- type: int
- sample: 100
- vcpus:
- description: Number of virtual CPUs.
- returned: success
- type: int
- sample: 2
- is_public:
- description: Make flavor accessible to the public.
- returned: success
- type: bool
- sample: true
+RETURN = r'''
+flavors:
+ description: List of dictionaries describing the compute flavors.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ description:
+ description: Description of the flavor
+ type: str
+ sample: "Small flavor"
+ disk:
+ description: Size of local disk, in GB.
+ type: int
+ sample: 10
+ ephemeral:
+ description: Ephemeral space size, in GB.
+ type: int
+ sample: 10
+ extra_specs:
+ description: Optional parameters to configure different flavors
+ options.
+ type: dict
+ sample: "{'hw_rng:allowed': True}"
+ id:
+ description: Flavor ID.
+ type: str
+ sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
+ is_disabled:
+      description: Whether the flavor is disabled or not
+ type: bool
+ sample: False
+ is_public:
+ description: Make flavor accessible to the public.
+ type: bool
+ sample: true
+ name:
+ description: Flavor name.
+ type: str
+ sample: "tiny"
+ original_name:
+ description: Original flavor name
+ type: str
+ sample: "tiny"
+ ram:
+ description: Amount of memory, in MB.
+ type: int
+ sample: 1024
+ rxtx_factor:
+ description: Factor to be multiplied by the rxtx_base property of
+ the network it is attached to in order to have a
+ different bandwidth cap.
+ type: float
+ sample: 1.0
+ swap:
+ description: Swap space size, in MB.
+ type: int
+ sample: 100
+ vcpus:
+ description: Number of virtual CPUs.
+ type: int
+ sample: 2
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -186,56 +157,39 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ComputeFlavorInfoModule(OpenStackModule):
argument_spec = dict(
- name=dict(required=False, default=None),
- ram=dict(required=False, default=None),
- vcpus=dict(required=False, default=None),
- limit=dict(required=False, default=None, type='int'),
- ephemeral=dict(required=False, default=None),
+ ephemeral=dict(),
+ limit=dict(type='int'),
+ name=dict(),
+ ram=dict(),
+ vcpus=dict(),
)
+
module_kwargs = dict(
- mutually_exclusive=[
- ['name', 'ram'],
- ['name', 'vcpus'],
- ['name', 'ephemeral']
- ],
supports_check_mode=True
)
- deprecated_names = ('openstack.cloud.compute_flavor_facts')
-
def run(self):
name = self.params['name']
- vcpus = self.params['vcpus']
- ram = self.params['ram']
- ephemeral = self.params['ephemeral']
- limit = self.params['limit']
- filters = {}
- if vcpus:
- filters['vcpus'] = vcpus
- if ram:
- filters['ram'] = ram
- if ephemeral:
- filters['ephemeral'] = ephemeral
+ filters = dict((k, self.params[k])
+ for k in ['ephemeral', 'ram', 'vcpus']
+ if self.params[k] is not None)
if name:
- # extra_specs are exposed in the flavor representation since Rocky, so we do not
- # need get_extra_specs=True which is not available in OpenStack SDK 0.36 (Train)
- # Ref.: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html
flavor = self.conn.compute.find_flavor(name)
flavors = [flavor] if flavor else []
-
else:
flavors = list(self.conn.compute.flavors())
- if filters:
- flavors = self.conn.range_search(flavors, filters)
+ if filters:
+ flavors = self.conn.range_search(flavors, filters)
+
+ limit = self.params['limit']
if limit is not None:
flavors = flavors[:limit]
- # Transform entries to dict
- flavors = [flavor.to_dict(computed=True) for flavor in flavors]
- self.exit_json(changed=False, openstack_flavors=flavors)
+ self.exit_json(changed=False,
+ flavors=[f.to_dict(computed=False) for f in flavors])
def main():
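The ram, vcpus and ephemeral options above share one filter syntax: a bare number, a comparison prefix ('<', '>', '<=', '>='), or the keywords MIN/MAX, all handed to conn.range_search(). A much-reduced, standalone illustration of how a single comparison-style filter matches one flavor value (MIN/MAX need the whole result set and are omitted here; this is not the SDK's implementation):

def matches_range(value, spec):
    # spec examples: "512", ">=1024", "<30"
    spec = spec.strip()
    for prefix, test in (('>=', lambda a, b: a >= b),
                         ('<=', lambda a, b: a <= b),
                         ('>', lambda a, b: a > b),
                         ('<', lambda a, b: a < b)):
        if spec.startswith(prefix):
            return test(value, int(spec[len(prefix):]))
    return value == int(spec)


assert matches_range(1024, '>=1024') is True
assert matches_range(512, '>=1024') is False
assert matches_range(10, '<30') is True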
diff --git a/ansible_collections/openstack/cloud/plugins/modules/compute_service_info.py b/ansible_collections/openstack/cloud/plugins/modules/compute_service_info.py
index 6665dd63e..f3ad0794b 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/compute_service_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/compute_service_info.py
@@ -1,96 +1,78 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: compute_service_info
-short_description: Retrieve information about one or more OpenStack compute services
+short_description: Fetch OpenStack Compute (Nova) services
author: OpenStack Ansible SIG
description:
- - Retrieve information about nova compute services
+ - Fetch OpenStack Compute (Nova) services.
options:
- binary:
- description:
- - Filter by service binary type. Requires openstacksdk>=0.53.
- type: str
- host:
- description:
- - Filter by service host. Requires openstacksdk>=0.53.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ binary:
+ description:
+ - Filter the service list result by binary name of the service.
+ type: str
+ host:
+ description:
+ - Filter the service list result by the host name.
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Gather information about compute services
-- openstack.cloud.compute_service_info:
+EXAMPLES = r'''
+- name: Fetch all OpenStack Compute (Nova) services
+ openstack.cloud.compute_service_info:
+ cloud: awesomecloud
+
+- name: Fetch a subset of OpenStack Compute (Nova) services
+ openstack.cloud.compute_service_info:
cloud: awesomecloud
binary: "nova-compute"
host: "localhost"
- register: result
-- openstack.cloud.compute_service_info:
- cloud: awesomecloud
- register: result
-- debug:
- msg: "{{ result.openstack_compute_services }}"
'''
-
-RETURN = '''
-openstack_compute_services:
- description: has all the OpenStack information about compute services
- returned: always, but can be null
- type: complex
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- binary:
- description: The binary name of the service.
- returned: success
- type: str
- host:
- description: The name of the host.
- returned: success
- type: str
- disabled_reason:
- description: The reason why the service is disabled
- returned: success and OpenStack SDK >= 0.53
- type: str
- disables_reason:
- description: The reason why the service is disabled
- returned: success and OpenStack SDK < 0.53
- type: str
- availability_zone:
- description: The availability zone name.
- returned: success
- type: str
- is_forced_down:
- description: If the service has been forced down or nova-compute
- returned: success
- type: bool
- name:
- description: Service name
- returned: success
- type: str
- status:
- description: The status of the service. One of enabled or disabled.
- returned: success
- type: str
- state:
- description: The state of the service. One of up or down.
- returned: success
- type: str
- update_at:
- description: The date and time when the resource was updated
- returned: success
- type: str
+RETURN = r'''
+compute_services:
+ description: List of dictionaries describing Compute (Nova) services.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ availability_zone:
+ description: The availability zone name.
+ type: str
+ binary:
+ description: The binary name of the service.
+ type: str
+ disabled_reason:
+ description: The reason why the service is disabled
+ type: str
+ id:
+ description: Unique UUID.
+ type: str
+ is_forced_down:
+ description: Whether the service has been forced down.
+ type: bool
+ host:
+ description: The name of the host.
+ type: str
+ name:
+ description: Service name
+ type: str
+ state:
+ description: The state of the service. One of up or down.
+ type: str
+ status:
+ description: The status of the service. One of enabled or disabled.
+ type: str
+ updated_at:
+ description: The date and time when the resource was updated.
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -98,19 +80,23 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ComputeServiceInfoModule(OpenStackModule):
argument_spec = dict(
- binary=dict(required=False, default=None, min_ver='0.53.0'),
- host=dict(required=False, default=None, min_ver='0.53.0'),
+ binary=dict(),
+ host=dict(),
)
+
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
- filters = self.check_versioned(binary=self.params['binary'], host=self.params['host'])
- filters = {k: v for k, v in filters.items() if v is not None}
- services = self.conn.compute.services(**filters)
- services = [service.to_dict(computed=True) for service in services]
- self.exit_json(changed=False, openstack_compute_services=services)
+ kwargs = {k: self.params[k]
+ for k in ['binary', 'host']
+ if self.params[k] is not None}
+ compute_services = self.conn.compute.services(**kwargs)
+
+ self.exit_json(changed=False,
+ compute_services=[s.to_dict(computed=False)
+ for s in compute_services])
def main():
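The rewritten compute_service_info module forwards only the binary and host parameters that were actually set. A minimal sketch of the equivalent openstacksdk call, with an illustrative cloud name and filter values:

import openstack

conn = openstack.connect(cloud='devstack')

# Build the query from non-null filters, mirroring the module's kwargs dict.
params = {'binary': 'nova-compute', 'host': None}
query = {k: v for k, v in params.items() if v is not None}

for service in conn.compute.services(**query):
    print(service.to_dict(computed=False))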
diff --git a/ansible_collections/openstack/cloud/plugins/modules/config.py b/ansible_collections/openstack/cloud/plugins/modules/config.py
index 94036e499..478555efe 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/config.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/config.py
@@ -1,45 +1,68 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: config
short_description: Get OpenStack Client config
+author: OpenStack Ansible SIG
description:
- - Get I(openstack) client config data from clouds.yaml or environment
-notes:
- - Facts are placed in the C(openstack.clouds) variable.
+ - Get OpenStack cloud credentials and configuration,
+ e.g. from clouds.yaml and environment variables.
options:
clouds:
description:
- - List of clouds to limit the return list to. No value means return
- information on all configured clouds
- required: false
+ - List of clouds to limit the return list to.
+ - When I(clouds) is not defined, then data
+ is returned for all configured clouds.
default: []
type: list
elements: str
requirements:
- - "python >= 3.6"
- - "openstacksdk"
-author: OpenStack Ansible SIG
+ - "python >= 3.6"
+ - "openstacksdk >= 1.0.0"
+'''
+
+RETURN = r'''
+clouds:
+ description: List of OpenStack cloud configurations.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ name:
+ description: Name of the cloud.
+ type: str
+ config:
+ description: A dict of configuration values for the CloudRegion and
+ its services. The key for a ${config_option} for a
+ specific ${service} should be ${service}_${config_option}.
+ type: dict
'''
-EXAMPLES = '''
-- name: Get list of clouds that do not support security groups
+EXAMPLES = r'''
+- name: Read configuration of all defined clouds
openstack.cloud.config:
+ register: config
-- debug:
- var: "{{ item }}"
- with_items: "{{ openstack.clouds | rejectattr('secgroup_source', 'none') | list }}"
+- name: Print clouds which do not support security groups
+ loop: "{{ config.clouds }}"
+ when: item.config.secgroup_source|default(None) != None
+ debug:
+ var: item
-- name: Get the information back just about the mordred cloud
+- name: Read configuration of two specific clouds
openstack.cloud.config:
clouds:
+ - devstack
- mordred
'''
+from ansible.module_utils.basic import AnsibleModule
+
try:
import openstack.config
from openstack import exceptions
@@ -47,28 +70,26 @@ try:
except ImportError:
HAS_OPENSTACKSDK = False
-from ansible.module_utils.basic import AnsibleModule
-
def main():
- module = AnsibleModule(argument_spec=dict(
- clouds=dict(required=False, type='list', default=[], elements='str'),
- ))
+ module = AnsibleModule(
+ argument_spec=dict(
+ clouds=dict(type='list', default=[], elements='str'),
+ )
+ )
if not HAS_OPENSTACKSDK:
module.fail_json(msg='openstacksdk is required for this module')
- p = module.params
-
try:
- config = openstack.config.OpenStackConfig()
- clouds = []
- for cloud in config.get_all_clouds():
- if not p['clouds'] or cloud.name in p['clouds']:
- cloud.config['name'] = cloud.name
- clouds.append(cloud.config)
- module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
- except exceptions.ConfigException as e:
+ clouds = [dict(name=cloud.name, config=cloud.config)
+ for cloud in openstack.config.OpenStackConfig().get_all()
+ if not module.params['clouds']
+ or cloud.name in module.params['clouds']]
+
+ module.exit_json(changed=False, clouds=clouds)
+
+ except exceptions.SDKException as e:
module.fail_json(msg=str(e))
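The rewritten config module no longer injects ansible_facts; it returns a plain list of clouds, each a dict with name and config keys. A minimal sketch of the same lookup with openstacksdk alone (the cloud name in wanted is illustrative):

import openstack.config

wanted = ['devstack']  # empty list means "return all configured clouds"
clouds = [dict(name=cloud.name, config=cloud.config)
          for cloud in openstack.config.OpenStackConfig().get_all()
          if not wanted or cloud.name in wanted]
print([cloud['name'] for cloud in clouds])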
diff --git a/ansible_collections/openstack/cloud/plugins/modules/container.py b/ansible_collections/openstack/cloud/plugins/modules/container.py
deleted file mode 100644
index 23ed38e54..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/container.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-#
-# Copyright (c) 2021 by Open Telekom Cloud, operated by T-Systems International GmbH
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: object_container
-short_description: Manage Swift container.
-author: OpenStack Ansible SIG
-description:
- - Manage Swift container.
-options:
- container:
- description: Name of a container in Swift.
- type: str
- required: true
- metadata:
- description:
- - Key/value pairs to be set as metadata on the container.
- - If a container doesn't exist, it will be created.
- - Both custom and system metadata can be set.
- - Custom metadata are keys and values defined by the user.
- - The system metadata keys are content_type, content_encoding, content_disposition, delete_after,\
- delete_at, is_content_type_detected
- type: dict
- required: false
- keys:
- description: Keys from 'metadata' to be deleted.
- type: list
- elements: str
- required: false
- delete_with_all_objects:
- description:
- - Whether the container should be deleted with all objects or not.
- - Without this parameter set to "true", an attempt to delete a container that contains objects will fail.
- type: bool
- default: False
- required: false
- state:
- description: Whether resource should be present or absent.
- default: 'present'
- choices: ['present', 'absent']
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-container:
- description: Specifies the container.
- returned: On success when C(state=present)
- type: dict
- sample:
- {
- "bytes": 5449,
- "bytes_used": 5449,
- "content_type": null,
- "count": 1,
- "id": "otc",
- "if_none_match": null,
- "is_content_type_detected": null,
- "is_newest": null,
- "meta_temp_url_key": null,
- "meta_temp_url_key_2": null,
- "name": "otc",
- "object_count": 1,
- "read_ACL": null,
- "sync_key": null,
- "sync_to": null,
- "timestamp": null,
- "versions_location": null,
- "write_ACL": null
- }
-'''
-
-EXAMPLES = '''
-# Create empty container
- - openstack.cloud.object_container:
- container: "new-container"
- state: present
-
-# Set metadata for container
- - openstack.cloud.object_container:
- container: "new-container"
- metadata: "Cache-Control='no-cache'"
-
-# Delete some keys from metadata of a container
- - openstack.cloud.object_container:
- container: "new-container"
- keys:
- - content_type
-
-# Delete container
- - openstack.cloud.object_container:
- container: "new-container"
- state: absent
-
-# Delete container and its objects
- - openstack.cloud.object_container:
- container: "new-container"
- delete_with_all_objects: true
- state: absent
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ContainerModule(OpenStackModule):
-
- argument_spec = dict(
- container=dict(type='str', required=True),
- metadata=dict(type='dict', required=False),
- keys=dict(type='list', required=False, elements='str', no_log=False),
- state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
- delete_with_all_objects=dict(type='bool', default=False, required=False)
- )
-
- def create(self, container):
-
- data = {}
- if self._container_exist(container):
- self.exit_json(changed=False)
-
- container_data = self.conn.object_store.create_container(name=container).to_dict()
- container_data.pop('location')
- data['container'] = container_data
- self.exit_json(changed=True, **data)
-
- def delete(self, container):
-
- delete_with_all_objects = self.params['delete_with_all_objects']
-
- changed = False
- if self._container_exist(container):
- objects = []
- for raw in self.conn.object_store.objects(container):
- dt = raw.to_dict()
- dt.pop('location')
- objects.append(dt)
- if len(objects) > 0:
- if delete_with_all_objects:
- for obj in objects:
- self.conn.object_store.delete_object(container=container, obj=obj['id'])
- else:
- self.fail_json(msg="Container has objects")
- self.conn.object_store.delete_container(container=container)
- changed = True
-
- self.exit(changed=changed)
-
- def set_metadata(self, container, metadata):
-
- data = {}
-
- if not self._container_exist(container):
- new_container = self.conn.object_store.create_container(name=container).to_dict()
-
- new_container = self.conn.object_store.set_container_metadata(container, **metadata).to_dict()
- new_container.pop('location')
- data['container'] = new_container
- self.exit(changed=True, **data)
-
- def delete_metadata(self, container, keys):
-
- if not self._container_exist(container):
- self.fail_json(msg="Container doesn't exist")
-
- self.conn.object_store.delete_container_metadata(container=container, keys=keys)
- self.exit(changed=True)
-
- def _container_exist(self, container):
- try:
- self.conn.object_store.get_container_metadata(container)
- return True
- except self.sdk.exceptions.ResourceNotFound:
- return False
-
- def run(self):
-
- container = self.params['container']
- state = self.params['state']
- metadata = self.params['metadata']
- keys = self.params['keys']
-
- if state == 'absent':
- self.delete(container)
- if metadata:
- self.set_metadata(container, metadata)
- if keys:
- self.delete_metadata(container, keys)
-
- self.create(container)
-
-
-def main():
- module = ContainerModule()
- module()
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/dns_zone.py b/ansible_collections/openstack/cloud/plugins/modules/dns_zone.py
index 98cf655e3..3464b61cf 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/dns_zone.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/dns_zone.py
@@ -1,117 +1,161 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 Hewlett-Packard Enterprise
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: dns_zone
-short_description: Manage OpenStack DNS zones
+short_description: Manage an OpenStack DNS zone
author: OpenStack Ansible SIG
description:
- - Manage OpenStack DNS zones. Zones can be created, deleted or
- updated. Only the I(email), I(description), I(ttl) and I(masters) values
- can be updated.
+ - Create, delete or update an OpenStack DNS zone.
options:
- name:
- description:
- - Zone name
- required: true
- type: str
- zone_type:
- description:
- - Zone type
- choices: [primary, secondary]
- type: str
- email:
- description:
- - Email of the zone owner (only applies if zone_type is primary)
- type: str
- description:
- description:
- - Zone description
- type: str
- ttl:
- description:
- - TTL (Time To Live) value in seconds
- type: int
- masters:
- description:
- - Master nameservers (only applies if zone_type is secondary)
- type: list
- elements: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - Zone description.
+ type: str
+ email:
+ description:
+ - Email of the zone owner.
+ - Only applies if I(type) is C(primary).
+ type: str
+ masters:
+ description:
+ - Master nameservers.
+ - Only applies if I(type) is C(secondary).
+ type: list
+ elements: str
+ name:
+ description:
+ - Name of the DNS zone.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the zone should be C(present) or C(absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ ttl:
+ description:
+ - TTL (Time To Live) value in seconds.
+ type: int
+ type:
+ description:
+ - Zone type.
+ - This attribute cannot be updated.
+ choices: ['primary', 'secondary']
+ type: str
+ aliases: ['zone_type']
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create a zone named "example.net"
-- openstack.cloud.dns_zone:
+EXAMPLES = r'''
+- name: Create DNS zone example.net.
+ openstack.cloud.dns_zone:
cloud: mycloud
state: present
name: example.net.
- zone_type: primary
+ type: primary
email: test@example.net
description: Test zone
ttl: 3600
-# Update the TTL on existing "example.net." zone
-- openstack.cloud.dns_zone:
+- name: Set TTL on DNS zone example.net.
+ openstack.cloud.dns_zone:
cloud: mycloud
state: present
name: example.net.
ttl: 7200
-# Delete zone named "example.net."
-- openstack.cloud.dns_zone:
+- name: Delete zone example.net.
+ openstack.cloud.dns_zone:
cloud: mycloud
state: absent
name: example.net.
'''
-RETURN = '''
+RETURN = r'''
zone:
- description: Dictionary describing the zone.
- returned: On success when I(state) is 'present'.
- type: complex
- contains:
- id:
- description: Unique zone ID
- type: str
- sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
- name:
- description: Zone name
- type: str
- sample: "example.net."
- type:
- description: Zone type
- type: str
- sample: "PRIMARY"
- email:
- description: Zone owner email
- type: str
- sample: "test@example.net"
- description:
- description: Zone description
- type: str
- sample: "Test description"
- ttl:
- description: Zone TTL value
- type: int
- sample: 3600
- masters:
- description: Zone master nameservers
- type: list
- sample: []
+ description: Dictionary describing the zone.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ action:
+ description: Current action in progress on the resource.
+ type: str
+ sample: "CREATE"
+ attributes:
+ description: Key value pairs of information about this zone, and the
+ pool the user would like to place the zone in. This
+ information can be used by the scheduler to place zones on
+ the correct pool.
+ type: dict
+ sample: {"tier": "gold", "ha": "true"}
+ created_at:
+ description: Date / Time when resource was created.
+ type: str
+ sample: "2014-07-07T18:25:31.275934"
+ description:
+ description: Description for this zone.
+ type: str
+ sample: "This is an example zone."
+ email:
+ description: E-mail for the zone. Used in SOA records for the zone.
+ type: str
+ sample: "test@example.org"
+ id:
+ description: ID for the resource.
+ type: str
+ sample: "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3"
+ links:
+ description: Links to the resource, and other related resources. When a
+ response has been broken into pages, we will include a next
+ link that should be followed to retrieve all results.
+ type: dict
+ sample: {"self": "https://127.0.0.1:9001/v2/zones/a86dba...d5e86f3ca3"}
+ masters:
+ description: The servers to slave from to get DNS information.
+ Mandatory for secondary zones.
+ type: list
+ sample: "[]"
+ name:
+ description: DNS Name for the zone.
+ type: str
+ sample: "test.test."
+ pool_id:
+ description: ID for the pool hosting this zone.
+ type: str
+ sample: "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3"
+ project_id:
+ description: ID for the project that owns the resource.
+ type: str
+ sample: "4335d1f0-f793-11e2-b778-0800200c9a66"
+ serial:
+ description: Current serial number for the zone.
+ type: int
+ sample: 1404757531
+ status:
+ description: Status of the resource.
+ type: str
+ sample: "ACTIVE"
+ ttl:
+ description: TTL (Time to Live) for the zone.
+ type: int
+ sample: 7200
+ type:
+ description: Type of zone. PRIMARY is controlled by Designate,
+ SECONDARY zones are slaved from another DNS Server.
+ Defaults to PRIMARY.
+ type: str
+ sample: "PRIMARY"
+ updated_at:
+ description: Date / Time when resource last updated.
+ type: str
+ sample: "2014-07-07T18:25:31.275934"
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -120,119 +164,116 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class DnsZoneModule(OpenStackModule):
argument_spec = dict(
- name=dict(required=True, type='str'),
- zone_type=dict(required=False, choices=['primary', 'secondary'], type='str'),
- email=dict(required=False, type='str'),
- description=dict(required=False, type='str'),
- ttl=dict(required=False, type='int'),
- masters=dict(required=False, type='list', elements='str'),
- state=dict(default='present', choices=['absent', 'present'], type='str'),
+ description=dict(),
+ email=dict(),
+ masters=dict(type='list', elements='str'),
+ name=dict(required=True),
+ state=dict(default='present', choices=['absent', 'present']),
+ ttl=dict(type='int'),
+ type=dict(choices=['primary', 'secondary'], aliases=['zone_type']),
)
- def _system_state_change(self, state, email, description, ttl, masters, zone):
- if state == 'present':
- if not zone:
- return True
- if email is not None and zone.email != email:
- return True
- if description is not None and zone.description != description:
- return True
- if ttl is not None and zone.ttl != ttl:
- return True
- if masters is not None and zone.masters != masters:
- return True
- if state == 'absent' and zone:
- return True
- return False
+ def run(self):
+ state = self.params['state']
+ name_or_id = self.params['name']
- def _wait(self, timeout, zone, state):
- """Wait for a zone to reach the desired state for the given state."""
+ zone = self.conn.dns.find_zone(name_or_id=name_or_id)
- for count in self.sdk.utils.iterate_timeout(
- timeout,
- "Timeout waiting for zone to be %s" % state):
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, zone))
- if (state == 'absent' and zone is None) or (state == 'present' and zone and zone.status == 'ACTIVE'):
- return
+ if state == 'present' and not zone:
+ # Create zone
+ zone = self._create()
+ self.exit_json(changed=True,
+ zone=zone.to_dict(computed=False))
- try:
- zone = self.conn.get_zone(zone.id)
- except Exception:
- continue
+ elif state == 'present' and zone:
+ # Update zone
+ update = self._build_update(zone)
+ if update:
+ zone = self._update(zone, update)
- if zone and zone.status == 'ERROR':
- self.fail_json(msg="Zone reached ERROR state while waiting for it to be %s" % state)
+ self.exit_json(changed=bool(update),
+ zone=zone.to_dict(computed=False))
- def run(self):
+ elif state == 'absent' and zone:
+ # Delete zone
+ self._delete(zone)
+ self.exit_json(changed=True)
- name = self.params['name']
- state = self.params['state']
- wait = self.params['wait']
- timeout = self.params['timeout']
-
- zone = self.conn.get_zone(name)
-
- if state == 'present':
-
- zone_type = self.params['zone_type']
- email = self.params['email']
- description = self.params['description']
- ttl = self.params['ttl']
- masters = self.params['masters']
-
- kwargs = {}
-
- if email:
- kwargs['email'] = email
- if description:
- kwargs['description'] = description
- if ttl:
- kwargs['ttl'] = ttl
- if masters:
- kwargs['masters'] = masters
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, email,
- description, ttl,
- masters, zone))
-
- if zone is None:
- zone = self.conn.create_zone(
- name=name, zone_type=zone_type, **kwargs)
- changed = True
- else:
- if masters is None:
- masters = []
-
- pre_update_zone = zone
- changed = self._system_state_change(state, email,
- description, ttl,
- masters, pre_update_zone)
- if changed:
- zone = self.conn.update_zone(
- name, **kwargs)
-
- if wait:
- self._wait(timeout, zone, state)
-
- self.exit_json(changed=changed, zone=zone)
-
- elif state == 'absent':
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, None,
- None, None,
- None, zone))
-
- if zone is None:
- changed = False
- else:
- self.conn.delete_zone(name)
- changed = True
-
- if wait:
- self._wait(timeout, zone, state)
-
- self.exit_json(changed=changed)
+ elif state == 'absent' and not zone:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, zone):
+ update = {}
+
+ attributes = dict((k, self.params[k])
+ for k in ['description', 'email', 'masters', 'ttl']
+ if self.params[k] is not None
+ and self.params[k] != zone[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['description', 'email', 'masters', 'name',
+ 'ttl', 'type']
+ if self.params[k] is not None)
+
+ if 'type' in kwargs:
+ # designate expects upper case PRIMARY or SECONDARY
+ kwargs['type'] = kwargs['type'].upper()
+
+ zone = self.conn.dns.create_zone(**kwargs)
+
+ if self.params['wait']:
+ self.sdk.resource.wait_for_status(
+ self.conn.dns, zone,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'])
+
+ return zone
+
+ def _delete(self, zone):
+ self.conn.dns.delete_zone(zone.id)
+
+ for count in self.sdk.utils.iterate_timeout(
+ timeout=self.params['timeout'],
+ message="Timeout waiting for zone to be absent"
+ ):
+ if self.conn.dns.find_zone(zone.id) is None:
+ break
+
+ def _update(self, zone, update):
+ attributes = update.get('attributes')
+ if attributes:
+ zone = self.conn.dns.update_zone(zone.id, **attributes)
+
+ if self.params['wait']:
+ self.sdk.resource.wait_for_status(
+ self.conn.dns, zone,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'])
+
+ return zone
+
+ def _will_change(self, state, zone):
+ if state == 'present' and not zone:
+ return True
+ elif state == 'present' and zone:
+ return bool(self._build_update(zone))
+ elif state == 'absent' and zone:
+ return True
+ else:
+ # state == 'absent' and not zone:
+ return False
def main():
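The rewritten dns_zone module talks to the DNS (Designate) proxy layer directly. A minimal sketch of the create path under the same assumptions the module makes (openstacksdk >= 1.0.0; cloud name, zone name and email are illustrative):

import openstack
from openstack import resource

conn = openstack.connect(cloud='devstack')

zone = conn.dns.find_zone('example.net.')
if zone is None:
    # Designate expects the zone type in upper case, as the module does.
    zone = conn.dns.create_zone(name='example.net.', type='PRIMARY',
                                email='test@example.net', ttl=3600)
    # Same wait the module performs when wait is true.
    zone = resource.wait_for_status(conn.dns, zone, status='active',
                                    failures=['error'], wait=180)
print(zone.to_dict(computed=False))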
diff --git a/ansible_collections/openstack/cloud/plugins/modules/dns_zone_info.py b/ansible_collections/openstack/cloud/plugins/modules/dns_zone_info.py
index 22a3da5c7..ed7b67d7e 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/dns_zone_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/dns_zone_info.py
@@ -1,11 +1,11 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 by Open Telekom Cloud, operated by T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: dns_zone_info
short_description: Getting information about dns zones
@@ -13,115 +13,122 @@ author: OpenStack Ansible SIG
description:
- Getting information about dns zones. Output can be filtered.
options:
- name:
- description:
- - Zone name.
- type: str
- type:
- description:
- - Zone type.
- choices: [primary, secondary]
- type: str
- email:
- description:
- - Email of the zone owner (only applies if zone_type is primary).
- type: str
- description:
- description:
- - Zone description.
- type: str
- ttl:
- description:
- - TTL (Time To Live) value in seconds.
- type: int
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - Zone description.
+ type: str
+ email:
+ description:
+ - Email of the zone owner.
+ - Only applies to primary zones.
+ type: str
+ name:
+ description:
+ - Zone name.
+ type: str
+ ttl:
+ description:
+ - TTL (Time To Live) value in seconds.
+ type: int
+ type:
+ description:
+ - Zone type.
+ choices: ['primary', 'secondary']
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create a zone named "example.net"
-- openstack.cloud.dns_zones:
+EXAMPLES = r'''
+- name: Fetch all DNS zones
+ openstack.cloud.dns_zone_info:
+ cloud: devstack
+- name: Fetch DNS zones by name
+ openstack.cloud.dns_zone_info:
+ cloud: devstack
+ name: ansible.test.zone.
'''
-RETURN = '''
-zone:
- description: Dictionary describing the zone.
- returned: On success when I(state) is 'present'.
- type: complex
- contains:
- action:
- description: Current action in progress on the resource.
- type: str
- sample: "CREATE"
- attributes:
- description: Key:Value pairs of information about this zone, and the pool the user would like to place \
- the zone in. This information can be used by the scheduler to place zones on the correct pool.
- type: dict
- sample: {"tier": "gold", "ha": "true"}
- created_at:
- description: Date / Time when resource was created.
- type: str
- sample: "2014-07-07T18:25:31.275934"
- description:
- description: Description for this zone.
- type: str
- sample: "This is an example zone."
- email:
- description: E-mail for the zone. Used in SOA records for the zone.
- type: str
- sample: "test@example.org"
- id:
- description: ID for the resource.
- type: int
- sample: "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3"
- links:
- description: Links to the resource, and other related resources. When a response has been broken into\
- pages, we will include a next link that should be followed to retrieve all results.
- type: dict
- sample: {"self": "https://127.0.0.1:9001/v2/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3"}
- masters:
- description: Mandatory for secondary zones. The servers to slave from to get DNS information.
- type: list
- sample: "[]"
- name:
- description: DNS Name for the zone.
- type: str
- sample: "test.test."
- pool_id:
- description: ID for the pool hosting this zone.
- type: str
- sample: "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3"
- project_id:
- description: ID for the project that owns the resource.
- type: str
- sample: "4335d1f0-f793-11e2-b778-0800200c9a66"
- serial:
- description: Current serial number for the zone.
- type: int
- sample: 1404757531
- status:
- description: Status of the resource.
- type: str
- sample: "ACTIVE"
- ttl:
- description: TTL (Time to Live) for the zone.
- type: int
- sample: 7200
- type:
- description: Type of zone. PRIMARY is controlled by Designate, SECONDARY zones are slaved from another\
- DNS Server. Defaults to PRIMARY
- type: str
- sample: "PRIMARY"
- updated_at:
- description: Date / Time when resource last updated.
- type: str
- sample: "2014-07-07T18:25:31.275934"
+RETURN = r'''
+zones:
+ description: List of dictionaries describing the DNS zones.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ action:
+ description: Current action in progress on the resource.
+ type: str
+ sample: "CREATE"
+ attributes:
+ description: Key value pairs of information about this zone, and the
+ pool the user would like to place the zone in. This
+ information can be used by the scheduler to place zones on
+ the correct pool.
+ type: dict
+ sample: {"tier": "gold", "ha": "true"}
+ created_at:
+ description: Date / Time when resource was created.
+ type: str
+ sample: "2014-07-07T18:25:31.275934"
+ description:
+ description: Description for this zone.
+ type: str
+ sample: "This is an example zone."
+ email:
+ description: E-mail for the zone. Used in SOA records for the zone.
+ type: str
+ sample: "test@example.org"
+ id:
+ description: ID for the resource.
+ type: str
+ sample: "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3"
+ links:
+ description: Links to the resource, and other related resources. When a
+ response has been broken into pages, we will include a next
+ link that should be followed to retrieve all results.
+ type: dict
+ sample: {"self": "https://127.0.0.1:9001/v2/zones/a86dba...d5e86f3ca3"}
+ masters:
+ description: The servers to slave from to get DNS information.
+ Mandatory for secondary zones.
+ type: list
+ sample: "[]"
+ name:
+ description: DNS Name for the zone.
+ type: str
+ sample: "test.test."
+ pool_id:
+ description: ID for the pool hosting this zone.
+ type: str
+ sample: "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3"
+ project_id:
+ description: ID for the project that owns the resource.
+ type: str
+ sample: "4335d1f0-f793-11e2-b778-0800200c9a66"
+ serial:
+ description: Current serial number for the zone.
+ type: int
+ sample: 1404757531
+ status:
+ description: Status of the resource.
+ type: str
+ sample: "ACTIVE"
+ ttl:
+ description: TTL (Time to Live) for the zone.
+ type: int
+ sample: 7200
+ type:
+ description: Type of zone. PRIMARY is controlled by Designate,
+ SECONDARY zones are slaved from another DNS Server.
+ Defaults to PRIMARY.
+ type: str
+ sample: "PRIMARY"
+ updated_at:
+ description: Date / Time when resource last updated.
+ type: str
+ sample: "2014-07-07T18:25:31.275934"
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -130,41 +137,27 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class DnsZoneInfoModule(OpenStackModule):
argument_spec = dict(
- name=dict(required=False, type='str'),
- type=dict(required=False, choices=['primary', 'secondary'], type='str'),
- email=dict(required=False, type='str'),
- description=dict(required=False, type='str'),
- ttl=dict(required=False, type='int')
+ description=dict(),
+ email=dict(),
+ name=dict(),
+ ttl=dict(type='int'),
+ type=dict(choices=['primary', 'secondary']),
)
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['description', 'email', 'name',
+ 'ttl', 'type']
+ if self.params[k] is not None)
+
+ zones = self.conn.dns.zones(**kwargs)
- name = self.params['name']
- type = self.params['type']
- email = self.params['email']
- description = self.params['description']
- ttl = self.params['ttl']
-
- kwargs = {}
-
- if name:
- kwargs['name'] = name
- if type:
- kwargs['type'] = type
- if email:
- kwargs['email'] = email
- if description:
- kwargs['description'] = description
- if ttl:
- kwargs['ttl'] = ttl
-
- data = [zone.to_dict(computed=False) for zone in
- self.conn.dns.zones(**kwargs)]
-
- self.exit_json(zones=data, changed=False)
+ self.exit_json(
+ changed=False,
+ zones=[z.to_dict(computed=False) for z in zones])
def main():
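dns_zone_info now passes any non-null option straight through as a server-side query parameter. A minimal sketch with an illustrative cloud and zone name:

import openstack

conn = openstack.connect(cloud='devstack')

for zone in conn.dns.zones(name='example.net.'):
    print(zone.to_dict(computed=False))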
diff --git a/ansible_collections/openstack/cloud/plugins/modules/endpoint.py b/ansible_collections/openstack/cloud/plugins/modules/endpoint.py
index e7864ecf1..be7cc7c52 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/endpoint.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/endpoint.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2017, VEXXHOST, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -10,8 +11,9 @@ short_description: Manage OpenStack Identity service endpoints
author: OpenStack Ansible SIG
description:
- Create, update, or delete OpenStack Identity service endpoints. If a
- service with the same combination of I(service), I(interface) and I(region)
- exist, the I(url) and I(state) (C(present) or C(absent)) will be updated.
+ service with the same combination of I(service), I(interface) and
+ I(region) exist, the I(url), I(enabled) and I(state) (C(present) or
+ C(absent)) will be updated.
options:
service:
description:
@@ -31,7 +33,8 @@ options:
type: str
region:
description:
- - Region that the service belongs to. Note that I(region_name) is used for authentication.
+ - ID of the region that the service belongs to.
+ Note that I(region_name) is used for authentication.
type: str
enabled:
description:
@@ -44,10 +47,6 @@ options:
choices: [present, absent]
default: present
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.13.0"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -75,7 +74,7 @@ RETURN = '''
endpoint:
description: Dictionary describing the endpoint.
returned: On success when I(state) is C(present)
- type: complex
+ type: dict
contains:
id:
description: Endpoint ID.
@@ -85,7 +84,7 @@ endpoint:
description: Endpoint Interface.
type: str
sample: public
- enabled:
+ is_enabled:
description: Service status.
type: bool
sample: True
@@ -93,10 +92,10 @@ endpoint:
description: Links for the endpoint
type: str
sample: http://controller/identity/v3/endpoints/123
- region:
- description: Same as C(region_id). Deprecated.
+ name:
+ description: Name of the endpoint
type: str
- sample: RegionOne
+ sample: cinder
region_id:
description: Region ID.
type: str
@@ -116,12 +115,12 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityEndpointModule(OpenStackModule):
argument_spec = dict(
- service=dict(type='str', required=True),
- endpoint_interface=dict(type='str', required=True, choices=['admin', 'public', 'internal']),
- url=dict(type='str', required=True),
- region=dict(type='str'),
+ service=dict(required=True),
+ endpoint_interface=dict(required=True, choices=['admin', 'public', 'internal']),
+ url=dict(required=True),
+ region=dict(),
enabled=dict(type='bool', default=True),
- state=dict(type='str', default='present', choices=['absent', 'present']),
+ state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = dict(
@@ -129,7 +128,7 @@ class IdentityEndpointModule(OpenStackModule):
)
def _needs_update(self, endpoint):
- if endpoint.enabled != self.params['enabled']:
+ if endpoint.is_enabled != self.params['enabled']:
return True
if endpoint.url != self.params['url']:
return True
@@ -151,11 +150,13 @@ class IdentityEndpointModule(OpenStackModule):
service_name_or_id = self.params['service']
interface = self.params['endpoint_interface']
url = self.params['url']
- region = self.params['region']
+ # Regions have IDs but do not have names
+ # Ref.: https://docs.openstack.org/api-ref/identity/v3/#regions
+ region_id = self.params['region']
enabled = self.params['enabled']
state = self.params['state']
- service = self.conn.get_service(service_name_or_id)
+ service = self.conn.identity.find_service(service_name_or_id)
if service is None and state == 'absent':
self.exit_json(changed=False)
@@ -164,49 +165,47 @@ class IdentityEndpointModule(OpenStackModule):
self.fail_json(msg='Service %s does not exist' % service_name_or_id)
filters = dict(service_id=service.id, interface=interface)
- if region is not None:
- filters['region'] = region
- endpoints = self.conn.search_endpoints(filters=filters)
+ if region_id:
+ filters['region_id'] = region_id
+ endpoints = list(self.conn.identity.endpoints(**filters))
endpoint = None
if len(endpoints) > 1:
self.fail_json(msg='Service %s, interface %s and region %s are '
'not unique' %
- (service_name_or_id, interface, region))
+ (service_name_or_id, interface, region_id))
elif len(endpoints) == 1:
endpoint = endpoints[0]
if self.ansible.check_mode:
self.exit_json(changed=self._system_state_change(endpoint))
+ changed = False
if state == 'present':
- if endpoint is None:
- args = {'url': url, 'interface': interface,
- 'service_name_or_id': service.id, 'enabled': enabled,
- 'region': region}
- endpoints = self.conn.create_endpoint(**args)
- # safe because endpoints contains a single item when url is
- # given to self.conn.create_endpoint()
- endpoint = endpoints[0]
-
+ if not endpoint:
+ args = {
+ 'url': url,
+ 'interface': interface,
+ 'service_id': service.id,
+ 'enabled': enabled,
+ 'region_id': region_id
+ }
+
+ endpoint = self.conn.identity.create_endpoint(**args)
+ changed = True
+ elif self._needs_update(endpoint):
+ endpoint = self.conn.identity.update_endpoint(
+ endpoint.id, url=url, enabled=enabled)
changed = True
- else:
- if self._needs_update(endpoint):
- endpoint = self.conn.update_endpoint(
- endpoint.id, url=url, enabled=enabled)
- changed = True
- else:
- changed = False
+
self.exit_json(changed=changed,
- endpoint=endpoint)
+ endpoint=endpoint.to_dict(computed=False))
- elif state == 'absent':
- if endpoint is None:
- changed = False
- else:
- self.conn.delete_endpoint(endpoint.id)
- changed = True
- self.exit_json(changed=changed)
+ elif state == 'absent' and endpoint:
+ self.conn.identity.delete_endpoint(endpoint.id)
+ changed = True
+
+ self.exit_json(changed=changed)
def main():
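The endpoint module now resolves the service through the identity proxy and filters existing endpoints by service_id, interface and region_id before deciding whether to create or update. A minimal sketch of that lookup-then-create flow; the service name, region ID and URL are illustrative:

import openstack

conn = openstack.connect(cloud='devstack')

service = conn.identity.find_service('cinder')  # accepts name or ID
endpoints = list(conn.identity.endpoints(
    service_id=service.id, interface='public', region_id='RegionOne'))

if not endpoints:
    endpoint = conn.identity.create_endpoint(
        service_id=service.id, interface='public', region_id='RegionOne',
        url='http://controller:8776/v3', enabled=True)
else:
    endpoint = endpoints[0]
print(endpoint.to_dict(computed=False))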
diff --git a/ansible_collections/openstack/cloud/plugins/modules/federation_idp.py b/ansible_collections/openstack/cloud/plugins/modules/federation_idp.py
index 35606cca7..6c5674791 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/federation_idp.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/federation_idp.py
@@ -1,58 +1,57 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
----
+DOCUMENTATION = r'''
module: federation_idp
-short_description: manage a federation Identity Provider
+short_description: Manage an identity provider in an OpenStack cloud
author: OpenStack Ansible SIG
description:
- - Manage a federation Identity Provider.
+ - Create, update or delete an identity provider of the OpenStack
+ identity (Keystone) service.
options:
- name:
- description:
- - The name of the Identity Provider.
- type: str
- required: true
- aliases: ['id']
- state:
- description:
- - Whether the Identity Provider should be C(present) or C(absent).
- choices: ['present', 'absent']
- default: present
- type: str
description:
description:
- - The description of the Identity Provider.
+ - The description of the identity provider.
type: str
domain_id:
description:
- - The ID of a domain that is associated with the Identity Provider.
- Federated users that authenticate with the Identity Provider will be
+ - The ID of a domain that is associated with the identity provider.
+ - Federated users that authenticate with the identity provider will be
created under the domain specified.
- - Required when creating a new Identity Provider.
+ - Required when creating a new identity provider.
type: str
- enabled:
+ id:
+ description:
+ - The ID (and name) of the identity provider.
+ type: str
+ required: true
+ aliases: ['name']
+ is_enabled:
description:
- - Whether the Identity Provider is enabled or not.
- - Will default to C(true) when creating a new Identity Provider.
+ - Whether the identity provider is enabled or not.
+ - Will default to C(false) when creating a new identity provider.
type: bool
- aliases: ['is_enabled']
+ aliases: ['enabled']
remote_ids:
description:
- - "List of the unique Identity Provider's remote IDs."
- - Will default to an empty list when creating a new Identity Provider.
+ - "List of the unique identity provider's remote IDs."
+ - Will default to an empty list when creating a new identity provider.
type: list
elements: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
+ state:
+ description:
+ - Whether the identity provider should be C(present) or C(absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
+EXAMPLES = r'''
- name: Create an identity provider
openstack.cloud.federation_idp:
cloud: example_cloud
@@ -60,7 +59,7 @@ EXAMPLES = '''
domain_id: 0123456789abcdef0123456789abcdef
description: 'My example IDP'
remote_ids:
- - 'https://auth.example.com/auth/realms/ExampleRealm'
+ - 'https://auth.example.com/auth/realms/ExampleRealm'
- name: Delete an identity provider
openstack.cloud.federation_idp:
@@ -69,150 +68,86 @@ EXAMPLES = '''
state: absent
'''
-RETURN = '''
+RETURN = r'''
+identity_provider:
+ description: Dictionary describing the identity provider.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ description:
+ description: Identity provider description
+ type: str
+ sample: "demodescription"
+ domain_id:
+ description: Domain to which the identity provider belongs
+ type: str
+ sample: "default"
+ id:
+ description: Identity provider ID
+ type: str
+ sample: "test-idp"
+ is_enabled:
+ description: Indicates whether the identity provider is enabled
+ type: bool
+ name:
+ description: Name of the identity provider, equals its ID.
+ type: str
+ sample: "test-idp"
+ remote_ids:
+ description: Remote IDs associated with the identity provider
+ type: list
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+from ansible_collections.openstack.cloud.plugins.module_utils.resource import StateMachine
-class IdentityFederationIdpModule(OpenStackModule):
+class IdentityProviderModule(OpenStackModule):
argument_spec = dict(
- name=dict(required=True, aliases=['id']),
- state=dict(default='present', choices=['absent', 'present']),
description=dict(),
domain_id=dict(),
- enabled=dict(type='bool', aliases=['is_enabled']),
+ id=dict(required=True, aliases=['name']),
+ is_enabled=dict(type='bool', aliases=['enabled']),
remote_ids=dict(type='list', elements='str'),
+ state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = dict(
supports_check_mode=True,
)
- def normalize_idp(self, idp):
- """
- Normalizes the IDP definitions so that the outputs are consistent with the
- parameters
-
- - "enabled" (parameter) == "is_enabled" (SDK)
- - "name" (parameter) == "id" (SDK)
- """
- if idp is None:
- return None
-
- _idp = idp.to_dict()
- _idp['enabled'] = idp['is_enabled']
- _idp['name'] = idp['id']
- return _idp
-
- def delete_identity_provider(self, idp):
- """
- Delete an existing Identity Provider
-
- returns: the "Changed" state
- """
- if idp is None:
- return False
-
- if self.ansible.check_mode:
- return True
-
- self.conn.identity.delete_identity_provider(idp)
- return True
-
- def create_identity_provider(self, name):
- """
- Create a new Identity Provider
-
- returns: the "Changed" state and the new identity provider
- """
-
- if self.ansible.check_mode:
- return True, None
-
- description = self.params.get('description')
- enabled = self.params.get('enabled')
- domain_id = self.params.get('domain_id')
- remote_ids = self.params.get('remote_ids')
-
- if enabled is None:
- enabled = True
- if remote_ids is None:
- remote_ids = []
-
- attributes = {
- 'domain_id': domain_id,
- 'enabled': enabled,
- 'remote_ids': remote_ids,
- }
- if description is not None:
- attributes['description'] = description
-
- idp = self.conn.identity.create_identity_provider(id=name, **attributes)
- return (True, idp)
-
- def update_identity_provider(self, idp):
- """
- Update an existing Identity Provider
-
- returns: the "Changed" state and the new identity provider
- """
-
- description = self.params.get('description')
- enabled = self.params.get('enabled')
- domain_id = self.params.get('domain_id')
- remote_ids = self.params.get('remote_ids')
-
- attributes = {}
-
- if (description is not None) and (description != idp.description):
- attributes['description'] = description
- if (enabled is not None) and (enabled != idp.is_enabled):
- attributes['enabled'] = enabled
- if (domain_id is not None) and (domain_id != idp.domain_id):
- attributes['domain_id'] = domain_id
- if (remote_ids is not None) and (remote_ids != idp.remote_ids):
- attributes['remote_ids'] = remote_ids
-
- if not attributes:
- return False, idp
-
- if self.ansible.check_mode:
- return True, None
-
- new_idp = self.conn.identity.update_identity_provider(idp, **attributes)
- return (True, new_idp)
-
def run(self):
- """ Module entry point """
-
- name = self.params.get('name')
- state = self.params.get('state')
- changed = False
-
- idp = self.conn.identity.find_identity_provider(name)
-
- if state == 'absent':
- if idp is not None:
- changed = self.delete_identity_provider(idp)
- self.exit_json(changed=changed)
-
- # state == 'present'
+ sm = StateMachine(connection=self.conn,
+ service_name='identity',
+ type_name='identity_provider',
+ sdk=self.sdk)
+
+ kwargs = dict((k, self.params[k])
+ for k in ['state', 'timeout']
+ if self.params[k] is not None)
+
+ kwargs['attributes'] = \
+ dict((k, self.params[k])
+ for k in ['description', 'domain_id', 'id', 'is_enabled',
+ 'remote_ids']
+ if self.params[k] is not None)
+
+ identity_provider, is_changed = \
+ sm(check_mode=self.ansible.check_mode,
+ updateable_attributes=None,
+ non_updateable_attributes=['domain_id'],
+ wait=False,
+ **kwargs)
+
+ if identity_provider is None:
+ self.exit_json(changed=is_changed)
else:
- if idp is None:
- if self.params.get('domain_id') is None:
- self.fail_json(msg='A domain_id must be passed when creating'
- ' an identity provider')
- (changed, idp) = self.create_identity_provider(name)
- idp = self.normalize_idp(idp)
- self.exit_json(changed=changed, identity_provider=idp)
-
- (changed, new_idp) = self.update_identity_provider(idp)
- new_idp = self.normalize_idp(new_idp)
- self.exit_json(changed=changed, identity_provider=new_idp)
+ self.exit_json(
+ changed=is_changed,
+ identity_provider=identity_provider.to_dict(computed=False))
def main():
- module = IdentityFederationIdpModule()
+ module = IdentityProviderModule()
module()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/federation_idp_info.py b/ansible_collections/openstack/cloud/plugins/modules/federation_idp_info.py
index 4fe719494..af4c2ff8c 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/federation_idp_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/federation_idp_info.py
@@ -1,31 +1,26 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
----
+DOCUMENTATION = r'''
module: federation_idp_info
-short_description: Get the information about the available federation identity
- providers
+short_description: Fetch OpenStack federation identity providers
author: OpenStack Ansible SIG
description:
- - Fetch a federation identity provider.
+ - Fetch OpenStack federation identity providers.
options:
- name:
+ id:
description:
- - The name of the identity provider to fetch.
- - If I(name) is specified, the module will return failed if the identity
- provider doesn't exist.
+ - The ID (and name) of the identity provider to fetch.
type: str
- aliases: ['id']
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
+ aliases: ['name']
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
+EXAMPLES = r'''
- name: Fetch a specific identity provider
openstack.cloud.federation_idp_info:
cloud: example_cloud
@@ -36,7 +31,35 @@ EXAMPLES = '''
cloud: example_cloud
'''
-RETURN = '''
+RETURN = r'''
+identity_providers:
+ description: List of dictionaries describing the identity providers.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ description:
+ description: Identity provider description
+ type: str
+ sample: "demodescription"
+ domain_id:
+ description: Domain to which the identity provider belongs
+ type: str
+ sample: "default"
+ id:
+ description: Identity provider ID
+ type: str
+ sample: "test-idp"
+ is_enabled:
+ description: Indicates whether the identity provider is enabled
+ type: bool
+ name:
+ description: Name of the identity provider, equals its ID.
+ type: str
+ sample: "test-idp"
+ remote_ids:
+ description: Remote IDs associated with the identity provider
+ type: list
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -44,40 +67,21 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityFederationIdpInfoModule(OpenStackModule):
argument_spec = dict(
- name=dict(aliases=['id']),
+ id=dict(aliases=['name']),
)
module_kwargs = dict(
supports_check_mode=True
)
- def normalize_idp(self, idp):
- """
- Normalizes the IDP definitions so that the outputs are consistent with the
- parameters
-
- - "enabled" (parameter) == "is_enabled" (SDK)
- - "name" (parameter) == "id" (SDK)
- """
- if idp is None:
- return
-
- _idp = idp.to_dict()
- _idp['enabled'] = idp['is_enabled']
- _idp['name'] = idp['id']
- return _idp
-
def run(self):
- """ Module entry point """
-
- name = self.params.get('name')
-
- if name:
- idp = self.normalize_idp(self.conn.identity.get_identity_provider(name))
- self.exit_json(changed=False, identity_providers=[idp])
-
- else:
- providers = list(map(self.normalize_idp, self.conn.identity.identity_providers()))
- self.exit_json(changed=False, identity_providers=providers)
+ kwargs = dict((k, self.params[k])
+ for k in ['id']
+ if self.params[k] is not None)
+ identity_providers = self.conn.identity.identity_providers(**kwargs)
+ self.exit_json(
+ changed=False,
+ identity_providers=[i.to_dict(computed=False)
+ for i in identity_providers])
def main():
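federation_idp_info now filters server-side by passing id to identity_providers() instead of calling get_identity_provider() and failing when the provider is missing. A minimal sketch (cloud and provider names are illustrative):

import openstack

conn = openstack.connect(cloud='devstack')

# Without arguments all identity providers are returned; with id the result
# is at most one provider and no error is raised if it does not exist.
for idp in conn.identity.identity_providers(id='test-idp'):
    print(idp.to_dict(computed=False))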
diff --git a/ansible_collections/openstack/cloud/plugins/modules/federation_mapping.py b/ansible_collections/openstack/cloud/plugins/modules/federation_mapping.py
index 6c07a41da..6603a3d29 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/federation_mapping.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/federation_mapping.py
@@ -1,8 +1,10 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: federation_mapping
short_description: Manage a federation mapping
@@ -16,19 +18,13 @@ options:
required: true
type: str
aliases: ['id']
- state:
- description:
- - Whether the mapping should be C(present) or C(absent).
- choices: ['present', 'absent']
- default: present
- type: str
rules:
description:
- - The rules that comprise the mapping. These are pairs of I(local) and
- I(remote) definitions. For more details on how these work please see
+ - The rules that comprise the mapping. These are pairs of I(local) and
+ I(remote) definitions. For more details on how these work please see
the OpenStack documentation
U(https://docs.openstack.org/keystone/latest/admin/federation/mapping_combinations.html).
- - Required if I(state=present)
+ - Required if I(state) is C(present).
type: list
elements: dict
suboptions:
@@ -44,14 +40,19 @@ options:
required: true
type: list
elements: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
+ state:
+ description:
+ - Whether the mapping should be C(present) or C(absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
+notes:
+ - Name equals the ID of a mapping.
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
+EXAMPLES = r'''
- name: Create a new mapping
openstack.cloud.federation_mapping:
cloud: example_cloud
@@ -75,7 +76,23 @@ EXAMPLES = '''
state: absent
'''
-RETURN = '''
+RETURN = r'''
+mapping:
+ description: Dictionary describing the federation mapping.
+ returned: always
+ type: dict
+ contains:
+ id:
+ description: The id of the mapping
+ type: str
+ sample: "ansible-test-mapping"
+ name:
+ description: Name of the mapping. Equal to C(id).
+ type: str
+ sample: "ansible-test-mapping"
+ rules:
+ description: List of rules for the mapping
+ type: list
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -84,108 +101,94 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityFederationMappingModule(OpenStackModule):
argument_spec = dict(
name=dict(required=True, aliases=['id']),
+ rules=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ local=dict(required=True, type='list', elements='dict'),
+ remote=dict(required=True, type='list', elements='dict')
+ )),
state=dict(default='present', choices=['absent', 'present']),
- rules=dict(type='list', elements='dict', options=dict(
- local=dict(required=True, type='list', elements='dict'),
- remote=dict(required=True, type='list', elements='dict')
- )),
)
+
module_kwargs = dict(
required_if=[('state', 'present', ['rules'])],
supports_check_mode=True
)
- def normalize_mapping(self, mapping):
- """
- Normalizes the mapping definitions so that the outputs are consistent with
- the parameters
-
- - "name" (parameter) == "id" (SDK)
- """
- if mapping is None:
- return None
-
- _mapping = mapping.to_dict()
- _mapping['name'] = mapping['id']
- return _mapping
-
- def create_mapping(self, name):
- """
- Attempt to create a Mapping
+ def run(self):
+ state = self.params['state']
- returns: A tuple containing the "Changed" state and the created mapping
- """
+ id = self.params['name']
+ mapping = self.conn.identity.find_mapping(id)
if self.ansible.check_mode:
- return (True, None)
+ self.exit_json(changed=self._will_change(state, mapping))
- rules = self.params.get('rules')
+ if state == 'present' and not mapping:
+ # Create mapping
+ mapping = self._create()
+ self.exit_json(changed=True,
+ mapping=mapping.to_dict(computed=False))
- mapping = self.conn.identity.create_mapping(id=name, rules=rules)
- return (True, mapping)
-
- def delete_mapping(self, mapping):
- """
- Attempt to delete a Mapping
-
- returns: the "Changed" state
- """
- if mapping is None:
- return False
+ elif state == 'present' and mapping:
+ # Update mapping
+ update = self._build_update(mapping)
+ if update:
+ mapping = self._update(mapping, update)
- if self.ansible.check_mode:
- return True
+ self.exit_json(changed=bool(update),
+ mapping=mapping.to_dict(computed=False))
- self.conn.identity.delete_mapping(mapping)
- return True
+ elif state == 'absent' and mapping:
+ # Delete mapping
+ self._delete(mapping)
+ self.exit_json(changed=True)
- def update_mapping(self, mapping):
- """
- Attempt to delete a Mapping
+ elif state == 'absent' and not mapping:
+ # Do nothing
+ self.exit_json(changed=False)
- returns: The "Changed" state and the the new mapping
- """
+ def _build_update(self, mapping):
+ update = {}
- current_rules = mapping.rules
- new_rules = self.params.get('rules')
+ if len(self.params['rules']) < 1:
+ self.fail_json(msg='At least one rule must be passed')
- # Nothing to do
- if current_rules == new_rules:
- return (False, mapping)
+ attributes = dict((k, self.params[k]) for k in ['rules']
+ if k in self.params and self.params[k] is not None
+ and self.params[k] != mapping[k])
- if self.ansible.check_mode:
- return (True, None)
+ if attributes:
+ update['attributes'] = attributes
- new_mapping = self.conn.identity.update_mapping(mapping, rules=new_rules)
- return (True, new_mapping)
+ return update
- def run(self):
- """ Module entry point """
+ def _create(self):
+ return self.conn.identity.create_mapping(id=self.params['name'],
+ rules=self.params['rules'])
- name = self.params.get('name')
- state = self.params.get('state')
- changed = False
+ def _delete(self, mapping):
+ self.conn.identity.delete_mapping(mapping.id)
- mapping = self.conn.identity.find_mapping(name)
+ def _update(self, mapping, update):
+ attributes = update.get('attributes')
+ if attributes:
+ mapping = self.conn.identity.update_mapping(mapping.id,
+ **attributes)
- if state == 'absent':
- if mapping is not None:
- changed = self.delete_mapping(mapping)
- self.exit_json(changed=changed)
+ return mapping
- # state == 'present'
+ def _will_change(self, state, mapping):
+ if state == 'present' and not mapping:
+ return True
+ elif state == 'present' and mapping:
+ return bool(self._build_update(mapping))
+ elif state == 'absent' and mapping:
+ return True
else:
- if len(self.params.get('rules')) < 1:
- self.fail_json(msg='At least one rule must be passed')
-
- if mapping is None:
- (changed, mapping) = self.create_mapping(name)
- mapping = self.normalize_mapping(mapping)
- self.exit_json(changed=changed, mapping=mapping)
- else:
- (changed, new_mapping) = self.update_mapping(mapping)
- new_mapping = self.normalize_mapping(new_mapping)
- self.exit_json(mapping=new_mapping, changed=changed)
+ # state == 'absent' and not mapping:
+ return False
def main():
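
For reference, a minimal standalone sketch of the find/create/update flow that the rewritten federation_mapping module follows through openstacksdk's identity proxy; the cloud name, mapping id and rule contents below are illustrative assumptions, not values taken from this patch:

    import openstack

    conn = openstack.connect(cloud="mycloud")  # assumed clouds.yaml entry
    rules = [{"local": [{"user": {"name": "{0}"}}],
              "remote": [{"type": "REMOTE_USER"}]}]  # example rule content

    # Mappings are keyed by id, so the module's name parameter doubles as id.
    mapping = conn.identity.find_mapping("example-mapping")
    if mapping is None:
        mapping = conn.identity.create_mapping(id="example-mapping",
                                               rules=rules)
    elif mapping.rules != rules:
        mapping = conn.identity.update_mapping(mapping.id, rules=rules)
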
diff --git a/ansible_collections/openstack/cloud/plugins/modules/federation_mapping_info.py b/ansible_collections/openstack/cloud/plugins/modules/federation_mapping_info.py
index 2ba317c98..7acc75066 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/federation_mapping_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/federation_mapping_info.py
@@ -1,41 +1,57 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: federation_mapping_info
-short_description: Get the information about the available federation mappings
+short_description: Fetch Keystone federation mappings
author: OpenStack Ansible SIG
description:
- - Fetch a federation mapping.
+ - Fetch Keystone federation mappings.
options:
name:
description:
- - The name of the mapping to fetch.
- - If I(name) is specified, the module will return failed if the mapping
- doesn't exist.
+ - ID or name of the federation mapping.
type: str
aliases: ['id']
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
+notes:
+ - Name equals the ID of a federation mapping.
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-- name: Fetch a specific mapping
+EXAMPLES = r'''
+- name: Fetch all federation mappings
openstack.cloud.federation_mapping_info:
cloud: example_cloud
- name: example_mapping
-- name: Fetch all mappings
+- name: Fetch federation mapping by name
openstack.cloud.federation_mapping_info:
cloud: example_cloud
+ name: example_mapping
'''
-RETURN = '''
+RETURN = r'''
+mappings:
+ description: List of federation mapping dictionaries.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: The id of the mapping
+ type: str
+ sample: "ansible-test-mapping"
+ name:
+ description: Name of the mapping. Equal to C(id).
+ type: str
+ sample: "ansible-test-mapping"
+ rules:
+ description: List of rules for the mapping
+ type: list
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -45,38 +61,29 @@ class IdentityFederationMappingInfoModule(OpenStackModule):
argument_spec = dict(
name=dict(aliases=['id']),
)
+
module_kwargs = dict(
supports_check_mode=True
)
- module_min_sdk_version = "0.44"
-
- def normalize_mapping(self, mapping):
- """
- Normalizes the mapping definitions so that the outputs are consistent with the
- parameters
-
- - "name" (parameter) == "id" (SDK)
- """
- if mapping is None:
- return None
-
- _mapping = mapping.to_dict()
- _mapping['name'] = mapping['id']
- return _mapping
-
def run(self):
- """ Module entry point """
- name = self.params.get('name')
-
- if name:
- mapping = self.normalize_mapping(
- self.conn.identity.get_mapping(name))
- self.exit_json(changed=False, mappings=[mapping])
+ # name is id for federation mappings
+ id = self.params['name']
+
+ if id:
+ # handle id parameter separately because self.conn.identity.\
+ # mappings() does not allow to filter by id
+ # Ref.: https://review.opendev.org/c/openstack/
+ # openstacksdk/+/858522
+ mapping = self.conn.identity.find_mapping(name_or_id=id,
+ ignore_missing=True)
+ mappings = [mapping] if mapping else []
else:
- mappings = list(map(
- self.normalize_mapping, self.conn.identity.mappings()))
- self.exit_json(changed=False, mappings=mappings)
+ mappings = self.conn.identity.mappings()
+
+ self.exit_json(changed=False,
+ mappings=[mapping.to_dict(computed=False)
+ for mapping in mappings])
def main():
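
A short sketch of the lookup strategy noted in the comment above, assuming a clouds.yaml entry named "mycloud" and a mapping id "example-mapping": identity.mappings() cannot filter by id, so a single mapping is resolved with find_mapping() instead.

    import openstack

    conn = openstack.connect(cloud="mycloud")
    # List every mapping ...
    all_mappings = [m.to_dict(computed=False)
                    for m in conn.identity.mappings()]
    # ... or resolve one by its id/name without failing when it is absent.
    one = conn.identity.find_mapping(name_or_id="example-mapping",
                                     ignore_missing=True)
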
diff --git a/ansible_collections/openstack/cloud/plugins/modules/floating_ip.py b/ansible_collections/openstack/cloud/plugins/modules/floating_ip.py
index 6b5fb0d66..873aaefe5 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/floating_ip.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/floating_ip.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -7,74 +8,63 @@ DOCUMENTATION = '''
---
module: floating_ip
author: OpenStack Ansible SIG
-short_description: Add/Remove floating IP from an instance
+short_description: Manage floating IP addresses for OpenStack servers
description:
- - Add or Remove a floating IP to an instance.
- - Returns the floating IP when attaching only if I(wait=true).
- - When detaching a floating IP there might be a delay until an instance does not list the floating IP any more.
+  - Add or remove a floating IP address to or from an OpenStack server.
+ - Returns the floating IP when attaching only if I(wait) is C(true).
+  - When detaching a floating IP, there might be a delay until the server
+    no longer lists the floating IP among its addresses.
options:
- server:
+ fixed_address:
description:
- - The name or ID of the instance to which the IP address
- should be assigned.
- required: true
+      - The fixed IP address of the server that the floating IP address
+        should be attached to.
+ type: str
+ floating_ip_address:
+ description:
+ - A floating IP address to attach or to detach. When I(state) is
+        present, it can be used to specify an IP address to attach.
+ I(floating_ip_address) requires I(network) to be set.
+ type: str
+ nat_destination:
+ description:
+      - The name or ID of the neutron private network that holds the fixed
+        IP which the floating IP should be attached to.
+ aliases: ["fixed_network", "internal_network"]
type: str
network:
description:
- The name or ID of a neutron external network or a nova pool name.
type: str
- floating_ip_address:
+ purge:
description:
- - A floating IP address to attach or to detach. When I(state) is present
- can be used to specify a IP address to attach. I(floating_ip_address)
- requires I(network) to be set.
- type: str
+ - When I(state) is absent, indicates whether or not to delete the
+ floating IP completely, or only detach it from the server.
+ Default is to detach only.
+ type: bool
+ default: 'false'
reuse:
description:
- When I(state) is present, and I(floating_ip_address) is not present,
this parameter can be used to specify whether we should try to reuse
a floating IP address already allocated to the project.
+      - When I(reuse) is C(true), I(network) is defined, and
+        I(floating_ip_address) is undefined, then I(nat_destination) and
+        I(fixed_address) will be ignored.
type: bool
- default: 'no'
- fixed_address:
- description:
- - To which fixed IP of server the floating IP address should be
- attached to.
- type: str
- nat_destination:
+ default: 'false'
+ server:
description:
- - The name or id of a neutron private network that the fixed IP to
- attach floating IP is on
- aliases: ["fixed_network", "internal_network"]
+ - The name or ID of the server to which the IP address
+ should be assigned.
+ required: true
type: str
- wait:
- description:
- - When attaching a floating IP address, specify whether to wait for it to appear as attached.
- - Must be set to C(yes) for the module to return the value of the floating IP when attaching.
- type: bool
- default: 'no'
- timeout:
- description:
- - Time to wait for an IP address to appear as attached. See wait.
- required: false
- default: 60
- type: int
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
- purge:
- description:
- - When I(state) is absent, indicates whether or not to delete the floating
- IP completely, or only detach it from the server. Default is to detach only.
- type: bool
- default: 'no'
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -87,20 +77,20 @@ EXAMPLES = '''
cloud: dguerri
server: cattle001
-# Assign a new floating IP to the instance fixed ip `192.0.2.3` of
+# Assign a new floating IP to the server fixed ip `192.0.2.3` of
# `cattle001`. If a free floating IP is already allocated to the project, it is
# reused; if not, a new one is created.
- openstack.cloud.floating_ip:
cloud: dguerri
state: present
- reuse: yes
+ reuse: true
server: cattle001
network: ext_net
fixed_address: 192.0.2.3
wait: true
timeout: 180
-# Assign a new floating IP from the network `ext_net` to the instance fixed
+# Assign a new floating IP from the network `ext_net` to the server fixed
# ip in network `private_net` of `cattle001`.
- openstack.cloud.floating_ip:
cloud: dguerri
@@ -119,183 +109,388 @@ EXAMPLES = '''
server: cattle001
'''
+RETURN = '''
+floating_ip:
+ description: Dictionary describing the floating ip address.
+ type: dict
+ returned: success
+ contains:
+ created_at:
+ description: Timestamp at which the floating IP was assigned.
+ type: str
+ description:
+ description: The description of a floating IP.
+ type: str
+ dns_domain:
+ description: The DNS domain.
+ type: str
+ dns_name:
+ description: The DNS name.
+ type: str
+ fixed_ip_address:
+ description: The fixed IP address associated with a floating IP address.
+ type: str
+ floating_ip_address:
+ description: The IP address of a floating IP.
+ type: str
+ floating_network_id:
+ description: The id of the network associated with a floating IP.
+ type: str
+ id:
+ description: Id of the floating ip.
+ type: str
+ name:
+ description: Name of the floating ip.
+ type: str
+ port_details:
+ description: |
+ The details of the port that this floating IP associates
+ with. Present if C(fip-port-details) extension is loaded.
+ type: dict
+ port_id:
+ description: The port ID floating ip associated with.
+ type: str
+ project_id:
+ description: The ID of the project this floating IP is associated with.
+ type: str
+ qos_policy_id:
+ description: The ID of the QoS policy attached to the floating IP.
+ type: str
+ revision_number:
+ description: Revision number.
+ type: str
+ router_id:
+ description: The id of the router floating ip associated with.
+ type: str
+ status:
+ description: |
+ The status of a floating IP, which can be 'ACTIVE' or 'DOWN'.
+ type: str
+ subnet_id:
+ description: The id of the subnet the floating ip associated with.
+ type: str
+ tags:
+ description: List of tags.
+ type: list
+ elements: str
+ updated_at:
+ description: Timestamp at which the floating IP was last updated.
+ type: str
+'''
+
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-import itertools
class NetworkingFloatingIPModule(OpenStackModule):
argument_spec = dict(
+ fixed_address=dict(),
+ floating_ip_address=dict(),
+ nat_destination=dict(aliases=['fixed_network', 'internal_network']),
+ network=dict(),
+ purge=dict(type='bool', default=False),
+ reuse=dict(type='bool', default=False),
server=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
- network=dict(required=False, default=None),
- floating_ip_address=dict(required=False, default=None),
- reuse=dict(required=False, type='bool', default=False),
- fixed_address=dict(required=False, default=None),
- nat_destination=dict(required=False, default=None,
- aliases=['fixed_network', 'internal_network']),
- wait=dict(required=False, type='bool', default=False),
- timeout=dict(required=False, type='int', default=60),
- purge=dict(required=False, type='bool', default=False),
)
module_kwargs = dict(
required_if=[
['state', 'absent', ['floating_ip_address']]
],
- required_by=dict(
- floating_ip_address=('network',)
- )
+ required_by={
+            'floating_ip_address': ('network',),
+ }
)
- def _get_floating_ip(self, floating_ip_address):
- f_ips = self.conn.search_floating_ips(
- filters={'floating_ip_address': floating_ip_address})
+ def run(self):
+ self._init()
+ if self.params['state'] == 'present':
+ self._create_and_attach()
- if not f_ips:
- return None
+ else: # self.params['state'] == 'absent'
+ self._detach_and_delete()
- return f_ips[0]
+ def _create_and_attach(self):
+ changed = False
+ fixed_address = self.params['fixed_address']
+ floating_ip_address = self.params['floating_ip_address']
+ nat_destination_name_or_id = self.params['nat_destination']
+ network_id = self.network['id'] if self.network else None
- def _list_floating_ips(self, server):
- return itertools.chain.from_iterable([
- (addr['addr'] for addr in server.addresses[net] if addr['OS-EXT-IPS:type'] == 'floating')
- for net in server.addresses
- ])
+ ips = self._find_ips(
+ server=self.server,
+ floating_ip_address=floating_ip_address,
+ network_id=network_id,
+ fixed_address=fixed_address,
+ nat_destination_name_or_id=nat_destination_name_or_id)
- def _match_floating_ip(self, server,
- floating_ip_address,
- network_id,
- fixed_address,
- nat_destination):
+        # The first matching floating ip satisfies our requirements
+ ip = ips[0] if ips else None
if floating_ip_address:
- return self._get_floating_ip(floating_ip_address)
- elif not fixed_address and nat_destination:
- nat_destination_name = self.conn.get_network(nat_destination)['name']
- return next(
- (self._get_floating_ip(addr['addr'])
- for addr in server.addresses.get(nat_destination_name, [])
- if addr['OS-EXT-IPS:type'] == 'floating'),
- None)
- else:
- # not floating_ip_address and (fixed_address or not nat_destination)
+ # A specific floating ip address has been requested
- # get any of the floating ips that matches fixed_address and/or network
- f_ip_addrs = self._list_floating_ips(server)
- f_ips = [f_ip for f_ip in self.conn.list_floating_ips() if f_ip['floating_ip_address'] in f_ip_addrs]
- return next(
- (f_ip for f_ip in f_ips
- if ((fixed_address and f_ip.fixed_ip_address == fixed_address) or not fixed_address)
- and ((network_id and f_ip.network == network_id) or not network_id)),
- None)
+ if not ip:
+ # If a specific floating ip address has been requested
+ # and it does not exist yet then create it
- def run(self):
- server_name_or_id = self.params['server']
- state = self.params['state']
- network = self.params['network']
- floating_ip_address = self.params['floating_ip_address']
- reuse = self.params['reuse']
- fixed_address = self.params['fixed_address']
- nat_destination = self.params['nat_destination']
- wait = self.params['wait']
- timeout = self.params['timeout']
- purge = self.params['purge']
+ # openstacksdk's create_ip requires floating_ip_address
+ # and floating_network_id to be set
+ self.conn.network.create_ip(
+ floating_ip_address=floating_ip_address,
+ floating_network_id=network_id)
+ changed = True
- server = self.conn.get_server(server_name_or_id)
- if not server:
- self.fail_json(
- msg="server {0} not found".format(server_name_or_id))
+ else: # ip
+ # Requested floating ip address exists already
+
+ if ip.port_details and (ip.port_details['status'] == 'ACTIVE') \
+ and (floating_ip_address not in self._filter_ips(
+ self.server)):
+                # The requested floating ip address exists but has been
+                # attached to a different server
+
+                # Fail instead of silently reassigning the address
+                self.fail_json(
+                    msg="Floating ip {0} has been attached to a different "
+                        "server".format(floating_ip_address))
+
+ if not ip \
+ or floating_ip_address not in self._filter_ips(self.server):
+ # Requested floating ip address does not exist or has not been
+ # assigned to server
+
+ self.conn.add_ip_list(
+ server=self.server,
+ ips=[floating_ip_address],
+ wait=self.params['wait'],
+ timeout=self.params['timeout'],
+ fixed_address=fixed_address)
+ changed = True
+ else:
+ # Requested floating ip address has been assigned to server
+ pass
+
+ elif not ips: # and not floating_ip_address
+ # No specific floating ip has been requested and none of the
+ # floating ips which have been assigned to the server matches
+ # requirements
+
+ # add_ips_to_server() will handle several scenarios:
+ #
+ # If a specific floating ip address has been requested then it
+ # will be attached to the server. The floating ip address has
+ # either been created in previous steps or it already existed.
+ # Ref.: https://github.com/openstack/openstacksdk/blob/
+ # 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud
+ # /_floating_ip.py#L985
+ #
+ # If no specific floating ip address has been requested, reuse
+ # is allowed and a network has been given (with ip_pool) from
+ # which floating ip addresses will be drawn, then any existing
+ # floating ip address from ip_pool=network which is not
+ # attached to any other server will be attached to the server.
+ # If no such floating ip address exists or if reuse is not
+ # allowed, then a new floating ip address will be created
+ # within ip_pool=network and attached to the server.
+ # Ref.: https://github.com/openstack/openstacksdk/blob/
+ # 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/
+ # _floating_ip.py#L981
+ #
+ # If no specific floating ip address has been requested and no
+ # network has been given (with ip_pool) from which floating ip
+ # addresses will be taken, then a floating ip address might be
+ # added to the server, refer to _needs_floating_ip() for
+ # details.
+ # Ref.:
+ # * https://github.com/openstack/openstacksdk/blob/
+ # 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/\
+ # _floating_ip.py#L989
+ # * https://github.com/openstack/openstacksdk/blob/
+ # 9d3ee1d32149ba2a8bb3dc894295e180746cdddc/openstack/cloud/
+ # _floating_ip.py#L995
+ #
+ # Both floating_ip_address and network are mutually exclusive
+            # in add_ips_to_server(), i.e. add_ips_to_server() will ignore
+ # floating_ip_address if network is not None. To prefer
+ # attaching a specific floating ip address over assigning any
+ # fip, ip_pool is only defined if floating_ip_address is None.
+ # Ref.: https://github.com/openstack/openstacksdk/blob/
+ # a6b0ece2821ea79330c4067100295f6bdcbe456e/openstack/cloud/
+ # _floating_ip.py#L987
+ self.conn.add_ips_to_server(
+ server=self.server,
+ ip_pool=network_id,
+ ips=None, # No specific floating ip requested
+ reuse=self.params['reuse'],
+ fixed_address=fixed_address,
+ wait=self.params['wait'],
+ timeout=self.params['timeout'],
+ nat_destination=nat_destination_name_or_id)
+ changed = True
+ else:
+ # Found one or more floating ips which satisfy requirements
+ pass
+
+ if changed:
+ # update server details such as addresses
+ self.server = self.conn.compute.get_server(self.server)
+
+ # Update the floating ip resource
+ ips = self._find_ips(
+ self.server, floating_ip_address, network_id,
+ fixed_address, nat_destination_name_or_id)
+
+ # ips can be empty, e.g. when server has no private ipv4
+ # address to which a floating ip address can be attached
+
+ self.exit_json(
+ changed=changed,
+ floating_ip=ips[0].to_dict(computed=False) if ips else None)
+
+ def _detach_and_delete(self):
+ ips = self._find_ips(
+ server=self.server,
+ floating_ip_address=self.params['floating_ip_address'],
+ network_id=self.network['id'] if self.network else None,
+ fixed_address=self.params['fixed_address'],
+ nat_destination_name_or_id=self.params['nat_destination'])
+
+ if not ips:
+ # Nothing to detach
+ self.exit_json(changed=False)
+
+ changed = False
+ for ip in ips:
+ if ip['fixed_ip_address']:
+ # Silently ignore that ip might not be attached to server
+ #
+ # self.conn.network.update_ip(ip_id, port_id=None) does not
+ # handle nova network but self.conn.detach_ip_from_server()
+ # does so
+ self.conn.detach_ip_from_server(server_id=self.server['id'],
+ floating_ip_id=ip['id'])
+
+ # OpenStackSDK sets {"port_id": None} to detach a floating
+ # ip from a device, but there might be a delay until a
+ # server does not list it in addresses any more.
+ changed = True
- # Extract floating ips from server
- f_ip_addrs = self._list_floating_ips(server)
+ if self.params['purge']:
+ self.conn.network.delete_ip(ip['id'])
+ changed = True
- # Get details about requested floating ip
- f_ip = self._get_floating_ip(floating_ip_address) if floating_ip_address else None
+ self.exit_json(changed=changed)
- if network:
- network_id = self.conn.get_network(name_or_id=network)["id"]
+ def _filter_ips(self, server):
+ # Extract floating ips from server
+
+ def _flatten(lists):
+ return [item for sublist in lists for item in sublist]
+
+ if server['addresses'] is None:
+ # fetch server with details
+ server = self.conn.compute.get_server(server)
+
+ if not server['addresses']:
+ return []
+
+ # Returns a list not an iterator here because
+ # it is iterated several times below
+ return [address['addr']
+ for address in _flatten(server['addresses'].values())
+ if address['OS-EXT-IPS:type'] == 'floating']
+
+ def _find_ips(self,
+ server,
+ floating_ip_address,
+ network_id,
+ fixed_address,
+ nat_destination_name_or_id):
+ # Check which floating ips matches our requirements.
+ # They might or might not be attached to our server.
+ if floating_ip_address:
+ # A specific floating ip address has been requested
+ ip = self.conn.network.find_ip(floating_ip_address)
+ return [ip] if ip else []
+ elif (not fixed_address and nat_destination_name_or_id):
+ # No specific floating ip and no specific fixed ip have been
+ # requested but a private network (nat_destination) has been
+ # given where the floating ip should be attached to.
+ return self._find_ips_by_nat_destination(
+ server, nat_destination_name_or_id)
else:
- network_id = None
-
- if state == 'present':
- if floating_ip_address and f_ip and floating_ip_address in f_ip_addrs:
- # Floating ip address has been assigned to server
- self.exit_json(changed=False, floating_ip=f_ip)
-
- if f_ip and f_ip['attached'] and floating_ip_address not in f_ip_addrs:
- # Requested floating ip has been attached to different server
- self.fail_json(msg="floating-ip {floating_ip_address} already has been attached to different server"
- .format(floating_ip_address=floating_ip_address))
-
- if not floating_ip_address:
- # No specific floating ip requested, i.e. if any floating ip is already assigned to server,
- # check that it matches requirements.
-
- if not fixed_address and nat_destination:
- # Check if we have any floating ip on the given nat_destination network
- nat_destination_name = self.conn.get_network(nat_destination)['name']
- for addr in server.addresses.get(nat_destination_name, []):
- if addr['OS-EXT-IPS:type'] == 'floating':
- # A floating ip address has been assigned to the requested nat_destination
- f_ip = self._get_floating_ip(addr['addr'])
- self.exit_json(changed=False, floating_ip=f_ip)
- # else fixed_address or not nat_destination, hence an
- # analysis of all floating ips of server is required
- f_ips = [f_ip for f_ip in self.conn.list_floating_ips() if f_ip['floating_ip_address'] in f_ip_addrs]
- for f_ip in f_ips:
- if network_id and f_ip.network != network_id:
- # requested network does not match network of floating ip
- continue
-
- if not fixed_address and not nat_destination:
- # any floating ip will fullfil these requirements
- self.exit_json(changed=False, floating_ip=f_ip)
-
- if fixed_address and f_ip.fixed_ip_address == fixed_address:
- # a floating ip address has been assigned that points to the requested fixed_address
- self.exit_json(changed=False, floating_ip=f_ip)
-
- if floating_ip_address and not f_ip:
- # openstacksdk's create_ip requires floating_ip_address and floating_network_id to be set
- self.conn.network.create_ip(floating_ip_address=floating_ip_address, floating_network_id=network_id)
- # Else floating ip either does not exist or has not been attached yet
-
- # Both floating_ip_address and network are mutually exclusive in add_ips_to_server, i.e.
- # add_ips_to_server will ignore floating_ip_address if network is set
- # Ref.: https://github.com/openstack/openstacksdk/blob/a6b0ece2821ea79330c4067100295f6bdcbe456e/openstack/cloud/_floating_ip.py#L987
- server = self.conn.add_ips_to_server(
- server=server,
- ips=floating_ip_address,
- ip_pool=network if not floating_ip_address else None,
- reuse=reuse,
- fixed_address=fixed_address,
- wait=wait,
- timeout=timeout, nat_destination=nat_destination)
-
- # Update the floating ip status
- f_ip = self._match_floating_ip(server, floating_ip_address, network_id, fixed_address, nat_destination)
- self.exit_json(changed=True, floating_ip=f_ip)
-
- elif state == 'absent':
- f_ip = self._match_floating_ip(server, floating_ip_address, network_id, fixed_address, nat_destination)
- if not f_ip:
- # Nothing to detach
- self.exit_json(changed=False)
- changed = False
-
- if f_ip["fixed_ip_address"]:
- self.conn.detach_ip_from_server(server_id=server['id'], floating_ip_id=f_ip['id'])
- # OpenStackSDK sets {"port_id": None} to detach a floating ip from an instance,
- # but there might be a delay until a server does not list it in addresses any more.
-
- # Update the floating IP status
- f_ip = self.conn.get_floating_ip(id=f_ip['id'])
- changed = True
+ # not floating_ip_address
+ # and (fixed_address or not nat_destination_name_or_id)
+
+ # An analysis of all floating ips of server is required
+ return self._find_ips_by_network_id_and_fixed_address(
+ server, fixed_address, network_id)
+
+ def _find_ips_by_nat_destination(self,
+ server,
+ nat_destination_name_or_id):
- if purge:
- self.conn.delete_floating_ip(f_ip['id'])
- self.exit_json(changed=True)
- self.exit_json(changed=changed, floating_ip=f_ip)
+ if not server['addresses']:
+ return None
+
+ # Check if we have any floating ip on
+ # the given nat_destination network
+ nat_destination = self.conn.network.find_network(
+ nat_destination_name_or_id, ignore_missing=False)
+
+ fips_with_nat_destination = [
+ addr for addr
+ in server['addresses'].get(nat_destination['name'], [])
+ if addr['OS-EXT-IPS:type'] == 'floating']
+
+ if not fips_with_nat_destination:
+ return None
+
+ # One or more floating ip addresses have been assigned
+        # to the requested nat_destination; return all of them.
+ return [self.conn.network.find_ip(fip['addr'], ignore_missing=False)
+ for fip in fips_with_nat_destination]
+
+ def _find_ips_by_network_id_and_fixed_address(self,
+ server,
+ fixed_address=None,
+ network_id=None):
+        # Get any of the floating ips that match fixed_address and/or network
+ ips = [ip for ip in self.conn.network.ips()
+ if ip['floating_ip_address'] in self._filter_ips(server)]
+
+ matching_ips = []
+ for ip in ips:
+ if network_id and ip['floating_network_id'] != network_id:
+ # Requested network does not
+ # match network of floating ip
+ continue
+
+ if not fixed_address: # and not nat_destination_name_or_id
+                # Any floating ip will fulfill these requirements
+ matching_ips.append(ip)
+
+ if (fixed_address and ip['fixed_ip_address'] == fixed_address):
+ # A floating ip address has been assigned that
+ # points to the requested fixed_address
+ matching_ips.append(ip)
+
+ return matching_ips
+
+ def _init(self):
+ server_name_or_id = self.params['server']
+ server = self.conn.compute.find_server(server_name_or_id,
+ ignore_missing=False)
+ # fetch server details such as addresses
+ self.server = self.conn.compute.get_server(server)
+
+ network_name_or_id = self.params['network']
+ if network_name_or_id:
+ self.network = self.conn.network.find_network(
+ name_or_id=network_name_or_id, ignore_missing=False)
+ else:
+ self.network = None
def main():
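
A minimal sketch of the attach path taken when no specific floating IP address is requested, mirroring the add_ips_to_server() call above; the cloud, server and network names are assumptions for illustration:

    import openstack

    conn = openstack.connect(cloud="mycloud")
    server = conn.compute.get_server(
        conn.compute.find_server("cattle001", ignore_missing=False))
    pool = conn.network.find_network("ext_net", ignore_missing=False)

    # Reuse a free floating IP from ext_net if one exists, otherwise create
    # one, then attach it and wait until it shows up in the server addresses.
    conn.add_ips_to_server(server=server, ip_pool=pool["id"], ips=None,
                           reuse=True, wait=True, timeout=180)
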
diff --git a/ansible_collections/openstack/cloud/plugins/modules/floating_ip_info.py b/ansible_collections/openstack/cloud/plugins/modules/floating_ip_info.py
index 50e7c879f..e9a9097d7 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/floating_ip_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/floating_ip_info.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 by Open Telekom Cloud, operated by T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -32,23 +32,20 @@ options:
description:
- The name or id of the port to which a floating IP is associated.
type: str
- project_id:
+ project:
description:
- - The ID of the project a floating IP is associated with.
+ - The name or ID of the project a floating IP is associated with.
type: str
+ aliases: ['project_id']
router:
description:
- The name or id of an associated router.
type: str
status:
description:
- - The status of a floating IP, which can be ``ACTIVE``or ``DOWN``.
+ - The status of a floating IP.
choices: ['active', 'down']
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -56,8 +53,9 @@ extends_documentation_fragment:
RETURN = '''
floating_ips:
description: The floating ip objects list.
- type: complex
- returned: On Success.
+ type: list
+ elements: dict
+ returned: success
contains:
created_at:
description: Timestamp at which the floating IP was assigned.
@@ -87,9 +85,10 @@ floating_ips:
description: Name of the floating ip.
type: str
port_details:
- description: The details of the port that this floating IP associates \
- with. Present if ``fip-port-details`` extension is loaded.
- type: str
+ description: |
+ The details of the port that this floating IP associates
+ with. Present if C(fip-port-details) extension is loaded.
+ type: dict
port_id:
description: The port ID floating ip associated with.
type: str
@@ -106,15 +105,16 @@ floating_ips:
description: The id of the router floating ip associated with.
type: str
status:
- description: The status of a floating IP, which can be ``ACTIVE``or ``DOWN``.\
- Can be 'ACTIVE' and 'DOWN'.
+ description: |
+ The status of a floating IP, which can be 'ACTIVE' or 'DOWN'.
type: str
subnet_id:
description: The id of the subnet the floating ip associated with.
type: str
tags:
description: List of tags.
- type: str
+ type: list
+ elements: str
updated_at:
description: Timestamp at which the floating IP was last updated.
type: str
@@ -141,60 +141,56 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class FloatingIPInfoModule(OpenStackModule):
argument_spec = dict(
- description=dict(required=False),
- fixed_ip_address=dict(required=False),
- floating_ip_address=dict(required=False),
- floating_network=dict(required=False),
- port=dict(required=False),
- project_id=dict(required=False),
- router=dict(required=False),
- status=dict(required=False, choices=['active', 'down']),
+ description=dict(),
+ fixed_ip_address=dict(),
+ floating_ip_address=dict(),
+ floating_network=dict(),
+ port=dict(),
+ project=dict(aliases=['project_id']),
+ router=dict(),
+ status=dict(choices=['active', 'down']),
)
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
+ query = dict((k, self.params[k])
+ for k in ['description', 'fixed_ip_address',
+ 'floating_ip_address']
+ if self.params[k] is not None)
+
+ for k in ['port', 'router']:
+ if self.params[k]:
+ k_id = '{0}_id'.format(k)
+ find_name = 'find_{0}'.format(k)
+ query[k_id] = getattr(self.conn.network, find_name)(
+ name_or_id=self.params[k], ignore_missing=False)['id']
+
+ floating_network_name_or_id = self.params['floating_network']
+ if floating_network_name_or_id:
+ query['floating_network_id'] = self.conn.network.find_network(
+ name_or_id=floating_network_name_or_id,
+ ignore_missing=False)['id']
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id:
+ project = self.conn.identity.find_project(project_name_or_id)
+ if project:
+ query['project_id'] = project['id']
+ else:
+ # caller might not have permission to query projects
+ # so assume she gave a project id
+ query['project_id'] = project_name_or_id
- description = self.params['description']
- fixed_ip_address = self.params['fixed_ip_address']
- floating_ip_address = self.params['floating_ip_address']
- floating_network = self.params['floating_network']
- port = self.params['port']
- project_id = self.params['project_id']
- router = self.params['router']
status = self.params['status']
-
- query = {}
- if description:
- query['description'] = description
- if fixed_ip_address:
- query['fixed_ip_address'] = fixed_ip_address
- if floating_ip_address:
- query['floating_ip_address'] = floating_ip_address
- if floating_network:
- try:
- query['floating_network_id'] = self.conn.network.find_network(name_or_id=floating_network,
- ignore_missing=False).id
- except self.sdk.exceptions.ResourceNotFound:
- self.fail_json(msg="floating_network not found")
- if port:
- try:
- query['port_id'] = self.conn.network.find_port(name_or_id=port, ignore_missing=False).id
- except self.sdk.exceptions.ResourceNotFound:
- self.fail_json(msg="port not found")
- if project_id:
- query['project_id'] = project_id
- if router:
- try:
- query['router_id'] = self.conn.network.find_router(name_or_id=router, ignore_missing=False).id
- except self.sdk.exceptions.ResourceNotFound:
- self.fail_json(msg="router not found")
if status:
query['status'] = status.upper()
- ips = [ip.to_dict(computed=False) for ip in self.conn.network.ips(**query)]
- self.exit_json(changed=False, floating_ips=ips)
+ self.exit_json(
+ changed=False,
+ floating_ips=[ip.to_dict(computed=False)
+ for ip in self.conn.network.ips(**query)])
def main():
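
The module above reduces its parameters to a query dict for network.ips(); a standalone equivalent under assumed values (network name and status are examples) looks like this:

    import openstack

    conn = openstack.connect(cloud="mycloud")  # assumed clouds.yaml entry
    query = {
        "floating_network_id": conn.network.find_network(
            name_or_id="ext_net", ignore_missing=False)["id"],
        "status": "ACTIVE",
    }
    floating_ips = [ip.to_dict(computed=False)
                    for ip in conn.network.ips(**query)]
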
diff --git a/ansible_collections/openstack/cloud/plugins/modules/group_assignment.py b/ansible_collections/openstack/cloud/plugins/modules/group_assignment.py
index ce8f28e12..19e9e21bf 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/group_assignment.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/group_assignment.py
@@ -1,45 +1,43 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: group_assignment
-short_description: Associate OpenStack Identity users and groups
+short_description: Assign OpenStack identity users to groups
author: OpenStack Ansible SIG
description:
- - Add and remove users from groups
+ - Add and remove OpenStack identity (Keystone) users to/from groups.
options:
- user:
- description:
- - Name or id for the user
- required: true
- type: str
- group:
- description:
- - Name or id for the group.
- required: true
- type: str
- state:
- description:
- - Should the user be present or absent in the group
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ group:
+ description:
+ - Name or ID for the group.
+ required: true
+ type: str
+ state:
+ description:
+ - Should the user be present or absent in the group.
+ choices: [present, absent]
+ default: present
+ type: str
+ user:
+ description:
+ - Name or ID for the user.
+ required: true
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Add the demo user to the demo group
-- openstack.cloud.group_assignment:
- cloud: mycloud
- user: demo
- group: demo
+EXAMPLES = r'''
+- name: Add demo_user user to demo_group group
+ openstack.cloud.group_assignment:
+ cloud: mycloud
+ user: demo_user
+ group: demo_group
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -47,44 +45,42 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityGroupAssignment(OpenStackModule):
argument_spec = dict(
- user=dict(required=True),
group=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
+ user=dict(required=True),
)
module_kwargs = dict(
supports_check_mode=True
)
- def _system_state_change(self, state, in_group):
- if state == 'present' and not in_group:
- return True
- if state == 'absent' and in_group:
- return True
- return False
-
def run(self):
- user = self.params['user']
- group = self.params['group']
- state = self.params['state']
+ user_name_or_id = self.params['user']
+ user = self.conn.identity.find_user(user_name_or_id,
+ ignore_missing=False)
- in_group = self.conn.is_user_in_group(user, group)
+ group_name_or_id = self.params['group']
+ group = self.conn.identity.find_group(group_name_or_id,
+ ignore_missing=False)
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, in_group))
-
- changed = False
- if state == 'present':
- if not in_group:
- self.conn.add_user_to_group(user, group)
- changed = True
-
- elif state == 'absent':
- if in_group:
- self.conn.remove_user_from_group(user, group)
- changed = True
+ is_user_in_group = \
+ self.conn.identity.check_user_in_group(user, group)
- self.exit_json(changed=changed)
+ state = self.params['state']
+ if self.ansible.check_mode:
+ self.exit_json(
+ changed=(
+ (state == 'present' and not is_user_in_group)
+ or (state == 'absent' and is_user_in_group)))
+
+ if state == 'present' and not is_user_in_group:
+ self.conn.identity.add_user_to_group(user, group)
+ self.exit_json(changed=True)
+ elif state == 'absent' and is_user_in_group:
+ self.conn.identity.remove_user_from_group(user, group)
+ self.exit_json(changed=True)
+ else:
+ self.exit_json(changed=False)
def main():
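
The new run() method is a thin idempotency wrapper around four identity proxy calls; a minimal sketch with assumed user and group names:

    import openstack

    conn = openstack.connect(cloud="mycloud")  # assumed clouds.yaml entry
    user = conn.identity.find_user("demo_user", ignore_missing=False)
    group = conn.identity.find_group("demo_group", ignore_missing=False)

    # Only add the user when the membership is missing, so reruns are no-ops.
    if not conn.identity.check_user_in_group(user, group):
        conn.identity.add_user_to_group(user, group)
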
diff --git a/ansible_collections/openstack/cloud/plugins/modules/host_aggregate.py b/ansible_collections/openstack/cloud/plugins/modules/host_aggregate.py
index 4c95fd299..c57e739fe 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/host_aggregate.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/host_aggregate.py
@@ -1,4 +1,6 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright 2016 Jakub Jursa <jakub.jursa1@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -35,10 +37,6 @@ options:
choices: [present, absent]
default: present
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -76,16 +74,22 @@ EXAMPLES = '''
RETURN = r'''
aggregate:
description: A host aggregate resource.
- type: complex
+ type: dict
returned: On success, when I(state) is present
contains:
availability_zone:
description: Availability zone of the aggregate
type: str
returned: always
- deleted:
- description: Whether or not the resource is deleted
- type: bool
+ created_at:
+ description: The date and time when the resource was created
+ type: str
+ returned: always
+ deleted_at:
+ description:
+ - The date and time when the resource was deleted
+ - Null unless I(is_deleted) is true
+ type: str
returned: always
hosts:
description: Hosts belonging to the aggregate
@@ -95,6 +99,10 @@ aggregate:
description: The UUID of the aggregate.
type: str
returned: always
+ is_deleted:
+ description: Whether or not the resource is deleted
+ type: bool
+ returned: always
metadata:
description: Metadata attached to the aggregate
type: str
@@ -103,6 +111,14 @@ aggregate:
description: Name of the aggregate
type: str
returned: always
+ updated_at:
+ description: The date and time when the resource was updated
+ type: str
+ returned: always
+ uuid:
+ description: UUID of the aggregate
+ type: str
+ returned: always
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -111,9 +127,9 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ComputeHostAggregateModule(OpenStackModule):
argument_spec = dict(
name=dict(required=True),
- metadata=dict(required=False, default=None, type='dict'),
- availability_zone=dict(required=False, default=None),
- hosts=dict(required=False, default=None, type='list', elements='str'),
+ metadata=dict(type='dict'),
+ availability_zone=dict(),
+ hosts=dict(type='list', elements='str'),
purge_hosts=dict(default=True, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
)
@@ -122,14 +138,6 @@ class ComputeHostAggregateModule(OpenStackModule):
supports_check_mode=True
)
- def _find_aggregate(self, name_or_id):
- aggregates = self.conn.search_aggregates(name_or_id=name_or_id)
- if len(aggregates) == 1:
- return aggregates[0]
- elif len(aggregates) == 0:
- return None
- raise Exception("Aggregate is not unique, this should be impossible")
-
def _needs_update(self, aggregate):
new_metadata = self.params['metadata'] or {}
@@ -170,14 +178,14 @@ class ComputeHostAggregateModule(OpenStackModule):
hosts_to_add = set(hosts) - set(aggregate['hosts'] or [])
for host in hosts_to_add:
- self.conn.add_host_to_aggregate(aggregate.id, host)
+ self.conn.compute.add_host_to_aggregate(aggregate.id, host)
if not purge_hosts:
return
hosts_to_remove = set(aggregate["hosts"] or []) - set(hosts)
for host in hosts_to_remove:
- self.conn.remove_host_from_aggregate(aggregate.id, host)
+ self.conn.compute.remove_host_from_aggregate(aggregate.id, host)
def run(self):
name = self.params['name']
@@ -190,7 +198,7 @@ class ComputeHostAggregateModule(OpenStackModule):
if metadata is not None:
metadata.pop('availability_zone', None)
- aggregate = self._find_aggregate(name)
+ aggregate = self.conn.compute.find_aggregate(name)
if self.ansible.check_mode:
self.exit_json(changed=self._system_state_change(aggregate))
@@ -198,31 +206,33 @@ class ComputeHostAggregateModule(OpenStackModule):
changed = False
if state == 'present':
if aggregate is None:
- aggregate = self.conn.create_aggregate(
+ aggregate = self.conn.compute.create_aggregate(
name=name, availability_zone=availability_zone)
self._update_hosts(aggregate, hosts, False)
if metadata:
- self.conn.set_aggregate_metadata(aggregate.id, metadata)
+ self.conn.compute.set_aggregate_metadata(aggregate, metadata)
changed = True
elif self._needs_update(aggregate):
if availability_zone is not None:
- aggregate = self.conn.update_aggregate(
- aggregate.id, name=name,
+ aggregate = self.conn.compute.update_aggregate(
+ aggregate, name=name,
availability_zone=availability_zone)
if metadata is not None:
metas = metadata
for i in set(aggregate.metadata.keys() - set(metadata.keys())):
if i != 'availability_zone':
metas[i] = None
- self.conn.set_aggregate_metadata(aggregate.id, metas)
+ self.conn.compute.set_aggregate_metadata(aggregate, metas)
self._update_hosts(aggregate, hosts, purge_hosts)
changed = True
- aggregate = self._find_aggregate(name)
+ aggregate = self.conn.compute.find_aggregate(name)
+ if aggregate:
+ aggregate = aggregate.to_dict(computed=False)
self.exit_json(changed=changed, aggregate=aggregate)
elif state == 'absent' and aggregate is not None:
self._update_hosts(aggregate, [], True)
- self.conn.delete_aggregate(aggregate.id)
+ self.conn.compute.delete_aggregate(aggregate.id)
changed = True
self.exit_json(changed=changed)
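
The switch from cloud-layer helpers to the compute proxy shown above boils down to calls like the following sketch; the aggregate, availability zone and host names are placeholders:

    import openstack

    conn = openstack.connect(cloud="mycloud")  # assumed clouds.yaml entry
    aggregate = conn.compute.find_aggregate("db-aggregate")
    if aggregate is None:
        aggregate = conn.compute.create_aggregate(
            name="db-aggregate", availability_zone="az1")

    conn.compute.add_host_to_aggregate(aggregate.id, "compute-01")
    conn.compute.set_aggregate_metadata(aggregate, {"pinned": "true"})
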
diff --git a/ansible_collections/openstack/cloud/plugins/modules/identity_domain.py b/ansible_collections/openstack/cloud/plugins/modules/identity_domain.py
index 660748c49..df529ea09 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/identity_domain.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/identity_domain.py
@@ -1,98 +1,92 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: identity_domain
-short_description: Manage OpenStack Identity Domains
+short_description: Manage OpenStack identity (Keystone) domains
author: OpenStack Ansible SIG
description:
- - Create, update, or delete OpenStack Identity domains. If a domain
- with the supplied name already exists, it will be updated with the
- new description and enabled attributes.
+ - Create, update or delete OpenStack identity (Keystone) domains.
options:
- name:
- description:
- - Name that has to be given to the instance
- required: true
- type: str
- description:
- description:
- - Description of the domain
- type: str
- enabled:
- description:
- - Is the domain enabled
- type: bool
- default: 'yes'
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - Domain description.
+ type: str
+ is_enabled:
+ description:
+ - Whether this domain is enabled or not.
+ type: bool
+ aliases: ['enabled']
+ name:
+ description:
+ - Domain name.
+ required: true
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create a domain
-- openstack.cloud.identity_domain:
- cloud: mycloud
- state: present
- name: demo
- description: Demo Domain
-
-# Delete a domain
-- openstack.cloud.identity_domain:
- cloud: mycloud
- state: absent
- name: demo
+EXAMPLES = r'''
+- name: Create a domain
+ openstack.cloud.identity_domain:
+ cloud: mycloud
+ state: present
+ name: demo
+ description: Demo Domain
+
+- name: Delete a domain
+ openstack.cloud.identity_domain:
+ cloud: mycloud
+ state: absent
+ name: demo
'''
-RETURN = '''
+RETURN = r'''
domain:
- description: Dictionary describing the domain.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Domain ID.
- type: str
- sample: "474acfe5-be34-494c-b339-50f06aa143e4"
- name:
- description: Domain name.
- type: str
- sample: "demo"
- description:
- description: Domain description.
- type: str
- sample: "Demo Domain"
- enabled:
- description: Domain description.
- type: bool
- sample: True
-
-id:
- description: The domain ID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+ description: Dictionary describing the domain.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ description:
+ description: Domain description.
+ type: str
+ sample: "Demo Domain"
+ id:
+ description: Domain ID.
+ type: str
+ sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+ is_enabled:
+      description: Whether the domain is enabled.
+ type: bool
+ sample: True
+ links:
+ description: The links related to the domain resource
+ type: list
+ name:
+ description: Domain name.
+ type: str
+ sample: "demo"
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+from ansible_collections.openstack.cloud.plugins.module_utils.resource import StateMachine
class IdentityDomainModule(OpenStackModule):
argument_spec = dict(
+ description=dict(),
+ is_enabled=dict(type='bool', aliases=['enabled']),
name=dict(required=True),
- description=dict(default=None),
- enabled=dict(default=True, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
)
@@ -100,70 +94,38 @@ class IdentityDomainModule(OpenStackModule):
supports_check_mode=True
)
- def _needs_update(self, domain):
- if self.params['description'] is not None and \
- domain.description != self.params['description']:
- return True
- if domain.get(
- "is_enabled", domain.get("enabled")) != self.params['enabled']:
- return True
- return False
-
- def _system_state_change(self, domain):
- state = self.params['state']
- if state == 'absent' and domain:
- return True
-
- if state == 'present':
- if domain is None:
- return True
- return self._needs_update(domain)
-
- return False
+ class _StateMachine(StateMachine):
+ def _delete(self, resource, attributes, timeout, wait, **kwargs):
+ # a domain must be disabled before it can be deleted and
+ # openstacksdk's cloud layer delete_domain() will just do that.
+ self.connection.delete_domain(resource['id'])
def run(self):
- name = self.params['name']
- description = self.params['description']
- enabled = self.params['enabled']
- state = self.params['state']
-
- domains = list(self.conn.identity.domains(name=name))
-
- if len(domains) > 1:
- self.fail_json(msg='Domain name %s is not unique' % name)
- elif len(domains) == 1:
- domain = domains[0]
+ sm = self._StateMachine(connection=self.conn,
+ service_name='identity',
+ type_name='domain',
+ sdk=self.sdk)
+
+ kwargs = dict((k, self.params[k])
+ for k in ['state', 'timeout']
+ if self.params[k] is not None)
+
+ kwargs['attributes'] = \
+ dict((k, self.params[k])
+ for k in ['description', 'is_enabled', 'name']
+ if self.params[k] is not None)
+
+ domain, is_changed = sm(check_mode=self.ansible.check_mode,
+ updateable_attributes=None,
+ non_updateable_attributes=None,
+ wait=False,
+ **kwargs)
+
+ if domain is None:
+ self.exit_json(changed=is_changed)
else:
- domain = None
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(domain))
-
- if state == 'present':
- if domain is None:
- domain = self.conn.create_domain(
- name=name, description=description, enabled=enabled)
- changed = True
- else:
- if self._needs_update(domain):
- domain = self.conn.update_domain(
- domain.id, name=name, description=description,
- enabled=enabled)
- changed = True
- else:
- changed = False
- if hasattr(domain, "to_dict"):
- domain = domain.to_dict()
- domain.pop("location")
- self.exit_json(changed=changed, domain=domain, id=domain['id'])
-
- elif state == 'absent':
- if domain is None:
- changed = False
- else:
- self.conn.delete_domain(domain.id)
- changed = True
- self.exit_json(changed=changed)
+ self.exit_json(changed=is_changed,
+ domain=domain.to_dict(computed=False))
def main():
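
Outside the StateMachine helper, the delete path implemented in _delete() corresponds to the following sketch (domain name assumed for illustration); delete_domain() disables the domain first, which Keystone requires before deletion:

    import openstack

    conn = openstack.connect(cloud="mycloud")  # assumed clouds.yaml entry
    domains = list(conn.identity.domains(name="demo"))
    if domains:
        # The cloud-layer helper disables the domain before removing it.
        conn.delete_domain(domains[0]["id"])
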
diff --git a/ansible_collections/openstack/cloud/plugins/modules/identity_domain_info.py b/ansible_collections/openstack/cloud/plugins/modules/identity_domain_info.py
index e0e33cde5..db74fd34b 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/identity_domain_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/identity_domain_info.py
@@ -1,85 +1,70 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: identity_domain_info
-short_description: Retrieve information about one or more OpenStack domains
+short_description: Fetch identity (Keystone) domains from OpenStack cloud
author: OpenStack Ansible SIG
description:
- - Retrieve information about a one or more OpenStack domains
- - This module was called C(openstack.cloud.identity_domain_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.identity_domain_info) module no longer returns C(ansible_facts)!
+ - Fetch identity (Keystone) domains from OpenStack cloud
options:
- name:
- description:
- - Name or ID of the domain
- type: str
- filters:
- description:
- - A dictionary of meta data to use for filtering. Elements of
- this dictionary may be additional dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ filters:
+ description:
+ - A dictionary of meta data to use for filtering.
+ - Elements of this dictionary may be additional dictionaries.
+ type: dict
+ name:
+ description:
+ - Name or ID of the domain
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Gather information about previously created domain
-- openstack.cloud.identity_domain_info:
+EXAMPLES = r'''
+- name: Gather information about previously created domain
+ openstack.cloud.identity_domain_info:
cloud: awesomecloud
- register: result
-- debug:
- msg: "{{ result.openstack_domains }}"
-# Gather information about a previously created domain by name
-- openstack.cloud.identity_domain_info:
+- name: Gather information about a previously created domain by name
+ openstack.cloud.identity_domain_info:
cloud: awesomecloud
name: demodomain
- register: result
-- debug:
- msg: "{{ result.openstack_domains }}"
-# Gather information about a previously created domain with filter
-- openstack.cloud.identity_domain_info:
+- name: Gather information about a previously created domain with filter
+ openstack.cloud.identity_domain_info:
cloud: awesomecloud
name: demodomain
filters:
- enabled: false
- register: result
-- debug:
- msg: "{{ result.openstack_domains }}"
+ is_enabled: false
'''
-
-RETURN = '''
-openstack_domains:
- description: has all the OpenStack information about domains
- returned: always, but can be null
- type: list
- elements: dict
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the domain.
- returned: success
- type: str
- description:
- description: Description of the domain.
- returned: success
- type: str
- enabled:
- description: Flag to indicate if the domain is enabled.
- returned: success
- type: bool
+RETURN = r'''
+domains:
+ description: List of dictionaries describing OpenStack domains
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ description:
+ description: Description of the domain.
+ type: str
+ id:
+ description: Unique UUID.
+ type: str
+ is_enabled:
+ description: Flag to indicate if the domain is enabled.
+ type: bool
+ links:
+ description: The links related to the domain resource
+ type: list
+ name:
+ description: Name given to the domain.
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -87,27 +72,27 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityDomainInfoModule(OpenStackModule):
argument_spec = dict(
- name=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None),
+ filters=dict(type='dict'),
+ name=dict(),
)
module_kwargs = dict(
supports_check_mode=True
)
- deprecated_names = ('openstack.cloud.identity_domain_facts')
-
def run(self):
+ kwargs = {}
name = self.params['name']
- filters = self.params['filters'] or {}
+ if name is not None:
+ kwargs['name_or_id'] = name
- args = {}
- if name:
- args['name_or_id'] = name
- args['filters'] = filters
+ filters = self.params['filters']
+ if filters is not None:
+ kwargs['filters'] = filters
- domains = self.conn.search_domains(**args)
- self.exit_json(changed=False, openstack_domains=domains)
+ self.exit_json(changed=False,
+ domains=[d.to_dict(computed=False)
+ for d in self.conn.search_domains(**kwargs)])
def main():
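
The rewritten run() above forwards name and filters straight to search_domains(); a minimal standalone equivalent with assumed values:

    import openstack

    conn = openstack.connect(cloud="mycloud")  # assumed clouds.yaml entry
    domains = [d.to_dict(computed=False)
               for d in conn.search_domains(name_or_id="demodomain",
                                            filters={"is_enabled": True})]
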
diff --git a/ansible_collections/openstack/cloud/plugins/modules/identity_group.py b/ansible_collections/openstack/cloud/plugins/modules/identity_group.py
index 5b45efa4b..e4de42cbb 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/identity_group.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/identity_group.py
@@ -1,99 +1,97 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: identity_group
-short_description: Manage OpenStack Identity Groups
+short_description: Manage an OpenStack identity (Keystone) group
author: OpenStack Ansible SIG
description:
- - Manage OpenStack Identity Groups. Groups can be created, deleted or
- updated. Only the I(description) value can be updated.
+ - Create, update or delete an OpenStack identity (Keystone) group.
options:
- name:
- description:
- - Group name
- required: true
- type: str
- description:
- description:
- - Group description
- type: str
- domain_id:
- description:
- - Domain id to create the group in if the cloud supports domains.
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - Group description.
+ type: str
+ domain_id:
+ description:
+ - Domain id to create the group in.
+ type: str
+ name:
+ description:
+ - Group name
+ required: true
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create a group named "demo"
-- openstack.cloud.identity_group:
+EXAMPLES = r'''
+- name: Create a group named "demo"
+ openstack.cloud.identity_group:
cloud: mycloud
state: present
name: demo
description: "Demo Group"
domain_id: demoid
-# Update the description on existing "demo" group
-- openstack.cloud.identity_group:
+- name: Update the description on existing demo group
+ openstack.cloud.identity_group:
cloud: mycloud
state: present
name: demo
description: "Something else"
domain_id: demoid
-# Delete group named "demo"
-- openstack.cloud.identity_group:
+- name: Delete group named demo
+ openstack.cloud.identity_group:
cloud: mycloud
state: absent
name: demo
'''
-RETURN = '''
+RETURN = r'''
group:
- description: Dictionary describing the group.
- returned: On success when I(state) is 'present'.
- type: complex
- contains:
- id:
- description: Unique group ID
- type: str
- sample: "ee6156ff04c645f481a6738311aea0b0"
- name:
- description: Group name
- type: str
- sample: "demo"
- description:
- description: Group description
- type: str
- sample: "Demo Group"
- domain_id:
- description: Domain for the group
- type: str
- sample: "default"
+ description: Dictionary describing the identity group.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ description:
+ description: Group description
+ type: str
+ sample: "Demo Group"
+ domain_id:
+ description: Domain for the group
+ type: str
+ sample: "default"
+ id:
+ description: Unique group ID
+ type: str
+ sample: "ee6156ff04c645f481a6738311aea0b0"
+ name:
+ description: Group name
+ type: str
+ sample: "demo"
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+from ansible_collections.openstack.cloud.plugins.module_utils.resource import StateMachine
class IdentityGroupModule(OpenStackModule):
argument_spec = dict(
+ description=dict(),
+ domain_id=dict(),
name=dict(required=True),
- description=dict(required=False, default=None),
- domain_id=dict(required=False, default=None),
state=dict(default='present', choices=['absent', 'present']),
)
@@ -101,51 +99,40 @@ class IdentityGroupModule(OpenStackModule):
supports_check_mode=True
)
- def _system_state_change(self, state, description, group):
- if state == 'present' and not group:
- return True
- if state == 'present' and description is not None and group.description != description:
- return True
- if state == 'absent' and group:
- return True
- return False
+ class _StateMachine(StateMachine):
+ def _find(self, attributes, **kwargs):
+ kwargs = dict((k, attributes[k])
+ for k in ['domain_id']
+ if k in attributes and attributes[k] is not None)
- def run(self):
- name = self.params.get('name')
- description = self.params.get('description')
- state = self.params.get('state')
+ return self.find_function(attributes['name'], **kwargs)
- domain_id = self.params.pop('domain_id')
-
- if domain_id:
- group = self.conn.get_group(name, filters={'domain_id': domain_id})
+ def run(self):
+ sm = self._StateMachine(connection=self.conn,
+ service_name='identity',
+ type_name='group',
+ sdk=self.sdk)
+
+ kwargs = dict((k, self.params[k])
+ for k in ['state', 'timeout']
+ if self.params[k] is not None)
+
+ kwargs['attributes'] = \
+ dict((k, self.params[k])
+ for k in ['description', 'domain_id', 'name']
+ if self.params[k] is not None)
+
+ group, is_changed = sm(check_mode=self.ansible.check_mode,
+ updateable_attributes=None,
+ non_updateable_attributes=['domain_id'],
+ wait=False,
+ **kwargs)
+
+ if group is None:
+ self.exit_json(changed=is_changed)
else:
- group = self.conn.get_group(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, description, group))
-
- if state == 'present':
- if group is None:
- group = self.conn.create_group(
- name=name, description=description, domain=domain_id)
- changed = True
- else:
- if description is not None and group.description != description:
- group = self.conn.update_group(
- group.id, description=description)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed, group=group)
-
- elif state == 'absent':
- if group is None:
- changed = False
- else:
- self.conn.delete_group(group.id)
- changed = True
- self.exit_json(changed=changed)
+ self.exit_json(changed=is_changed,
+ group=group.to_dict(computed=False))
def main():
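
Because the reworked identity_group module supports check mode, a dry run can preview whether the group would be created or updated before anything changes. A minimal sketch, assuming a cloud entry named "mycloud" and a domain id "demoid":

    - name: Preview changes to the demo group
      openstack.cloud.identity_group:
        cloud: mycloud
        state: present
        name: demo
        description: "Demo Group"
        domain_id: demoid
      check_mode: true
      register: preview

    - name: Report whether a change is pending
      ansible.builtin.debug:
        msg: "Group would change: {{ preview.changed }}"
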
diff --git a/ansible_collections/openstack/cloud/plugins/modules/identity_group_info.py b/ansible_collections/openstack/cloud/plugins/modules/identity_group_info.py
index 68f00d73a..33240ddca 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/identity_group_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/identity_group_info.py
@@ -1,113 +1,77 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2019, Phillipe Smith <phillipelnx@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: identity_group_info
-short_description: Retrieve info about one or more OpenStack groups
+short_description: Fetch OpenStack identity (Keystone) groups
author: OpenStack Ansible SIG
description:
- - Retrieve info about a one or more OpenStack groups.
+ - Fetch OpenStack identity (Keystone) groups.
options:
- name:
- description:
- - Name or ID of the group.
- type: str
- domain:
- description:
- - Name or ID of the domain containing the group if the cloud supports domains
- type: str
- filters:
- description:
- - A dictionary of meta data to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ domain:
+ description:
+ - Name or ID of the domain containing the group.
+ type: str
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ type: dict
+ name:
+ description:
+ - Name or ID of the group.
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Gather info about previously created groups
-- name: gather info
- hosts: localhost
- tasks:
- - name: Gather info about previously created groups
- openstack.cloud.identity_group_info:
- cloud: awesomecloud
- register: openstack_groups
- - debug:
- var: openstack_groups
-
-# Gather info about a previously created group by name
-- name: gather info
- hosts: localhost
- tasks:
- - name: Gather info about a previously created group by name
- openstack.cloud.identity_group_info:
- cloud: awesomecloud
- name: demogroup
- register: openstack_groups
- - debug:
- var: openstack_groups
-
-# Gather info about a previously created group in a specific domain
-- name: gather info
- hosts: localhost
- tasks:
- - name: Gather info about a previously created group in a specific domain
- openstack.cloud.identity_group_info:
- cloud: awesomecloud
- name: demogroup
- domain: admindomain
- register: openstack_groups
- - debug:
- var: openstack_groups
-
-# Gather info about a previously created group in a specific domain with filter
-- name: gather info
- hosts: localhost
- tasks:
- - name: Gather info about a previously created group in a specific domain with filter
- openstack.cloud.identity_group_info:
- cloud: awesomecloud
- name: demogroup
- domain: admindomain
- filters:
- enabled: False
- register: openstack_groups
- - debug:
- var: openstack_groups
+EXAMPLES = r'''
+- name: Gather previously created groups
+ openstack.cloud.identity_group_info:
+ cloud: awesomecloud
+
+- name: Gather previously created groups by name
+ openstack.cloud.identity_group_info:
+ cloud: awesomecloud
+ name: demogroup
+
+- name: Gather previously created groups in a specific domain
+ openstack.cloud.identity_group_info:
+ cloud: awesomecloud
+ domain: admindomain
+
+- name: Gather and filter previously created groups
+ openstack.cloud.identity_group_info:
+ cloud: awesomecloud
+ name: demogroup
+ domain: admindomain
+ filters:
+ is_enabled: False
'''
-
-RETURN = '''
-openstack_groups:
- description: Dictionary describing all the matching groups.
- returned: always, but can be an empty list
- type: complex
- contains:
- name:
- description: Name given to the group.
- returned: success
- type: str
- description:
- description: Description of the group.
- returned: success
- type: str
- id:
- description: Unique UUID.
- returned: success
- type: str
- domain_id:
- description: Domain ID containing the group (keystone v3 clouds only)
- returned: success
- type: bool
+RETURN = r'''
+groups:
+ description: List of dictionaries describing matching identity groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ name:
+ description: Name given to the group.
+ type: str
+ description:
+ description: Description of the group.
+ type: str
+ id:
+ description: Unique UUID.
+ type: str
+ domain_id:
+ description: Domain ID containing the group (keystone v3 clouds only)
+ type: bool
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -115,9 +79,9 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityGroupInfoModule(OpenStackModule):
argument_spec = dict(
- name=dict(required=False, default=None),
- domain=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None),
+ domain=dict(),
+ filters=dict(type='dict'),
+ name=dict(),
)
module_kwargs = dict(
supports_check_mode=True
@@ -125,20 +89,19 @@ class IdentityGroupInfoModule(OpenStackModule):
def run(self):
name = self.params['name']
- domain = self.params['domain']
filters = self.params['filters'] or {}
- args = {}
- if domain:
- dom = self.conn.identity.find_domain(domain)
- if dom:
- args['domain_id'] = dom['id']
- else:
- self.fail_json(msg='Domain name or ID does not exist')
-
- groups = self.conn.search_groups(name, filters, **args)
- # groups is for backward (and forward) compatibility
- self.exit_json(changed=False, groups=groups, openstack_groups=groups)
+ kwargs = {}
+ domain_name_or_id = self.params['domain']
+ if domain_name_or_id:
+ domain = self.conn.identity.find_domain(domain_name_or_id)
+ if domain is None:
+ self.exit_json(changed=False, groups=[])
+ kwargs['domain_id'] = domain['id']
+
+ groups = self.conn.search_groups(name, filters, **kwargs)
+ self.exit_json(changed=False,
+ groups=[g.to_dict(computed=False) for g in groups])
def main():
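
With the new lookup logic, a domain that cannot be found makes the module return an empty groups list instead of failing. A minimal sketch, assuming a cloud entry named "awesomecloud" and a possibly missing domain "admindomain", that only reports groups when some were found:

    - name: Fetch groups from a domain that may not exist
      openstack.cloud.identity_group_info:
        cloud: awesomecloud
        domain: admindomain
      register: result

    - name: Show group names when any were found
      ansible.builtin.debug:
        msg: "{{ result.groups | map(attribute='name') | list }}"
      when: result.groups | length > 0
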
diff --git a/ansible_collections/openstack/cloud/plugins/modules/identity_role.py b/ansible_collections/openstack/cloud/plugins/modules/identity_role.py
index 272d98216..61fe8cf6e 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/identity_role.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/identity_role.py
@@ -1,73 +1,89 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: identity_role
-short_description: Manage OpenStack Identity Roles
+short_description: Manage an OpenStack identity (Keystone) role
author: OpenStack Ansible SIG
description:
- - Manage OpenStack Identity Roles.
+ - Create, update or delete an OpenStack identity (Keystone) role.
options:
- name:
- description:
- - Role Name
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - Role description.
+ type: str
+ domain_id:
+ description:
+ - Domain id to create the role in.
+ type: str
+ name:
+ description:
+ - Role name.
+ required: true
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create a role named "demo"
-- openstack.cloud.identity_role:
+EXAMPLES = r'''
+- name: Create a role named demo
+ openstack.cloud.identity_role:
cloud: mycloud
state: present
name: demo
-# Delete the role named "demo"
-- openstack.cloud.identity_role:
+- name: Delete the role named demo
+ openstack.cloud.identity_role:
cloud: mycloud
state: absent
name: demo
'''
-RETURN = '''
+RETURN = r'''
role:
- description: Dictionary describing the role.
- returned: On success when I(state) is 'present'.
- type: complex
- contains:
- domain_id:
- description: Domain to which the role belongs
- type: str
- sample: default
- id:
- description: Unique role ID.
- type: str
- sample: "677bfab34c844a01b88a217aa12ec4c2"
- name:
- description: Role name.
- type: str
- sample: "demo"
+ description: Dictionary describing the identity role.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ description:
+ description: Description of the role resource
+ type: str
+ sample: role description
+ domain_id:
+ description: Domain to which the role belongs
+ type: str
+ sample: default
+ id:
+ description: Unique role ID.
+ type: str
+ sample: "677bfab34c844a01b88a217aa12ec4c2"
+ links:
+ description: Links for the role resource
+ type: list
+ name:
+ description: Role name.
+ type: str
+ sample: "demo"
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+from ansible_collections.openstack.cloud.plugins.module_utils.resource import StateMachine
class IdentityRoleModule(OpenStackModule):
argument_spec = dict(
+ description=dict(),
+ domain_id=dict(),
name=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
)
@@ -76,32 +92,40 @@ class IdentityRoleModule(OpenStackModule):
supports_check_mode=True
)
- def _system_state_change(self, state, role):
- if state == 'present' and not role:
- return True
- if state == 'absent' and role:
- return True
- return False
+ class _StateMachine(StateMachine):
+ def _find(self, attributes, **kwargs):
+ kwargs = dict((k, attributes[k])
+ for k in ['domain_id']
+ if k in attributes and attributes[k] is not None)
+
+ return self.find_function(attributes['name'], **kwargs)
def run(self):
- name = self.params.get('name')
- state = self.params.get('state')
-
- role = self.conn.get_role(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, role))
-
- changed = False
- if state == 'present':
- if role is None:
- role = self.conn.create_role(name=name)
- changed = True
- self.exit_json(changed=changed, role=role)
- elif state == 'absent' and role is not None:
- self.conn.identity.delete_role(role['id'])
- changed = True
- self.exit_json(changed=changed)
+ sm = self._StateMachine(connection=self.conn,
+ service_name='identity',
+ type_name='role',
+ sdk=self.sdk)
+
+ kwargs = dict((k, self.params[k])
+ for k in ['state', 'timeout']
+ if self.params[k] is not None)
+
+ kwargs['attributes'] = \
+ dict((k, self.params[k])
+ for k in ['description', 'domain_id', 'name']
+ if self.params[k] is not None)
+
+ role, is_changed = sm(check_mode=self.ansible.check_mode,
+ updateable_attributes=None,
+ non_updateable_attributes=['domain_id'],
+ wait=False,
+ **kwargs)
+
+ if role is None:
+ self.exit_json(changed=is_changed)
+ else:
+ self.exit_json(changed=is_changed,
+ role=role.to_dict(computed=False))
def main():
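
The identity_role module now also accepts description and domain_id. A minimal sketch, assuming a cloud entry named "mycloud" and that the role name "reader" and the domain id "default" are acceptable in the target cloud:

    - name: Create a role with a description in a specific domain
      openstack.cloud.identity_role:
        cloud: mycloud
        state: present
        name: reader
        description: "Read-only access"
        domain_id: default
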
diff --git a/ansible_collections/openstack/cloud/plugins/modules/identity_role_info.py b/ansible_collections/openstack/cloud/plugins/modules/identity_role_info.py
index 42de17bd1..756c96969 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/identity_role_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/identity_role_info.py
@@ -1,72 +1,69 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright (c) 2020, Sagi Shnaidman <sshnaidm@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: identity_role_info
-short_description: Retrieve information about roles
+short_description: Fetch OpenStack identity (Keystone) roles
author: OpenStack Ansible SIG
description:
- - Get information about identity roles in Openstack
+ - Fetch OpenStack identity (Keystone) roles.
options:
domain_id:
description:
- - Domain ID which owns the role
+ - Domain ID which owns the role.
type: str
required: false
name:
description:
- - Name or ID of the role
+ - Name or ID of the role.
type: str
required: false
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-RETURN = '''
-openstack_roles:
- description: List of identity roles
+RETURN = r'''
+roles:
+ description: List of dictionaries describing matching identity roles.
returned: always
type: list
elements: dict
contains:
+ description:
+ description: User-facing description of the role.
+ type: str
+ domain_id:
+ description: References the domain ID which owns the role.
+ type: str
id:
description: Unique ID for the role
- returned: success
type: str
+ links:
+ description: The links for the service resources
+ type: dict
name:
description: Unique role name, within the owning domain.
- returned: success
- type: str
- domain_id:
- description: References the domain ID which owns the role.
- returned: success
type: str
'''
-EXAMPLES = '''
-# Retrieve info about all roles
-- openstack.cloud.identity_role_info:
+EXAMPLES = r'''
+- name: Retrieve info about all roles
+ openstack.cloud.identity_role_info:
cloud: mycloud
-# Retrieve info about all roles in specific domain
-- openstack.cloud.identity_role_info:
+- name: Retrieve info about all roles in specific domain
+ openstack.cloud.identity_role_info:
cloud: mycloud
domain_id: some_domain_id
-# Retrieve info about role 'admin'
-- openstack.cloud.identity_role_info:
+- name: Retrieve info about role 'admin'
+ openstack.cloud.identity_role_info:
cloud: mycloud
name: admin
-
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -74,8 +71,8 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityRoleInfoModule(OpenStackModule):
argument_spec = dict(
- domain_id=dict(type='str', required=False),
- name=dict(type='str', required=False),
+ domain_id=dict(),
+ name=dict(),
)
module_kwargs = dict(
@@ -83,14 +80,17 @@ class IdentityRoleInfoModule(OpenStackModule):
)
def run(self):
- params = {
- 'domain_id': self.params['domain_id'],
- 'name_or_id': self.params['name'],
- }
- params = {k: v for k, v in params.items() if v is not None}
-
- roles = self.conn.search_roles(**params)
- self.exit_json(changed=False, openstack_roles=roles)
+ kwargs = dict((k, self.params[k])
+ for k in ['domain_id']
+ if self.params[k] is not None)
+
+ name_or_id = self.params['name']
+ if name_or_id is not None:
+ kwargs['name_or_id'] = name_or_id
+
+ self.exit_json(changed=False,
+ roles=[r.to_dict(computed=False)
+ for r in self.conn.search_roles(**kwargs)])
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/identity_user.py b/ansible_collections/openstack/cloud/plugins/modules/identity_user.py
index 047b3ed8b..dfcf1cfd6 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/identity_user.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/identity_user.py
@@ -1,74 +1,71 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: identity_user
-short_description: Manage OpenStack Identity Users
+short_description: Manage an OpenStack identity (Keystone) user
author: OpenStack Ansible SIG
description:
- - Manage OpenStack Identity users. Users can be created,
- updated or deleted using this module. A user will be updated
- if I(name) matches an existing user and I(state) is present.
- The value for I(name) cannot be updated without deleting and
- re-creating the user.
+ - Create, update or delete an OpenStack identity (Keystone) user.
options:
- name:
- description:
- - Username for the user
- required: true
- type: str
- password:
- description:
- - Password for the user
- type: str
- update_password:
- required: false
- choices: ['always', 'on_create']
- default: on_create
- description:
- - C(always) will attempt to update password. C(on_create) will only
- set the password for newly created users.
- type: str
- email:
- description:
- - Email address for the user
- type: str
- description:
- description:
- - Description about the user
- type: str
- default_project:
- description:
- - Project name or ID that the user should be associated with by default
- type: str
- domain:
- description:
- - Domain to create the user in if the cloud supports domains
- type: str
- enabled:
- description:
- - Is the user enabled
- type: bool
- default: 'yes'
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ default_project:
+ description:
+ - Name or ID of the project the user should be created in.
+ type: str
+ description:
+ description:
+ - Description about the user.
+ type: str
+ domain:
+ description:
+ - Domain to create the user in if the cloud supports domains.
+ type: str
+ email:
+ description:
+ - Email address for the user.
+ type: str
+ is_enabled:
+ description:
+ - Whether the user is enabled or not.
+ type: bool
+ default: 'true'
+ aliases: ['enabled']
+ name:
+ description:
+ - Name of the user.
+ - I(name) cannot be updated without deleting and re-creating the user.
+ required: true
+ type: str
+ password:
+ description:
+ - Password for the user.
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+ update_password:
+ choices: ['always', 'on_create']
+ default: on_create
+ description:
+ - When I(update_password) is C(always), then the password will always be
+ updated.
+ - When I(update_password) is C(on_create), the password is only set
+ when creating a user.
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create a user
-- openstack.cloud.identity_user:
+EXAMPLES = r'''
+- name: Create a user
+ openstack.cloud.identity_user:
cloud: mycloud
state: present
name: demouser
@@ -77,14 +74,14 @@ EXAMPLES = '''
domain: default
default_project: demo
-# Delete a user
-- openstack.cloud.identity_user:
+- name: Delete a user
+ openstack.cloud.identity_user:
cloud: mycloud
state: absent
name: demouser
-# Create a user but don't update password if user exists
-- openstack.cloud.identity_user:
+- name: Create a user but don't update password if user exists
+ openstack.cloud.identity_user:
cloud: mycloud
state: present
name: demouser
@@ -94,8 +91,8 @@ EXAMPLES = '''
domain: default
default_project: demo
-# Create a user without password
-- openstack.cloud.identity_user:
+- name: Create a user without password
+ openstack.cloud.identity_user:
cloud: mycloud
state: present
name: demouser
@@ -104,154 +101,137 @@ EXAMPLES = '''
default_project: demo
'''
-
-RETURN = '''
+RETURN = r'''
user:
- description: Dictionary describing the user.
- returned: On success when I(state) is 'present'
- type: dict
- contains:
- default_project_id:
- description: User default project ID. Only present with Keystone >= v3.
- returned: success
- type: str
- sample: "4427115787be45f08f0ec22a03bfc735"
- description:
- description: The description of this user
- returned: success
- type: str
- sample: "a user"
- domain_id:
- description: User domain ID. Only present with Keystone >= v3.
- returned: success
- type: str
- sample: "default"
- email:
- description: User email address
- returned: success
- type: str
- sample: "demo@example.com"
- id:
- description: User ID
- returned: success
- type: str
- sample: "f59382db809c43139982ca4189404650"
- enabled:
- description: Indicates whether the user is enabled
- type: bool
- name:
- description: Unique user name, within the owning domain
- returned: success
- type: str
- sample: "demouser"
- username:
- description: Username with Identity API v2 (OpenStack Pike or earlier) else Null
- returned: success
- type: str
-
+ description: Dictionary describing the identity user.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ default_project_id:
+ description: User default project ID. Only present with Keystone >= v3.
+ type: str
+ sample: "4427115787be45f08f0ec22a03bfc735"
+ description:
+ description: The description of this user
+ type: str
+ sample: "a user"
+ domain_id:
+ description: User domain ID. Only present with Keystone >= v3.
+ type: str
+ sample: "default"
+ email:
+ description: User email address
+ type: str
+ sample: "demo@example.com"
+ id:
+ description: User ID
+ type: str
+ sample: "f59382db809c43139982ca4189404650"
+ is_enabled:
+ description: Indicates whether the user is enabled
+ type: bool
+ links:
+ description: The links for the user resource
+ type: dict
+ elements: str
+ name:
+ description: Unique user name, within the owning domain
+ type: str
+ sample: "demouser"
+ password:
+ description: Credential used during authentication
+ type: str
+ password_expires_at:
+ description: The date and time when the password expires. The time zone
+ is UTC. A null value means the password never expires.
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+from ansible_collections.openstack.cloud.plugins.module_utils.resource import StateMachine
class IdentityUserModule(OpenStackModule):
argument_spec = dict(
+ default_project=dict(),
+ description=dict(),
+ domain=dict(),
+ email=dict(),
+ is_enabled=dict(default=True, type='bool', aliases=['enabled']),
name=dict(required=True),
- password=dict(required=False, default=None, no_log=True),
- email=dict(required=False, default=None),
- default_project=dict(required=False, default=None),
- description=dict(type='str'),
- domain=dict(required=False, default=None),
- enabled=dict(default=True, type='bool'),
+ password=dict(no_log=True),
state=dict(default='present', choices=['absent', 'present']),
- update_password=dict(default='on_create', choices=['always', 'on_create']),
+ update_password=dict(default='on_create',
+ choices=['always', 'on_create']),
)
module_kwargs = dict()
- def _needs_update(self, params_dict, user):
- for k in params_dict:
- # We don't get password back in the user object, so assume any supplied
- # password is a change.
- if k == 'password':
- return True
- if k == 'default_project':
- if user['default_project_id'] != params_dict['default_project']:
- return True
- else:
- continue
- if user[k] != params_dict[k]:
- return True
- return False
+ class _StateMachine(StateMachine):
+ def _build_update(self, resource, attributes, updateable_attributes,
+ non_updateable_attributes,
+ update_password='on_create', **kwargs):
+ if update_password == 'always' and 'password' not in attributes:
+ self.ansible.fail_json(msg="update_password is 'always'"
+ " but password is missing")
+ elif update_password == 'on_create' and 'password' in attributes:
+ attributes.pop('password')
- def _get_domain_id(self, domain):
- dom_obj = self.conn.identity.find_domain(domain)
- if dom_obj is None:
- # Ok, let's hope the user is non-admin and passing a sane id
- return domain
- return dom_obj.id
+ return super()._build_update(resource, attributes,
+ updateable_attributes,
+ non_updateable_attributes, **kwargs)
- def _get_default_project_id(self, default_project, domain_id):
- project = self.conn.identity.find_project(default_project, domain_id=domain_id)
- if not project:
- self.fail_json(msg='Default project %s is not valid' % default_project)
- return project['id']
+ def _find(self, attributes, **kwargs):
+ query_args = dict((k, attributes[k])
+ for k in ['domain_id']
+ if k in attributes and attributes[k] is not None)
- def run(self):
- name = self.params['name']
- password = self.params.get('password')
- email = self.params['email']
- default_project = self.params['default_project']
- domain = self.params['domain']
- enabled = self.params['enabled']
- state = self.params['state']
- update_password = self.params['update_password']
- description = self.params['description']
+ return self.find_function(attributes['name'], **query_args)
- if domain:
- domain_id = self._get_domain_id(domain)
- user = self.conn.get_user(name, domain_id=domain_id)
+ def run(self):
+ sm = self._StateMachine(connection=self.conn,
+ service_name='identity',
+ type_name='user',
+ sdk=self.sdk,
+ ansible=self.ansible)
+
+ kwargs = dict((k, self.params[k])
+ for k in ['state', 'timeout', 'update_password']
+ if self.params[k] is not None)
+
+ kwargs['attributes'] = \
+ dict((k, self.params[k])
+ for k in ['description', 'email', 'is_enabled', 'name',
+ 'password']
+ if self.params[k] is not None)
+
+ domain_name_or_id = self.params['domain']
+ if domain_name_or_id is not None:
+ domain = self.conn.identity.find_domain(domain_name_or_id,
+ ignore_missing=False)
+ kwargs['attributes']['domain_id'] = domain.id
+
+ default_project_name_or_id = self.params['default_project']
+ if default_project_name_or_id is not None:
+ query_args = dict((k, kwargs['attributes'][k])
+ for k in ['domain_id']
+ if k in kwargs['attributes']
+ and kwargs['attributes'][k] is not None)
+ project = self.conn.identity.find_project(
+ default_project_name_or_id, ignore_missing=False, **query_args)
+ kwargs['attributes']['default_project_id'] = project.id
+
+ user, is_changed = sm(check_mode=self.ansible.check_mode,
+ updateable_attributes=None,
+ non_updateable_attributes=['domain_id'],
+ wait=False,
+ **kwargs)
+
+ if user is None:
+ self.exit_json(changed=is_changed)
else:
- domain_id = None
- user = self.conn.get_user(name)
-
- changed = False
- if state == 'present':
- user_args = {
- 'name': name,
- 'email': email,
- 'domain_id': domain_id,
- 'description': description,
- 'enabled': enabled,
- }
- if default_project:
- default_project_id = self._get_default_project_id(
- default_project, domain_id)
- user_args['default_project'] = default_project_id
- user_args = {k: v for k, v in user_args.items() if v is not None}
-
- changed = False
- if user is None:
- if password:
- user_args['password'] = password
-
- user = self.conn.create_user(**user_args)
- changed = True
- else:
- if update_password == 'always':
- if not password:
- self.fail_json(msg="update_password is always but a password value is missing")
- user_args['password'] = password
-
- if self._needs_update(user_args, user):
- user = self.conn.update_user(user['id'], **user_args)
- changed = True
-
- self.exit_json(changed=changed, user=user)
- elif state == 'absent' and user is not None:
- self.conn.identity.delete_user(user['id'])
- changed = True
- self.exit_json(changed=changed)
+ self.exit_json(changed=is_changed,
+ user=user.to_dict(computed=False))
def main():
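
With update_password set to always the module now fails when no password is supplied, and the enabled option is exposed as is_enabled (enabled remains an alias). A minimal sketch, assuming a cloud entry named "mycloud" and a variable new_password defined elsewhere:

    - name: Rotate the password and disable the account
      openstack.cloud.identity_user:
        cloud: mycloud
        state: present
        name: demouser
        password: "{{ new_password }}"
        update_password: always
        is_enabled: false
        domain: default
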
diff --git a/ansible_collections/openstack/cloud/plugins/modules/identity_user_info.py b/ansible_collections/openstack/cloud/plugins/modules/identity_user_info.py
index c0e0d9499..b55b9fbdf 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/identity_user_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/identity_user_info.py
@@ -1,115 +1,101 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: identity_user_info
-short_description: Retrieve information about one or more OpenStack users
+short_description: Fetch OpenStack identity (Keystone) users
author: OpenStack Ansible SIG
description:
- - Retrieve information about a one or more OpenStack users
- - This module was called C(openstack.cloud.identity_user_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.identity_user_info) module no longer returns C(ansible_facts)!
+ - Fetch OpenStack identity (Keystone) users.
options:
- name:
- description:
- - Name or ID of the user
- type: str
- domain:
- description:
- - Name or ID of the domain containing the user if the cloud supports domains
- type: str
- filters:
- description:
- - A dictionary of meta data to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ domain:
+ description:
+ - Name or ID of the domain containing the user.
+ type: str
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ type: dict
+ name:
+ description:
+ - Name or ID of the user.
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Gather information about previously created users
-- openstack.cloud.identity_user_info:
+EXAMPLES = r'''
+- name: Gather previously created users
+ openstack.cloud.identity_user_info:
cloud: awesomecloud
- register: result
-- debug:
- msg: "{{ result.openstack_users }}"
-# Gather information about a previously created user by name
-- openstack.cloud.identity_user_info:
+- name: Gather previously created user by name
+ openstack.cloud.identity_user_info:
cloud: awesomecloud
name: demouser
- register: result
-- debug:
- msg: "{{ result.openstack_users }}"
-# Gather information about a previously created user in a specific domain
-- openstack.cloud.identity_user_info:
+- name: Gather previously created user in a specific domain
+ openstack.cloud.identity_user_info:
cloud: awesomecloud
name: demouser
domain: admindomain
- register: result
-- debug:
- msg: "{{ result.openstack_users }}"
-# Gather information about a previously created user in a specific domain with filter
-- openstack.cloud.identity_user_info:
+- name: Gather previously created user with filters
+ openstack.cloud.identity_user_info:
cloud: awesomecloud
name: demouser
domain: admindomain
filters:
- enabled: False
- register: result
-- debug:
- msg: "{{ result.openstack_users }}"
+ is_enabled: False
'''
-
-RETURN = '''
-openstack_users:
- description: has all the OpenStack information about users
- returned: always
- type: list
- elements: dict
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Username of the user.
- returned: success
- type: str
- default_project_id:
- description: Default project ID of the user
- returned: success
- type: str
- description:
- description: The description of this user
- returned: success
- type: str
- domain_id:
- description: Domain ID containing the user
- returned: success
- type: str
- email:
- description: Email of the user
- returned: success
- type: str
- enabled:
- description: Flag to indicate if the user is enabled
- returned: success
- type: bool
- username:
- description: Username with Identity API v2 (OpenStack Pike or earlier) else Null
- returned: success
- type: str
+RETURN = r'''
+users:
+ description: List of dictionaries describing matching identity users.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: Unique UUID.
+ type: str
+ name:
+ description: Username of the user.
+ type: str
+ default_project_id:
+ description: Default project ID of the user
+ type: str
+ description:
+ description: The description of this user
+ type: str
+ domain_id:
+ description: Domain ID containing the user
+ type: str
+ email:
+ description: Email of the user
+ type: str
+ is_enabled:
+ description: Flag to indicate if the user is enabled
+ type: bool
+ links:
+ description: The links for the user resource
+ type: dict
+ password:
+ description: The default form of credential used during authentication.
+ type: str
+ password_expires_at:
+ description: The date and time when the password expires. The time zone
+ is UTC. A Null value means the password never expires.
+ type: str
+ username:
+ description: Username with Identity API v2 (OpenStack Pike or earlier)
+ else Null.
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -117,31 +103,30 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityUserInfoModule(OpenStackModule):
argument_spec = dict(
- name=dict(required=False, default=None),
- domain=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None),
+ domain=dict(),
+ filters=dict(type='dict'),
+ name=dict(),
)
module_kwargs = dict(
supports_check_mode=True
)
- deprecated_names = ('openstack.cloud.identity_user_facts')
-
def run(self):
name = self.params['name']
- domain = self.params['domain']
- filters = self.params['filters']
-
- args = {}
- if domain:
- dom_obj = self.conn.identity.find_domain(domain)
- if dom_obj is None:
- self.fail_json(
- msg="Domain name or ID '{0}' does not exist".format(domain))
- args['domain_id'] = dom_obj.id
-
- users = self.conn.search_users(name, filters, **args)
- self.exit_json(changed=False, openstack_users=users)
+ filters = self.params['filters'] or {}
+
+ kwargs = {}
+ domain_name_or_id = self.params['domain']
+ if domain_name_or_id:
+ domain = self.conn.identity.find_domain(domain_name_or_id)
+ if domain is None:
+ self.exit_json(changed=False, users=[])
+ kwargs['domain_id'] = domain['id']
+
+ self.exit_json(changed=False,
+ users=[u.to_dict(computed=False)
+ for u in self.conn.search_users(name, filters,
+ **kwargs)])
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/image.py b/ansible_collections/openstack/cloud/plugins/modules/image.py
index fae13a2e5..48527de16 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/image.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/image.py
@@ -1,125 +1,135 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# TODO(mordred): we need to support "location"(v1) and "locations"(v2)
-
-DOCUMENTATION = '''
----
+DOCUMENTATION = r'''
module: image
-short_description: Add/Delete images from OpenStack Cloud
+short_description: Manage images of the OpenStack image (Glance) service.
author: OpenStack Ansible SIG
description:
- - Add or Remove images from the OpenStack Image Repository
+ - Create or delete images in the OpenStack image (Glance) service.
options:
- name:
- description:
- - The name of the image when uploading - or the name/ID of the image if deleting
- required: true
- type: str
- id:
- description:
- - The ID of the image when uploading an image
- type: str
- checksum:
- description:
- - The checksum of the image
- type: str
- disk_format:
- description:
- - The format of the disk that is getting uploaded
- default: qcow2
- choices: ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']
- type: str
- container_format:
- description:
- - The format of the container
- default: bare
- choices: ['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']
- type: str
- project:
- description:
- - The name or ID of the project owning the image
- type: str
- aliases: ['owner']
- project_domain:
- description:
- - The domain the project owning the image belongs to
- - May be used to identify a unique project when providing a name to the project argument and multiple projects with such name exist
- type: str
- min_disk:
- description:
- - The minimum disk space (in GB) required to boot this image
- type: int
- min_ram:
- description:
- - The minimum ram (in MB) required to boot this image
- type: int
- is_public:
- description:
- - Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
- type: bool
- default: false
- protected:
- description:
- - Prevent image from being deleted
- type: bool
- default: false
- filename:
- description:
- - The path to the file which has to be uploaded
- type: str
- ramdisk:
- description:
- - The name of an existing ramdisk image that will be associated with this image
- type: str
- kernel:
- description:
- - The name of an existing kernel image that will be associated with this image
- type: str
- properties:
- description:
- - Additional properties to be associated with this image
- default: {}
- type: dict
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- tags:
- description:
- - List of tags to be applied to the image
- default: []
- type: list
- elements: str
- volume:
- description:
- - ID of a volume to create an image from.
- - The volume must be in AVAILABLE state.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ checksum:
+ description:
+ - The checksum of the image.
+ type: str
+ container_format:
+ description:
+ - The format of the container.
+ - This image attribute cannot be changed.
+ - Examples are C(ami), C(aki), C(ari), C(bare), C(ovf), C(ova) or
+ C(docker).
+ default: bare
+ type: str
+ disk_format:
+ description:
+ - The format of the disk that is getting uploaded.
+ - This image attribute cannot be changed.
+ - Examples are C(ami), C(ari), C(aki), C(vhd), C(vmdk), C(raw),
+ C(qcow2), C(vdi), C(iso), C(vhdx) or C(ploop).
+ default: qcow2
+ type: str
+ filename:
+ description:
+ - The path to the file which has to be uploaded.
+ - This image attribute cannot be changed.
+ type: str
+ id:
+ description:
+ - The ID of the image when uploading an image.
+ - This image attribute cannot be changed.
+ type: str
+ is_protected:
+ description:
+ - Prevent image from being deleted.
+ aliases: ['protected']
+ type: bool
+ is_public:
+ description:
+ - Whether the image can be accessed publicly.
+ - Setting I(is_public) to C(true) requires admin role by default.
+ - I(is_public) has been deprecated. Use I(visibility) instead of
+ I(is_public).
+ type: bool
+ default: false
+ kernel:
+ description:
+ - The name of an existing kernel image that will be associated with this
+ image.
+ type: str
+ min_disk:
+ description:
+ - The minimum disk space (in GB) required to boot this image.
+ type: int
+ min_ram:
+ description:
+ - The minimum ram (in MB) required to boot this image.
+ type: int
+ name:
+ description:
+ - The name of the image when uploading - or the name/ID of the image if
+ deleting.
+ - If provided together with I(id), it can be used to change the name of an
+ existing image.
+ required: true
+ type: str
+ owner:
+ description:
+ - The name or ID of the project owning the image.
+ type: str
+ aliases: ['project']
+ owner_domain:
+ description:
+ - The name or ID of the domain the project owning the image belongs to.
+ - May be used to identify a unique project when providing a name to the
+ project argument and multiple projects with such name exist.
+ type: str
+ aliases: ['project_domain']
+ properties:
+ description:
+ - Additional properties to be associated with this image.
+ default: {}
+ type: dict
+ ramdisk:
+ description:
+ - The name of an existing ramdisk image that will be associated with this
+ image.
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+ tags:
+ description:
+ - List of tags to be applied to the image.
+ default: []
+ type: list
+ elements: str
+ visibility:
+ description:
+ - The image visibility.
+ type: str
+ choices: [public, private, shared, community]
+ volume:
+ description:
+ - ID of a volume to create an image from.
+ - The volume must be in AVAILABLE state.
+ - I(volume) has been deprecated. Use module M(openstack.cloud.volume)
+ instead.
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
-- openstack.cloud.image:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- openstack.cloud.identity_user_domain_name: Default
- openstack.cloud.project_domain_name: Default
+EXAMPLES = r'''
+- name: Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
+ openstack.cloud.image:
+ cloud: devstack-admin
name: cirros
container_format: bare
disk_format: qcow2
@@ -132,33 +142,229 @@ EXAMPLES = '''
properties:
cpu_arch: x86_64
distro: ubuntu
+'''
-# Create image from volume attached to an instance
-- name: create volume snapshot
- openstack.cloud.volume_snapshot:
- auth:
- "{{ auth }}"
- display_name: myvol_snapshot
- volume: myvol
- force: yes
- register: myvol_snapshot
-
-- name: create volume from snapshot
- openstack.cloud.volume:
- auth:
- "{{ auth }}"
- size: "{{ myvol_snapshot.snapshot.size }}"
- snapshot_id: "{{ myvol_snapshot.snapshot.id }}"
- display_name: myvol_snapshot_volume
- wait: yes
- register: myvol_snapshot_volume
-
-- name: create image from volume snapshot
- openstack.cloud.image:
- auth:
- "{{ auth }}"
- volume: "{{ myvol_snapshot_volume.volume.id }}"
- name: myvol_image
+RETURN = r'''
+image:
+ description: Dictionary describing the Glance image.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ id:
+ description: Unique UUID.
+ type: str
+ name:
+ description: Name given to the image.
+ type: str
+ status:
+ description: Image status.
+ type: str
+ architecture:
+ description: The CPU architecture that must be supported by
+ the hypervisor.
+ type: str
+ created_at:
+ description: Image created at timestamp.
+ type: str
+ container_format:
+ description: Container format of the image.
+ type: str
+ direct_url:
+ description: URL to access the image file kept in external store.
+ type: str
+ min_ram:
+ description: Min amount of RAM required for this image.
+ type: int
+ disk_format:
+ description: Disk format of the image.
+ type: str
+ file:
+ description: The URL for the virtual machine image file.
+ type: str
+ has_auto_disk_config:
+ description: If root partition on disk is automatically resized
+ before the instance boots.
+ type: bool
+ hash_algo:
+ description: The algorithm used to compute a secure hash of the
+ image data.
+ type: str
+ hash_value:
+ description: The hexdigest of the secure hash of the image data
+ computed using the algorithm whose name is the value of the
+ os_hash_algo property.
+ type: str
+ hw_cpu_cores:
+ description: Used to pin the virtual CPUs (vCPUs) of instances to
+ the host's physical CPU cores (pCPUs).
+ type: str
+ hw_cpu_policy:
+ description: The policy for pinning virtual CPUs to physical CPU cores.
+ type: str
+ hw_cpu_sockets:
+ description: Preferred number of sockets to expose to the guest.
+ type: str
+ hw_cpu_thread_policy:
+ description: Defines how hardware CPU threads in a simultaneous
+ multithreading-based (SMT) architecture are used.
+ type: str
+ hw_cpu_threads:
+ description: The preferred number of threads to expose to the guest.
+ type: str
+ hw_disk_bus:
+ description: Specifies the type of disk controller to attach disk
+ devices to.
+ type: str
+ hw_machine_type:
+ description: Enables booting an ARM system using the
+ specified machine type.
+ type: str
+ hw_qemu_guest_agent:
+ description: "A string boolean, which if 'true', QEMU guest agent
+ will be exposed to the instance."
+ type: str
+ hw_rng_model:
+ description: "Adds a random-number generator device to the image's
+ instances."
+ type: str
+ hw_scsi_model:
+ description: Enables the use of VirtIO SCSI (virtio-scsi) to
+ provide block device access for compute instances.
+ type: str
+ hw_video_model:
+ description: The video image driver used.
+ type: str
+ hw_video_ram:
+ description: Maximum RAM for the video image.
+ type: str
+ hw_vif_model:
+ description: Specifies the model of virtual network interface device to
+ use.
+ type: str
+ hw_watchdog_action:
+ description: Enables a virtual hardware watchdog device that
+ carries out the specified action if the server hangs.
+ type: str
+ hypervisor_type:
+ description: The hypervisor type.
+ type: str
+ instance_type_rxtx_factor:
+ description: Optional property that allows created servers to have a
+ different bandwidth cap than that defined in the network
+ they are attached to.
+ type: str
+ instance_uuid:
+ description: For snapshot images, this is the UUID of the server
+ used to create this image.
+ type: str
+ is_hidden:
+ description: Controls whether an image is displayed in the default
+ image-list response
+ type: bool
+ is_hw_boot_menu_enabled:
+ description: Enables the BIOS bootmenu.
+ type: bool
+ is_hw_vif_multiqueue_enabled:
+ description: Enables the virtio-net multiqueue feature.
+ type: bool
+ kernel_id:
+ description: The ID of an image stored in the Image service that
+ should be used as the kernel when booting an AMI-style
+ image.
+ type: str
+ locations:
+ description: A list of URLs to access the image file in external store.
+ type: str
+ metadata:
+ description: The location metadata.
+ type: str
+ needs_config_drive:
+ description: Specifies whether the image needs a config drive.
+ type: bool
+ needs_secure_boot:
+ description: Whether Secure Boot is needed.
+ type: bool
+ os_admin_user:
+ description: The operating system admin username.
+ type: str
+ os_command_line:
+ description: The kernel command line to be used by libvirt driver.
+ type: str
+ os_distro:
+ description: The common name of the operating system distribution
+ in lowercase.
+ type: str
+ os_require_quiesce:
+ description: If true, require quiesce on snapshot via
+ QEMU guest agent.
+ type: str
+ os_shutdown_timeout:
+ description: Time for graceful shutdown.
+ type: str
+ os_type:
+ description: The operating system installed on the image.
+ type: str
+ os_version:
+ description: The operating system version as specified by
+ the distributor.
+ type: str
+ owner_id:
+ description: The ID of the owner, or project, of the image.
+ type: str
+ ramdisk_id:
+ description: The ID of an image stored in the Image service that should
+ be used as the ramdisk when booting an AMI-style image.
+ type: str
+ schema:
+ description: URL for the schema describing a virtual machine image.
+ type: str
+ store:
+ description: Glance will attempt to store the disk image data in the
+ backing store indicated by the value of the header.
+ type: str
+ updated_at:
+ description: Image updated at timestamp.
+ type: str
+ url:
+ description: URL to access the image file kept in external store.
+ type: str
+ virtual_size:
+ description: The virtual size of the image.
+ type: str
+ vm_mode:
+ description: The virtual machine mode.
+ type: str
+ vmware_adaptertype:
+ description: The virtual SCSI or IDE controller used by the
+ hypervisor.
+ type: str
+ vmware_ostype:
+ description: Operating system installed in the image.
+ type: str
+ filters:
+ description: Additional properties associated with the image.
+ type: dict
+ min_disk:
+ description: Min amount of disk space required for this image.
+ type: int
+ is_protected:
+ description: Image protected flag.
+ type: bool
+ checksum:
+ description: Checksum for the image.
+ type: str
+ owner:
+ description: Owner for the image.
+ type: str
+ visibility:
+ description: Indicates who has access to the image.
+ type: str
+ size:
+ description: Size of the image.
+ type: int
+ tags:
+ description: List of tags assigned to the image
+ type: list
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -166,99 +372,152 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ImageModule(OpenStackModule):
- deprecated_names = ('os_image', 'openstack.cloud.os_image')
-
argument_spec = dict(
- name=dict(required=True, type='str'),
- id=dict(type='str'),
- checksum=dict(type='str'),
- disk_format=dict(default='qcow2',
- choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
- container_format=dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
- project=dict(type='str', aliases=['owner']),
- project_domain=dict(type='str'),
- min_disk=dict(type='int', default=0),
- min_ram=dict(type='int', default=0),
+ checksum=dict(),
+ container_format=dict(default='bare'),
+ disk_format=dict(default='qcow2'),
+ filename=dict(),
+ id=dict(),
+ is_protected=dict(type='bool', aliases=['protected']),
is_public=dict(type='bool', default=False),
- protected=dict(type='bool', default=False),
- filename=dict(type='str'),
- ramdisk=dict(type='str'),
- kernel=dict(type='str'),
+ kernel=dict(),
+ min_disk=dict(type='int'),
+ min_ram=dict(type='int'),
+ name=dict(required=True),
+ owner=dict(aliases=['project']),
+ owner_domain=dict(aliases=['project_domain']),
properties=dict(type='dict', default={}),
- volume=dict(type='str'),
- tags=dict(type='list', default=[], elements='str'),
+ ramdisk=dict(),
state=dict(default='present', choices=['absent', 'present']),
+ tags=dict(type='list', default=[], elements='str'),
+ visibility=dict(choices=['public', 'private', 'shared', 'community']),
+ volume=dict(),
)
module_kwargs = dict(
- mutually_exclusive=[['filename', 'volume']],
+ mutually_exclusive=[
+ ('filename', 'volume'),
+ ('visibility', 'is_public'),
+ ],
)
- def run(self):
+ # resource attributes obtainable directly from params
+ attr_params = ('id', 'name', 'filename', 'disk_format',
+ 'container_format', 'wait', 'timeout', 'is_public',
+ 'is_protected', 'min_disk', 'min_ram', 'volume', 'tags')
+
+ def _resolve_visibility(self):
+ """resolve a visibility value to be compatible with older versions"""
+ if self.params['visibility']:
+ return self.params['visibility']
+ if self.params['is_public'] is not None:
+ return 'public' if self.params['is_public'] else 'private'
+ return None
+
+ def _build_params(self, owner):
+ params = {attr: self.params[attr] for attr in self.attr_params}
+ if owner:
+ params['owner_id'] = owner.id
+ params['visibility'] = self._resolve_visibility()
+ params = {k: v for k, v in params.items() if v is not None}
+ return params
+
+ def _return_value(self, image_name_or_id):
+ image = self.conn.image.find_image(image_name_or_id)
+ if image:
+ image = image.to_dict(computed=False)
+ return image
+
+ def _build_update(self, image):
+ update_payload = {'visibility': self._resolve_visibility()}
+
+ for k in ('is_protected', 'min_disk', 'min_ram'):
+ update_payload[k] = self.params[k]
+
+ for k in ('kernel', 'ramdisk'):
+ if not self.params[k]:
+ continue
+ k_id = '{0}_id'.format(k)
+ k_image = self.conn.image.find_image(
+ name_or_id=self.params[k], ignore_missing=False)
+ update_payload[k_id] = k_image.id
+ update_payload = {k: v for k, v in update_payload.items()
+ if v is not None and image[k] != v}
+
+ for p, v in self.params['properties'].items():
+ if p not in image or image[p] != v:
+ update_payload[p] = v
+
+ if (self.params['tags']
+ and set(image['tags']) != set(self.params['tags'])):
+ update_payload['tags'] = self.params['tags']
+
+ # If both name and id are defined, then we might change the name
+ if self.params['id'] and \
+ self.params['name'] and \
+ self.params['name'] != image['name']:
+ update_payload['name'] = self.params['name']
+
+ return update_payload
+
+ def run(self):
changed = False
- if self.params['id']:
- image = self.conn.get_image(name_or_id=self.params['id'])
- elif self.params['checksum']:
- image = self.conn.get_image(name_or_id=self.params['name'], filters={'checksum': self.params['checksum']})
- else:
- image = self.conn.get_image(name_or_id=self.params['name'])
+ image_name_or_id = self.params['id'] or self.params['name']
+ owner_name_or_id = self.params['owner']
+ owner_domain_name_or_id = self.params['owner_domain']
+ owner_filters = {}
+ if owner_domain_name_or_id:
+ owner_domain = self.conn.identity.find_domain(
+ owner_domain_name_or_id)
+ if owner_domain:
+ owner_filters['domain_id'] = owner_domain.id
+ else:
+ # else user may not be able to enumerate domains
+ owner_filters['domain_id'] = owner_domain_name_or_id
+
+ owner = None
+ if owner_name_or_id:
+ owner = self.conn.identity.find_project(
+ owner_name_or_id, ignore_missing=False, **owner_filters)
+ image = None
+ if image_name_or_id:
+ image = self.conn.get_image(
+ image_name_or_id,
+ filters={k: self.params[k]
+ for k in ['checksum'] if self.params[k] is not None})
+
+ changed = False
if self.params['state'] == 'present':
+ attrs = self._build_params(owner)
if not image:
- kwargs = {}
- if self.params['id'] is not None:
- kwargs['id'] = self.params['id']
- if self.params['project']:
- project_domain = {'id': None}
- if self.params['project_domain']:
- project_domain = self.conn.get_domain(name_or_id=self.params['project_domain'])
- if not project_domain or project_domain['id'] is None:
- self.fail(msg='Project domain %s could not be found' % self.params['project_domain'])
- project = self.conn.get_project(name_or_id=self.params['project'], domain_id=project_domain['id'])
- if not project:
- self.fail(msg='Project %s could not be found' % self.params['project'])
- kwargs['owner'] = project['id']
- image = self.conn.create_image(
- name=self.params['name'],
- filename=self.params['filename'],
- disk_format=self.params['disk_format'],
- container_format=self.params['container_format'],
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- is_public=self.params['is_public'],
- protected=self.params['protected'],
- min_disk=self.params['min_disk'],
- min_ram=self.params['min_ram'],
- volume=self.params['volume'],
- tags=self.params['tags'],
- **kwargs
- )
+ # self.conn.image.create_image() cannot be used because it does
+ # not provide self.conn.create_image()'s volume parameter [0].
+ # [0] https://opendev.org/openstack/openstacksdk/src/commit/
+ # a41d04ea197439c2f134ce3554995693933a46ac/openstack/cloud/_image.py#L306
+ image = self.conn.create_image(**attrs)
changed = True
if not self.params['wait']:
- self.exit(changed=changed, image=image, id=image.id)
-
- self.conn.update_image_properties(
- image=image,
- kernel=self.params['kernel'],
- ramdisk=self.params['ramdisk'],
- protected=self.params['protected'],
- **self.params['properties'])
- if self.params['tags']:
- self.conn.image.update_image(image.id, tags=self.params['tags'])
- image = self.conn.get_image(name_or_id=image.id)
- self.exit(changed=changed, image=image, id=image.id)
-
- elif self.params['state'] == 'absent':
- if not image:
- changed = False
- else:
- self.conn.delete_image(
- name_or_id=self.params['name'],
- wait=self.params['wait'],
- timeout=self.params['timeout'])
+ self.exit_json(changed=changed,
+ image=self._return_value(image.id))
+
+ update_payload = self._build_update(image)
+
+ if update_payload:
+ self.conn.image.update_image(image.id, **update_payload)
changed = True
- self.exit(changed=changed)
+
+ self.exit_json(changed=changed, image=self._return_value(image.id))
+
+ elif self.params['state'] == 'absent' and image is not None:
+ # self.conn.image.delete_image() does not offer a wait parameter
+ self.conn.delete_image(
+ name_or_id=image['id'],
+ wait=self.params['wait'],
+ timeout=self.params['timeout'])
+ changed = True
+ self.exit_json(changed=changed)
def main():
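A minimal playbook sketch of the reworked image module above, assuming a clouds.yaml entry named devstack-admin and a local image file; all values are placeholders, and visibility/is_public remain mutually exclusive in this revision:

- name: Upload an image and share it publicly
  openstack.cloud.image:
    cloud: devstack-admin
    name: cirros
    filename: cirros.qcow2
    disk_format: qcow2
    container_format: bare
    visibility: public
    owner: demo-project
    owner_domain: Default
    tags:
      - demo
    state: present
  register: result

- name: Show the image returned by the module
  debug:
    msg: "{{ result.image }}"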
diff --git a/ansible_collections/openstack/cloud/plugins/modules/image_info.py b/ansible_collections/openstack/cloud/plugins/modules/image_info.py
index f02079c00..157e9d02e 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/image_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/image_info.py
@@ -1,198 +1,296 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: image_info
-short_description: Retrieve information about an image within OpenStack.
+short_description: Fetch images from OpenStack image (Glance) service.
author: OpenStack Ansible SIG
description:
- - Retrieve information about a image image from OpenStack.
- - This module was called C(openstack.cloud.image_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.image_info) module no longer returns C(ansible_facts)!
+ - Fetch images from OpenStack image (Glance) service.
options:
- image:
- description:
- - Name or ID of the image
- required: false
- type: str
- filters:
- description:
- - Dict of properties of the images used for query
- type: dict
- required: false
- aliases: ['properties']
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ name:
+ description:
+ - Name or ID of the image
+ type: str
+ aliases: ['image']
+ filters:
+ description:
+ - Dict of properties of the images used for query
+ type: dict
+ aliases: ['properties']
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-- name: Gather information about a previously created image named image1
+EXAMPLES = r'''
+- name: Gather previously created image named image1
openstack.cloud.image_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
+ cloud: devstack-admin
image: image1
- register: result
-
-- name: Show openstack information
- debug:
- msg: "{{ result.image }}"
-# Show all available Openstack images
-- name: Retrieve all available Openstack images
+- name: List all images
openstack.cloud.image_info:
- register: result
-
-- name: Show images
- debug:
- msg: "{{ result.image }}"
-# Show images matching requested properties
-- name: Retrieve images having properties with desired values
- openstack.cloud.image_facts:
+- name: Retrieve and filter images
+ openstack.cloud.image_info:
filters:
- some_property: some_value
- OtherProp: OtherVal
-
-- name: Show images
- debug:
- msg: "{{ result.image }}"
+ is_protected: False
'''
-RETURN = '''
-openstack_images:
- description: has all the openstack information about the image
- returned: always, but can be null
- type: complex
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the image.
- returned: success
- type: str
- status:
- description: Image status.
- returned: success
- type: str
- created_at:
- description: Image created at timestamp.
- returned: success
- type: str
- container_format:
- description: Container format of the image.
- returned: success
- type: str
- direct_url:
- description: URL to access the image file kept in external store.
- returned: success
- type: str
- min_ram:
- description: Min amount of RAM required for this image.
- returned: success
- type: int
- disk_format:
- description: Disk format of the image.
- returned: success
- type: str
- file:
- description: The URL for the virtual machine image file.
- returned: success
- type: str
- os_hidden:
- description: Controls whether an image is displayed in the default image-list response
- returned: success
- type: bool
- locations:
- description: A list of URLs to access the image file in external store.
- returned: success
- type: str
- metadata:
- description: The location metadata.
- returned: success
- type: str
- schema:
- description: URL for the schema describing a virtual machine image.
- returned: success
- type: str
- updated_at:
- description: Image updated at timestamp.
- returned: success
- type: str
- virtual_size:
- description: The virtual size of the image.
- returned: success
- type: str
- min_disk:
- description: Min amount of disk space required for this image.
- returned: success
- type: int
- is_protected:
- description: Image protected flag.
- returned: success
- type: bool
- checksum:
- description: Checksum for the image.
- returned: success
- type: str
- owner:
- description: Owner for the image.
- returned: success
- type: str
- visibility:
- description: Indicates who has access to the image.
- returned: success
- type: str
- size:
- description: Size of the image.
- returned: success
- type: int
- tags:
- description: List of tags assigned to the image
- returned: success
- type: list
+RETURN = r'''
+images:
+ description: List of dictionaries describing matching images.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: Unique UUID.
+ type: str
+ name:
+ description: Name given to the image.
+ type: str
+ status:
+ description: Image status.
+ type: str
+ architecture:
+ description: The CPU architecture that must be supported by
+ the hypervisor.
+ type: str
+ created_at:
+ description: Image created at timestamp.
+ type: str
+ container_format:
+ description: Container format of the image.
+ type: str
+ direct_url:
+ description: URL to access the image file kept in external store.
+ type: str
+ min_ram:
+ description: Min amount of RAM required for this image.
+ type: int
+ disk_format:
+ description: Disk format of the image.
+ type: str
+ file:
+ description: The URL for the virtual machine image file.
+ type: str
+ has_auto_disk_config:
+ description: If root partition on disk is automatically resized
+ before the instance boots.
+ type: bool
+ hash_algo:
+ description: The algorithm used to compute a secure hash of the
+ image data.
+ type: str
+ hash_value:
+ description: The hexdigest of the secure hash of the image data
+ computed using the algorithm whose name is the value of the
+ os_hash_algo property.
+ type: str
+ hw_cpu_cores:
+ description: The preferred number of cores to expose to the guest.
+ type: str
+ hw_cpu_policy:
+ description: Used to pin the virtual CPUs (vCPUs) of instances to
+ the host's physical CPU cores (pCPUs).
+ type: str
+ hw_cpu_sockets:
+ description: Preferred number of sockets to expose to the guest.
+ type: str
+ hw_cpu_thread_policy:
+ description: Defines how hardware CPU threads in a simultaneous
+ multithreading-based (SMT) architecture should be used.
+ type: str
+ hw_cpu_threads:
+ description: The preferred number of threads to expose to the guest.
+ type: str
+ hw_disk_bus:
+ description: Specifies the type of disk controller to attach disk
+ devices to.
+ type: str
+ hw_machine_type:
+ description: Enables booting an ARM system using the
+ specified machine type.
+ type: str
+ hw_qemu_guest_agent:
+ description: "A string boolean; if 'true', the QEMU guest agent
+ will be exposed to the instance."
+ type: str
+ hw_rng_model:
+ description: "Adds a random-number generator device to the image's
+ instances."
+ type: str
+ hw_scsi_model:
+ description: Enables the use of VirtIO SCSI (virtio-scsi) to
+ provide block device access for compute instances.
+ type: str
+ hw_video_model:
+ description: The video image driver used.
+ type: str
+ hw_video_ram:
+ description: Maximum RAM for the video image.
+ type: str
+ hw_vif_model:
+ description: Specifies the model of virtual network interface device to
+ use.
+ type: str
+ hw_watchdog_action:
+ description: Enables a virtual hardware watchdog device that
+ carries out the specified action if the server hangs.
+ type: str
+ hypervisor_type:
+ description: The hypervisor type.
+ type: str
+ instance_type_rxtx_factor:
+ description: Optional property allows created servers to have a
+ different bandwidth cap than that defined in the network
+ they are attached to.
+ type: str
+ instance_uuid:
+ description: For snapshot images, this is the UUID of the server
+ used to create this image.
+ type: str
+ is_hidden:
+ description: Controls whether an image is displayed in the default
+ image-list response.
+ type: bool
+ is_hw_boot_menu_enabled:
+ description: Enables the BIOS bootmenu.
+ type: bool
+ is_hw_vif_multiqueue_enabled:
+ description: Enables the virtio-net multiqueue feature.
+ type: bool
+ kernel_id:
+ description: The ID of an image stored in the Image service that
+ should be used as the kernel when booting an AMI-style
+ image.
+ type: str
+ locations:
+ description: A list of URLs to access the image file in external store.
+ type: str
+ metadata:
+ description: The location metadata.
+ type: str
+ needs_config_drive:
+ description: Specifies whether the image needs a config drive.
+ type: bool
+ needs_secure_boot:
+ description: Whether Secure Boot is needed.
+ type: bool
+ os_admin_user:
+ description: The operating system admin username.
+ type: str
+ os_command_line:
+ description: The kernel command line to be used by libvirt driver.
+ type: str
+ os_distro:
+ description: The common name of the operating system distribution
+ in lowercase.
+ type: str
+ os_require_quiesce:
+ description: If true, require quiesce on snapshot via
+ QEMU guest agent.
+ type: str
+ os_shutdown_timeout:
+ description: Time for graceful shutdown.
+ type: str
+ os_type:
+ description: The operating system installed on the image.
+ type: str
+ os_version:
+ description: The operating system version as specified by
+ the distributor.
+ type: str
+ owner_id:
+ description: The ID of the owner, or project, of the image.
+ type: str
+ ramdisk_id:
+ description: The ID of an image stored in the Image service that should
+ be used as the ramdisk when booting an AMI-style image.
+ type: str
+ schema:
+ description: URL for the schema describing a virtual machine image.
+ type: str
+ store:
+ description: Glance will attempt to store the disk image data in the
+ backing store indicated by the value of the header.
+ type: str
+ updated_at:
+ description: Image updated at timestamp.
+ type: str
+ url:
+ description: URL to access the image file kept in external store.
+ type: str
+ virtual_size:
+ description: The virtual size of the image.
+ type: str
+ vm_mode:
+ description: The virtual machine mode.
+ type: str
+ vmware_adaptertype:
+ description: The virtual SCSI or IDE controller used by the
+ hypervisor.
+ type: str
+ vmware_ostype:
+ description: Operating system installed in the image.
+ type: str
+ filters:
+ description: Additional properties associated with the image.
+ type: dict
+ min_disk:
+ description: Min amount of disk space required for this image.
+ type: int
+ is_protected:
+ description: Image protected flag.
+ type: bool
+ checksum:
+ description: Checksum for the image.
+ type: str
+ owner:
+ description: Owner for the image.
+ type: str
+ visibility:
+ description: Indicates who has access to the image.
+ type: str
+ size:
+ description: Size of the image.
+ type: int
+ tags:
+ description: List of tags assigned to the image
+ type: list
'''
+
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class ImageInfoModule(OpenStackModule):
- deprecated_names = ('openstack.cloud.os_image_facts', 'openstack.cloud.os_image_info')
-
argument_spec = dict(
- image=dict(type='str', required=False),
- filters=dict(type='dict', required=False, aliases=['properties']),
+ filters=dict(type='dict', aliases=['properties']),
+ name=dict(aliases=['image']),
)
+
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
- args = {
- 'name_or_id': self.params['image'],
- 'filters': self.params['filters'],
- }
- args = {k: v for k, v in args.items() if v is not None}
- images = self.conn.search_images(**args)
-
- # for backward compatibility
- if 'name_or_id' in args:
- image = images[0] if images else None
- else:
- image = images
-
- self.exit(changed=False, openstack_images=images, image=image)
+ kwargs = dict((k, self.params[k])
+ for k in ['filters']
+ if self.params[k] is not None)
+
+ name_or_id = self.params['name']
+ if name_or_id is not None:
+ kwargs['name_or_id'] = name_or_id
+
+ self.exit(changed=False,
+ images=[i.to_dict(computed=False)
+ for i in self.conn.search_images(**kwargs)])
def main():
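A short usage sketch for the revised image_info module, assuming the devstack-admin cloud entry used in the examples above; note the result key is now images rather than openstack_images:

- name: Fetch unprotected images
  openstack.cloud.image_info:
    cloud: devstack-admin
    filters:
      is_protected: false
  register: result

- name: Show image names
  debug:
    msg: "{{ result.images | map(attribute='name') | list }}"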
diff --git a/ansible_collections/openstack/cloud/plugins/modules/keypair.py b/ansible_collections/openstack/cloud/plugins/modules/keypair.py
index 232d4985e..1e75ae3bc 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/keypair.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/keypair.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
@@ -36,10 +37,6 @@ options:
choices: [present, absent, replace]
default: present
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -60,23 +57,49 @@ EXAMPLES = '''
'''
RETURN = '''
-id:
- description: Unique UUID.
- returned: success
- type: str
-name:
- description: Name given to the keypair.
- returned: success
- type: str
-public_key:
- description: The public key value for the keypair.
- returned: success
- type: str
-private_key:
- description: The private key value for the keypair.
- returned: Only when a keypair is generated for the user (e.g., when creating one
- and a public key is not specified).
- type: str
+keypair:
+ description: Dictionary describing the keypair.
+ returned: On success when I(state) is 'present'
+ type: dict
+ contains:
+ created_at:
+ description: Date the keypair was created
+ returned: success
+ type: str
+ fingerprint:
+ description: The short fingerprint associated with the public_key
+ for this keypair.
+ returned: success
+ type: str
+ id:
+ description: Unique UUID.
+ returned: success
+ type: str
+ is_deleted:
+ description: Whether the keypair is deleted or not
+ returned: success
+ type: bool
+ name:
+ description: Name given to the keypair.
+ returned: success
+ type: str
+ private_key:
+ description: The private key value for the keypair.
+ returned: Only when a keypair is generated for the user (e.g., when
+ creating one and a public key is not specified).
+ type: str
+ public_key:
+ description: The public key value for the keypair.
+ returned: success
+ type: str
+ type:
+ description: The type of keypair
+ returned: success
+ type: str
+ user_id:
+ description: The user id for a keypair
+ returned: success
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
@@ -84,12 +107,11 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
class KeyPairModule(OpenStackModule):
- deprecated_names = ('os_keypair', 'openstack.cloud.os_keypair')
argument_spec = dict(
name=dict(required=True),
- public_key=dict(default=None),
- public_key_file=dict(default=None),
+ public_key=dict(),
+ public_key_file=dict(),
state=dict(default='present',
choices=['absent', 'present', 'replace']),
)
@@ -115,11 +137,12 @@ class KeyPairModule(OpenStackModule):
with open(self.params['public_key_file']) as public_key_fh:
public_key = public_key_fh.read()
- keypair = self.conn.get_keypair(name)
+ keypair = self.conn.compute.find_keypair(name)
if self.ansible.check_mode:
self.exit_json(changed=self._system_state_change(keypair))
+ changed = False
if state in ('present', 'replace'):
if keypair and keypair['name'] == name:
if public_key and (public_key != keypair['public_key']):
@@ -129,20 +152,19 @@ class KeyPairModule(OpenStackModule):
" as offered. Delete key first." % name
)
else:
- self.conn.delete_keypair(name)
+ self.conn.compute.delete_keypair(keypair)
keypair = self.conn.create_keypair(name, public_key)
changed = True
- else:
- changed = False
else:
keypair = self.conn.create_keypair(name, public_key)
changed = True
- self.exit_json(changed=changed, key=keypair, id=keypair['id'])
+ self.exit_json(
+ changed=changed, keypair=keypair.to_dict(computed=False))
elif state == 'absent':
if keypair:
- self.conn.delete_keypair(name)
+ self.conn.compute.delete_keypair(keypair)
self.exit_json(changed=True)
self.exit_json(changed=False)
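A hedged usage sketch for the updated keypair module, assuming an existing public key file at a placeholder path; results are now nested under a keypair dictionary instead of top-level keys:

- name: Ensure an SSH keypair exists
  openstack.cloud.keypair:
    cloud: devstack
    name: ansible-key
    public_key_file: /home/user/.ssh/id_rsa.pub
    state: present
  register: result

- name: Show the keypair fingerprint
  debug:
    msg: "{{ result.keypair.fingerprint }}"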
diff --git a/ansible_collections/openstack/cloud/plugins/modules/keypair_info.py b/ansible_collections/openstack/cloud/plugins/modules/keypair_info.py
index 1fffe2c89..52111eae5 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/keypair_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/keypair_info.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2021 T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -28,10 +29,6 @@ options:
description:
- The last-seen item.
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -51,7 +48,7 @@ EXAMPLES = '''
'''
RETURN = '''
-openstack_keypairs:
+keypairs:
description:
- Lists keypairs that are associated with the account.
type: list
@@ -111,10 +108,10 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
class KeyPairInfoModule(OpenStackModule):
argument_spec = dict(
- name=dict(type='str', required=False),
- user_id=dict(type='str', required=False),
- limit=dict(type='int', required=False),
- marker=dict(type='str', required=False)
+ name=dict(),
+ user_id=dict(),
+ limit=dict(type='int'),
+ marker=dict()
)
module_kwargs = dict(
supports_check_mode=True
@@ -126,10 +123,8 @@ class KeyPairInfoModule(OpenStackModule):
if self.params[k] is not None}
keypairs = self.conn.search_keypairs(name_or_id=self.params['name'],
filters=filters)
- # self.conn.search_keypairs() returned munch.Munch objects before Train
- result = [raw if isinstance(raw, dict) else raw.to_dict(computed=False)
- for raw in keypairs]
- self.exit(changed=False, openstack_keypairs=result)
+ result = [raw.to_dict(computed=False) for raw in keypairs]
+ self.exit(changed=False, keypairs=result)
def main():
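A minimal sketch of the renamed keypair_info return value, with placeholder cloud and key names; the module now returns keypairs instead of openstack_keypairs:

- name: Fetch a keypair by name
  openstack.cloud.keypair_info:
    cloud: devstack
    name: ansible-key
  register: result

- name: Show matching keypairs
  debug:
    msg: "{{ result.keypairs }}"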
diff --git a/ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol.py b/ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol.py
index 5a33d8a32..ef6d892fc 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol.py
@@ -1,63 +1,85 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: keystone_federation_protocol
-short_description: manage a federation Protocol
+short_description: Manage a Keystone federation protocol
author: OpenStack Ansible SIG
description:
- - Manage a federation Protocol.
+ - Manage a Keystone federation protocol.
options:
name:
description:
- - The name of the Protocol.
+ - ID or name of the federation protocol.
+ - This attribute cannot be updated.
type: str
required: true
aliases: ['id']
- state:
+ idp:
description:
- - Whether the protocol should be C(present) or C(absent).
- choices: ['present', 'absent']
- default: present
+ - ID or name of the identity provider this protocol is associated with.
+ - This attribute cannot be updated.
+ aliases: ['idp_id', 'idp_name']
+ required: true
type: str
- idp_id:
+ mapping:
description:
- - The name of the Identity Provider this Protocol is associated with.
- aliases: ['idp_name']
- required: true
+ - ID or name of the mapping to use for this protocol.
+ - Required when creating a new protocol.
type: str
- mapping_id:
+ aliases: ['mapping_id', 'mapping_name']
+ state:
description:
- - The name of the Mapping to use for this Protocol.'
- - Required when creating a new Protocol.
+ - Whether the protocol should be C(present) or C(absent).
+ choices: ['present', 'absent']
+ default: present
type: str
- aliases: ['mapping_name']
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
+notes:
+ - Name equals the ID of a federation protocol.
+ - Name equals the ID of an identity provider.
+ - Name equals the ID of a mapping.
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
+EXAMPLES = r'''
- name: Create a protocol
openstack.cloud.keystone_federation_protocol:
cloud: example_cloud
name: example_protocol
- idp_id: example_idp
- mapping_id: example_mapping
+ idp: example_idp
+ mapping: example_mapping
- name: Delete a protocol
openstack.cloud.keystone_federation_protocol:
cloud: example_cloud
name: example_protocol
- idp_id: example_idp
+ idp: example_idp
state: absent
'''
-RETURN = '''
+RETURN = r'''
+protocol:
+ description: Dictionary describing the federation protocol.
+ returned: always
+ type: dict
+ contains:
+ id:
+ description: ID of the federation protocol.
+ returned: success
+ type: str
+ mapping_id:
+ description: The ID of the mapping used by this federation protocol.
+ returned: success
+ type: str
+ name:
+ description: Name of the protocol. Equal to C(id).
+ returned: success
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -67,115 +89,91 @@ class IdentityFederationProtocolModule(OpenStackModule):
argument_spec = dict(
name=dict(required=True, aliases=['id']),
state=dict(default='present', choices=['absent', 'present']),
- idp_id=dict(required=True, aliases=['idp_name']),
- mapping_id=dict(aliases=['mapping_name']),
+ idp=dict(required=True, aliases=['idp_id', 'idp_name']),
+ mapping=dict(aliases=['mapping_id', 'mapping_name']),
)
module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('mapping',)),
+ ],
supports_check_mode=True
)
- def normalize_protocol(self, protocol):
- """
- Normalizes the protocol definitions so that the outputs are consistent with the
- parameters
-
- - "name" (parameter) == "id" (SDK)
- """
- if protocol is None:
- return None
-
- _protocol = protocol.to_dict()
- _protocol['name'] = protocol['id']
- # As of 0.44 SDK doesn't copy the URI parameters over, so let's add them
- _protocol['idp_id'] = protocol['idp_id']
- return _protocol
-
- def delete_protocol(self, protocol):
- """
- Delete an existing Protocol
-
- returns: the "Changed" state
- """
- if protocol is None:
- return False
-
- if self.ansible.check_mode:
- return True
-
- self.conn.identity.delete_federation_protocol(None, protocol)
- return True
+ def run(self):
+ state = self.params['state']
- def create_protocol(self, name):
- """
- Create a new Protocol
+ id = self.params['name']
+ idp_id = self.params['idp']
+ protocol = self.conn.identity.find_federation_protocol(idp_id, id)
- returns: the "Changed" state and the new protocol
- """
if self.ansible.check_mode:
- return True, None
+ self.exit_json(changed=self._will_change(state, protocol))
- idp_name = self.params.get('idp_id')
- mapping_id = self.params.get('mapping_id')
+ if state == 'present' and not protocol:
+ # Create protocol
+ protocol = self._create()
+ self.exit_json(changed=True,
+ protocol=protocol.to_dict(computed=False))
- attributes = {
- 'idp_id': idp_name,
- 'mapping_id': mapping_id,
- }
+ elif state == 'present' and protocol:
+ # Update protocol
+ update = self._build_update(protocol)
+ if update:
+ protocol = self._update(protocol, update)
- protocol = self.conn.identity.create_federation_protocol(id=name, **attributes)
- return (True, protocol)
+ self.exit_json(changed=bool(update),
+ protocol=protocol.to_dict(computed=False))
- def update_protocol(self, protocol):
- """
- Update an existing Protocol
+ elif state == 'absent' and protocol:
+ # Delete protocol
+ self._delete(protocol)
+ self.exit_json(changed=True)
- returns: the "Changed" state and the new protocol
- """
- mapping_id = self.params.get('mapping_id')
+ elif state == 'absent' and not protocol:
+ # Do nothing
+ self.exit_json(changed=False)
- attributes = {}
+ def _build_update(self, protocol):
+ update = {}
- if (mapping_id is not None) and (mapping_id != protocol.mapping_id):
- attributes['mapping_id'] = mapping_id
+ attributes = dict(
+ (k, self.params[p])
+ for (p, k) in {'mapping': 'mapping_id'}.items()
+ if p in self.params and self.params[p] is not None
+ and self.params[p] != protocol[k])
- if not attributes:
- return False, protocol
+ if attributes:
+ update['attributes'] = attributes
- if self.ansible.check_mode:
- return True, None
+ return update
- new_protocol = self.conn.identity.update_federation_protocol(None, protocol, **attributes)
- return (True, new_protocol)
+ def _create(self):
+ return self.conn.identity.create_federation_protocol(
+ id=self.params['name'],
+ idp_id=self.params['idp'],
+ mapping_id=self.params['mapping'])
- def run(self):
- """ Module entry point """
- name = self.params.get('name')
- state = self.params.get('state')
- idp = self.params.get('idp_id')
- changed = False
+ def _delete(self, protocol):
+ self.conn.identity.delete_federation_protocol(None, protocol)
- protocol = self.conn.identity.find_federation_protocol(idp, name)
+ def _update(self, protocol, update):
+ attributes = update.get('attributes')
+ if attributes:
+ protocol = self.conn.identity.update_federation_protocol(
+ protocol.idp_id, protocol.id, **attributes)
- if state == 'absent':
- if protocol is not None:
- changed = self.delete_protocol(protocol)
- self.exit_json(changed=changed)
+ return protocol
- # state == 'present'
+ def _will_change(self, state, protocol):
+ if state == 'present' and not protocol:
+ return True
+ elif state == 'present' and protocol:
+ return bool(self._build_update(protocol))
+ elif state == 'absent' and protocol:
+ return True
else:
- if protocol is None:
- if self.params.get('mapping_id') is None:
- self.fail_json(
- msg='A mapping_id must be passed when creating'
- ' a protocol')
- (changed, protocol) = self.create_protocol(name)
- protocol = self.normalize_protocol(protocol)
- self.exit_json(changed=changed, protocol=protocol)
-
- else:
- (changed, new_protocol) = self.update_protocol(protocol)
- new_protocol = self.normalize_protocol(new_protocol)
- self.exit_json(changed=changed, protocol=new_protocol)
+ # state == 'absent' and not protocol:
+ return False
def main():
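Because mapping is the only attribute the rewritten module updates in place, re-running a task with a different mapping modifies the existing protocol; a sketch using the example names from the documentation above, where example_mapping2 is a placeholder:

- name: Point example_protocol at a different mapping
  openstack.cloud.keystone_federation_protocol:
    cloud: example_cloud
    name: example_protocol
    idp: example_idp
    mapping: example_mapping2
  register: result

- name: Show whether the protocol was updated
  debug:
    msg: "{{ result.changed }}"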
diff --git a/ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol_info.py b/ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol_info.py
index b281b13e3..fdb315930 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/keystone_federation_protocol_info.py
@@ -1,48 +1,67 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: keystone_federation_protocol_info
-short_description: get information about federation Protocols
+short_description: Fetch Keystone federation protocols
author: OpenStack Ansible SIG
description:
- - Get information about federation Protocols.
+ - Fetch Keystone federation protocols.
options:
name:
description:
- - The name of the Protocol.
+ - ID or name of the federation protocol.
type: str
aliases: ['id']
- idp_id:
+ idp:
description:
- - The name of the Identity Provider this Protocol is associated with.
- aliases: ['idp_name']
+ - ID or name of the identity provider this protocol is associated with.
+ aliases: ['idp_id', 'idp_name']
required: true
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
+notes:
+ - Name equals the ID of a federation protocol.
+ - Name equals the ID of an identity provider.
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-- name: Describe a protocol
+EXAMPLES = r'''
+- name: Fetch all federation protocols attached to an identity provider
openstack.cloud.keystone_federation_protocol_info:
cloud: example_cloud
- name: example_protocol
- idp_id: example_idp
- mapping_name: example_mapping
+ idp: example_idp
-- name: Describe all protocols attached to an IDP
+- name: Fetch federation protocol by name
openstack.cloud.keystone_federation_protocol_info:
cloud: example_cloud
- idp_id: example_idp
+ idp: example_idp
+ name: example_protocol
'''
-RETURN = '''
+RETURN = r'''
+protocols:
+ description: List of federation protocol dictionaries.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: ID of the federation protocol.
+ returned: success
+ type: str
+ mapping_id:
+ description: The ID of the mapping used by this federation protocol.
+ returned: success
+ type: str
+ name:
+ description: Name of the protocol. Equal to C(id).
+ returned: success
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -51,42 +70,29 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityFederationProtocolInfoModule(OpenStackModule):
argument_spec = dict(
name=dict(aliases=['id']),
- idp_id=dict(required=True, aliases=['idp_name']),
+ idp=dict(required=True, aliases=['idp_id', 'idp_name']),
)
+
module_kwargs = dict(
supports_check_mode=True
)
- def normalize_protocol(self, protocol):
- """
- Normalizes the protocol definitions so that the outputs are consistent with the
- parameters
-
- - "name" (parameter) == "id" (SDK)
- """
- if protocol is None:
- return None
-
- _protocol = protocol.to_dict()
- _protocol['name'] = protocol['id']
- # As of 0.44 SDK doesn't copy the URI parameters over, so let's add them
- _protocol['idp_id'] = protocol['idp_id']
- return _protocol
-
def run(self):
- """ Module entry point """
+ # name is id for federation protocols
+ id = self.params['name']
- name = self.params.get('name')
- idp = self.params.get('idp_id')
-
- if name:
- protocol = self.conn.identity.get_federation_protocol(idp, name)
- protocol = self.normalize_protocol(protocol)
- self.exit_json(changed=False, protocols=[protocol])
+ # name is id for identity providers
+ idp_id = self.params['idp']
+ if id:
+ protocol = self.conn.identity.find_federation_protocol(idp_id, id)
+ protocols = [protocol] if protocol else []
else:
- protocols = list(map(self.normalize_protocol, self.conn.identity.federation_protocols(idp)))
- self.exit_json(changed=False, protocols=protocols)
+ protocols = self.conn.identity.federation_protocols(idp_id)
+
+ self.exit_json(changed=False,
+ protocols=[p.to_dict(computed=False)
+ for p in protocols])
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/lb_health_monitor.py b/ansible_collections/openstack/cloud/plugins/modules/lb_health_monitor.py
index 94de4485e..a8678b53a 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/lb_health_monitor.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/lb_health_monitor.py
@@ -1,289 +1,356 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2020 Jesper Schmitz Mouridsen.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: lb_health_monitor
author: OpenStack Ansible SIG
-short_description: Add/Delete a health m nonitor to a pool in the load balancing service from OpenStack Cloud
+short_description: Manage a health monitor in an OpenStack load-balancer pool
description:
- - Add or Remove a health monitor to/from a pool in the OpenStack load-balancer service.
+ - Add, update or remove a health monitor from a load-balancer pool in an
+ OpenStack cloud.
options:
- name:
- type: 'str'
- description:
- - Name that has to be given to the health monitor
- required: true
- state:
- type: 'str'
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- pool:
- required: true
- type: 'str'
- description:
- - The pool name or id to monitor by the health monitor.
- type:
- type: 'str'
- default: HTTP
- description:
- - One of HTTP, HTTPS, PING, SCTP, TCP, TLS-HELLO, or UDP-CONNECT.
- choices: [HTTP, HTTPS, PING, SCTP, TCP, TLS-HELLO, UDP-CONNECT]
- delay:
- type: 'str'
- required: true
- description:
- - the interval, in seconds, between health checks.
- max_retries:
- required: true
- type: 'str'
- description:
- - The number of successful checks before changing the operating status of the member to ONLINE.
- max_retries_down:
- type: 'str'
- default: '3'
- description:
- - The number of allowed check failures before changing the operating status of the member to ERROR. A valid value is from 1 to 10. The default is 3.
- resp_timeout:
- required: true
- description:
- - The time, in seconds, after which a health check times out. Must be less than delay
- type: int
- admin_state_up:
- default: True
- description:
- - The admin state of the helath monitor true for up or false for down
- type: bool
- expected_codes:
- type: 'str'
- default: '200'
- description:
- - The list of HTTP status codes expected in response from the member to declare it healthy. Specify one of the following values
- A single value, such as 200
- A list, such as 200, 202
- A range, such as 200-204
- http_method:
- type: 'str'
- default: GET
- choices: ['GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH', 'POST', 'PUT', 'TRACE']
- description:
- - The HTTP method that the health monitor uses for requests. One of CONNECT, DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT, or TRACE. The default is GET.
- url_path:
- type: 'str'
- default: '/'
- description:
- - The HTTP URL path of the request sent by the monitor to test the health of a backend member.
- Must be a string that begins with a forward slash (/). The default URL path is /.
-requirements: ["openstacksdk"]
+ delay:
+ description:
+ - The interval, in seconds, between health checks.
+ - Required when I(state) is C(present).
+ type: int
+ expected_codes:
+ description:
+ - The list of HTTP status codes expected in response from the member to
+ declare it healthy.
+ - For example, I(expected_codes) could be a single value, such as C(200),
+ a list, such as C(200, 202) or a range, such as C(200-204).
+ - "Octavia's default for I(expected_codes) is C(200)."
+ type: str
+ health_monitor_timeout:
+ description:
+ - The time, in seconds, after which a health check times out.
+ - Must be less than I(delay).
+ - Required when I(state) is C(present).
+ type: int
+ aliases: ['resp_timeout']
+ http_method:
+ description:
+ - The HTTP method that the health monitor uses for requests.
+ - For example, I(http_method) could be C(CONNECT), C(DELETE), C(GET),
+ C(HEAD), C(OPTIONS), C(PATCH), C(POST), C(PUT), or C(TRACE).
+ - "Octavia's default for I(http_method) is C(GET)."
+ type: str
+ is_admin_state_up:
+ description:
+ - Whether the health monitor is up or down.
+ type: bool
+ aliases: ['admin_state_up']
+ max_retries:
+ description:
+ - The number of successful checks before changing the operating status
+ of the member to ONLINE.
+ - Required when I(state) is C(present).
+ type: int
+ max_retries_down:
+ description:
+ - The number of allowed check failures before changing the operating
+ status of the member to ERROR. A valid value is from 1 to 10.
+ type: int
+ name:
+ description:
+ - Name that has to be given to the health monitor.
+ - This attribute cannot be updated.
+ type: str
+ required: true
+ pool:
+ description:
+ - The pool name or id to monitor by the health monitor.
+ - Required when I(state) is C(present).
+ - This attribute cannot be updated.
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+ type:
+ default: HTTP
+ description:
+ - The type of health monitor.
+ - For example, I(type) could be C(HTTP), C(HTTPS), C(PING), C(SCTP),
+ C(TCP), C(TLS-HELLO) or C(UDP-CONNECT).
+ - This attribute cannot be updated.
+ type: str
+ url_path:
+ description:
+ - The HTTP URL path of the request sent by the monitor to test the health
+ of a backend member.
+ - Must be a string that begins with a forward slash (C(/)).
+ - "Octavia's default URL path is C(/)."
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-#Create a healtmonitor named healthmonitor01 with method HEAD url_path /status and expect code 200
-- openstack.cloud.lb_health_monitor:
- auth:
- auth_url: "{{keystone_url}}"
- username: "{{username}}"
- password: "{{password}}"
- project_domain_name: "{{domain_name}}"
- user_domain_name: "{{domain_name}}"
- project_name: "{{project_name}}"
- wait: true
- admin_state_up: True
+
+RETURN = r'''
+health_monitor:
+ description: Dictionary describing the load-balancer health monitor.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ created_at:
+ description: The UTC date and timestamp when the resource was created.
+ type: str
+ delay:
+ description: The time, in seconds, between sending probes to members.
+ type: int
+ expected_codes:
+ description: The list of HTTP status codes expected in response from the
+ member to declare it healthy.
+ type: str
+ http_method:
+ description: The HTTP method that the health monitor uses for requests.
+ type: str
+ id:
+ description: The health monitor UUID.
+ type: str
+ is_admin_state_up:
+ description: The administrative state of the resource.
+ type: bool
+ max_retries:
+ description: The number of successful checks before changing the
+ operating status of the member to ONLINE.
+ type: int
+ max_retries_down:
+ description: The number of allowed check failures before changing the
+ operating status of the member to ERROR.
+ type: int
+ name:
+ description: Human-readable name of the resource.
+ type: str
+ operating_status:
+ description: The operating status of the resource.
+ type: str
+ pool_id:
+ description: The id of the pool.
+ type: str
+ pools:
+ description: List of associated pool ids.
+ type: list
+ project_id:
+ description: The ID of the project owning this resource.
+ type: str
+ provisioning_status:
+ description: The provisioning status of the resource.
+ type: str
+ tags:
+ description: A list of associated tags.
+ type: list
+ timeout:
+ description: The maximum time, in seconds, that a monitor waits to
+ connect before it times out.
+ type: int
+ type:
+ description: The type of health monitor.
+ type: str
+ updated_at:
+ description: The UTC date and timestamp when the resource was last
+ updated.
+ type: str
+ url_path:
+ description: The HTTP URL path of the request sent by the monitor to
+ test the health of a backend member.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Create a load-balancer health monitor
+ openstack.cloud.lb_health_monitor:
+ cloud: devstack
+ delay: 10
expected_codes: '200'
- max_retries_down: '4'
+ health_monitor_timeout: 5
http_method: GET
- url_path: "/status"
- pool: '{{pool_id}}'
- name: 'healthmonitor01'
- delay: '10'
- max_retries: '3'
- resp_timeout: '5'
+ is_admin_state_up: true
+ max_retries: 3
+ max_retries_down: 4
+ name: healthmonitor01
+ pool: lb_pool
state: present
-'''
-RETURN = '''
-health_monitor:
- description: Dictionary describing the health monitor.
- returned: On success when C(state=present)
- type: complex
- contains:
- id:
- description: The health monitor UUID.
- returned: On success when C(state=present)
- type: str
- admin_state_up:
- returned: On success when C(state=present)
- description: The administrative state of the resource.
- type: bool
- created_at:
- returned: On success when C(state=present)
- description: The UTC date and timestamp when the resource was created.
- type: str
- delay:
- returned: On success when C(state=present)
- description: The time, in seconds, between sending probes to members.
- type: int
- expected_codes:
- returned: On success when C(state=present)
- description: The list of HTTP status codes expected in response from the member to declare it healthy.
- type: str
- http_method:
- returned: On success when C(state=present)
- description: The HTTP method that the health monitor uses for requests.
- type: str
- max_retries:
- returned: On success when C(state=present)
- description: The number of successful checks before changing the operating status of the member to ONLINE.
- type: str
- max_retries_down:
- returned: On success when C(state=present)
- description: The number of allowed check failures before changing the operating status of the member to ERROR.
- type: str
- name:
- returned: On success when C(state=present)
- description: Human-readable name of the resource.
- type: str
- operating_status:
- returned: On success when C(state=present)
- description: The operating status of the resource.
- type: str
- pool_id:
- returned: On success when C(state=present)
- description: The id of the pool.
- type: str
- project_id:
- returned: On success when C(state=present)
- description: The ID of the project owning this resource.
- type: str
- provisioning_status:
- returned: On success when C(state=present)
- description: The provisioning status of the resource.
- type: str
- timeout:
- returned: On success when C(state=present)
- description: The maximum time, in seconds, that a monitor waits to connect before it times out.
- type: int
- type:
- returned: On success when C(state=present)
- description: The type of health monitor.
- type: str
- updated_at:
- returned: On success when C(state=present)
- description: The UTC date and timestamp when the resource was last updated.
- type: str
- url_path:
- returned: On success when C(state=present)
- description: The HTTP URL path of the request sent by the monitor to test the health of a backend member.
- type: str
-'''
-import time
+ url_path: '/status'
+- name: Delete a load-balancer health monitor
+ openstack.cloud.lb_health_monitor:
+ cloud: devstack
+ name: healthmonitor01
+ state: absent
+'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class HealthMonitorModule(OpenStackModule):
+class LoadBalancerHealthMonitorModule(OpenStackModule):
- def _wait_for_health_monitor_status(self, health_monitor_id, status, failures, interval=5):
- timeout = self.params['timeout']
+ argument_spec = dict(
+ delay=dict(type='int'),
+ expected_codes=dict(),
+ health_monitor_timeout=dict(type='int', aliases=['resp_timeout']),
+ http_method=dict(),
+ is_admin_state_up=dict(type='bool', aliases=['admin_state_up']),
+ max_retries=dict(type='int'),
+ max_retries_down=dict(type='int'),
+ name=dict(required=True),
+ pool=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ type=dict(default='HTTP'),
+ url_path=dict(),
+ )
- total_sleep = 0
- if failures is None:
- failures = []
+ module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('delay', 'health_monitor_timeout',
+ 'max_retries', 'pool',)),
+ ],
+ supports_check_mode=True,
+ )
- while total_sleep < timeout:
- health_monitor = self.conn.load_balancer.get_health_monitor(health_monitor_id)
- provisioning_status = health_monitor.provisioning_status
- if provisioning_status == status:
- return health_monitor
- if provisioning_status in failures:
- self._fail_json(
- msg="health monitor %s transitioned to failure state %s" %
- (health_monitor, provisioning_status)
- )
+ def run(self):
+ state = self.params['state']
- time.sleep(interval)
- total_sleep += interval
+ health_monitor = self._find()
- self._fail_json(msg="timeout waiting for health monitor %s to transition to %s" %
- (health_monitor_id, status)
- )
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, health_monitor))
- argument_spec = dict(
- name=dict(required=True),
- delay=dict(required=True),
- max_retries=dict(required=True),
- max_retries_down=dict(required=False, default="3"),
- resp_timeout=dict(required=True, type='int'),
- pool=dict(required=True),
- expected_codes=dict(required=False, default="200"),
- admin_state_up=dict(required=False, default=True, type='bool'),
- state=dict(default='present', choices=['absent', 'present']),
- http_method=dict(default="GET", required=False, choices=["GET", "CONNECT", "DELETE",
- "HEAD", "OPTIONS", "PATCH",
- "POST", "PUT", "TRACE"]),
- url_path=dict(default="/", required=False),
- type=dict(default='HTTP',
- choices=['HTTP', 'HTTPS', 'PING', 'SCTP', 'TCP', 'TLS-HELLO', 'UDP-CONNECT']))
+ if state == 'present' and not health_monitor:
+ # Create health_monitor
+ health_monitor = self._create()
+ self.exit_json(
+ changed=True,
+ health_monitor=health_monitor.to_dict(computed=False))
- module_kwargs = dict(supports_check_mode=True)
+ elif state == 'present' and health_monitor:
+ # Update health_monitor
+ update = self._build_update(health_monitor)
+ if update:
+ health_monitor = self._update(health_monitor, update)
- def run(self):
+ self.exit_json(
+ changed=bool(update),
+ health_monitor=health_monitor.to_dict(computed=False))
+
+ elif state == 'absent' and health_monitor:
+ # Delete health_monitor
+ self._delete(health_monitor)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not health_monitor:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, health_monitor):
+ update = {}
+
+ non_updateable_keys = [k for k in ['type']
+ if self.params[k] is not None
+ and self.params[k] != health_monitor[k]]
+
+ pool_name_or_id = self.params['pool']
+ pool = self.conn.load_balancer.find_pool(name_or_id=pool_name_or_id,
+ ignore_missing=False)
+ # Field pool_id is not returned from self.conn.load_balancer.\
+ # find_pool() so use pools instead.
+ if health_monitor['pools'] != [dict(id=pool.id)]:
+ non_updateable_keys.append('pool')
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in ['delay', 'expected_codes', 'http_method',
+ 'is_admin_state_up', 'max_retries',
+ 'max_retries_down', 'type', 'url_path']
+ if self.params[k] is not None
+ and self.params[k] != health_monitor[k])
+
+ health_monitor_timeout = self.params['health_monitor_timeout']
+ if health_monitor_timeout is not None \
+ and health_monitor_timeout != health_monitor['timeout']:
+ attributes['timeout'] = health_monitor_timeout
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['delay', 'expected_codes', 'http_method',
+ 'is_admin_state_up', 'max_retries',
+ 'max_retries_down', 'name', 'type', 'url_path']
+ if self.params[k] is not None)
+
+ health_monitor_timeout = self.params['health_monitor_timeout']
+ if health_monitor_timeout is not None:
+ kwargs['timeout'] = health_monitor_timeout
+
+ pool_name_or_id = self.params['pool']
+ pool = self.conn.load_balancer.find_pool(name_or_id=pool_name_or_id,
+ ignore_missing=False)
+ kwargs['pool_id'] = pool.id
+
+ health_monitor = \
+ self.conn.load_balancer.create_health_monitor(**kwargs)
+
+ if self.params['wait']:
+ health_monitor = self.sdk.resource.wait_for_status(
+ self.conn.load_balancer, health_monitor,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'],
+ attribute='provisioning_status')
+
+ return health_monitor
+
+ def _delete(self, health_monitor):
+ self.conn.load_balancer.delete_health_monitor(health_monitor.id)
+
+ def _find(self):
+ name = self.params['name']
+ return self.conn.load_balancer.find_health_monitor(name_or_id=name)
+
+ def _update(self, health_monitor, update):
+ attributes = update.get('attributes')
+ if attributes:
+ health_monitor = self.conn.load_balancer.update_health_monitor(
+ health_monitor.id, **attributes)
+
+ if self.params['wait']:
+ health_monitor = self.sdk.resource.wait_for_status(
+ self.conn.load_balancer, health_monitor,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'],
+ attribute='provisioning_status')
+
+ return health_monitor
- try:
- changed = False
- health_monitor = self.conn.load_balancer.find_health_monitor(name_or_id=self.params['name'])
- pool = self.conn.load_balancer.find_pool(name_or_id=self.params['pool'])
- if self.params['state'] == 'present':
- if not health_monitor:
- changed = True
- health_attrs = {"pool_id": pool.id,
- "type": self.params["type"],
- "delay": self.params['delay'],
- "max_retries": self.params['max_retries'],
- "max_retries_down": self.params['max_retries_down'],
- "timeout": self.params['resp_timeout'],
- "name": self.params['name'],
- "admin_state_up": self.params["admin_state_up"],
- }
- if self.params["type"] in ["HTTP", "HTTPS"]:
- health_attrs["expected_codes"] = self.params["expected_codes"]
- health_attrs["http_method"] = self.params["http_method"]
- health_attrs["url_path"] = self.params["url_path"]
-
- if self.ansible.check_mode:
- self.exit_json(changed=True)
-
- health_monitor = self.conn.load_balancer.create_health_monitor(**health_attrs)
- if not self.params['wait']:
- self.exit_json(changed=changed, id=health_monitor.id,
- health_monitor=health_monitor.to_dict())
- else:
- health_monitor = self._wait_for_health_monitor_status(health_monitor.id, "ACTIVE", ["ERROR"])
- self.exit_json(changed=changed, id=health_monitor.id,
- health_monitor=health_monitor.to_dict())
- else:
- self.exit_json(changed=changed, id=health_monitor.id,
- health_monitor=health_monitor.to_dict()
- )
- elif self.params['state'] == 'absent':
- if health_monitor:
- if self.ansible.check_mode:
- self.exit_json(changed=True)
- self.conn.load_balancer.delete_health_monitor(health_monitor)
- changed = True
-
- self.exit_json(changed=changed)
- except Exception as e:
- self.fail(msg=str(e))
+ def _will_change(self, state, health_monitor):
+ if state == 'present' and not health_monitor:
+ return True
+ elif state == 'present' and health_monitor:
+ return bool(self._build_update(health_monitor))
+ elif state == 'absent' and health_monitor:
+ return True
+ else:
+ # state == 'absent' and not health_monitor:
+ return False
def main():
- module = HealthMonitorModule()
+ module = LoadBalancerHealthMonitorModule()
module()
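The rewritten health monitor module supports check mode, so changes can be previewed without applying them; a sketch reusing the example values above:

- name: Preview health monitor changes without applying them
  openstack.cloud.lb_health_monitor:
    cloud: devstack
    delay: 10
    health_monitor_timeout: 5
    max_retries: 3
    name: healthmonitor01
    pool: lb_pool
    state: present
  check_mode: true
  register: result

- name: Show whether a change would be made
  debug:
    msg: "{{ result.changed }}"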
diff --git a/ansible_collections/openstack/cloud/plugins/modules/lb_listener.py b/ansible_collections/openstack/cloud/plugins/modules/lb_listener.py
index f4cdad48a..bd38cc638 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/lb_listener.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/lb_listener.py
@@ -1,285 +1,383 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2018 Catalyst Cloud Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: lb_listener
-short_description: Add/Delete a listener for a load balancer from OpenStack Cloud
+short_description: Manage a load-balancer listener in an OpenStack cloud
author: OpenStack Ansible SIG
description:
- - Add or Remove a listener for a load balancer from the OpenStack load-balancer service.
+ - Add, update or remove a listener from an OpenStack load-balancer.
options:
- name:
- description:
- - Name that has to be given to the listener
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- loadbalancer:
- description:
- - The name or id of the load balancer that this listener belongs to.
- required: true
- type: str
- protocol:
- description:
- - The protocol for the listener.
- choices: [HTTP, HTTPS, TCP, TERMINATED_HTTPS, UDP, SCTP]
- default: HTTP
- type: str
- protocol_port:
- description:
- - The protocol port number for the listener.
- default: 80
- type: int
- timeout_client_data:
- description:
- - Client inactivity timeout in milliseconds.
- default: 50000
- type: int
- timeout_member_data:
- description:
- - Member inactivity timeout in milliseconds.
- default: 50000
- type: int
- wait:
- description:
- - If the module should wait for the load balancer to be ACTIVE.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the load balancer to get
- into ACTIVE state.
- default: 180
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ default_tls_container_ref:
+ description:
+ - A URI to a key manager service secrets container with TLS secrets.
+ type: str
+ description:
+ description:
+ - A human-readable description for the load-balancer listener.
+ type: str
+ is_admin_state_up:
+ description:
+ - The administrative state of the listener, which is up or down.
+ type: bool
+ load_balancer:
+ description:
+ - The name or id of the load-balancer that this listener belongs to.
+ - Required when I(state) is C(present).
+ - This attribute cannot be updated.
+ type: str
+ aliases: ['loadbalancer']
+ name:
+ description:
+ - Name that has to be given to the listener.
+ - This attribute cannot be updated.
+ required: true
+ type: str
+ protocol:
+ description:
+ - The protocol for the listener.
+ - For example, I(protocol) could be C(HTTP), C(HTTPS), C(TCP),
+ C(TERMINATED_HTTPS), C(UDP), C(SCTP) or C(PROMETHEUS).
+ - This attribute cannot be updated.
+ default: HTTP
+ type: str
+ protocol_port:
+ description:
+ - The protocol port number for the listener.
+ - This attribute cannot be updated.
+ type: int
+ sni_container_refs:
+ description:
+ - A list of URIs to the key manager service secrets containers with TLS
+ secrets.
+ type: list
+ elements: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+ timeout_client_data:
+ description:
+ - Client inactivity timeout in milliseconds.
+ type: int
+ timeout_member_data:
+ description:
+ - Member inactivity timeout in milliseconds.
+ type: int
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-RETURN = '''
-id:
- description: The listener UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
+RETURN = r'''
listener:
- description: Dictionary describing the listener.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Unique UUID.
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
- name:
- description: Name given to the listener.
- type: str
- sample: "test"
- description:
- description: The listener description.
- type: str
- sample: "description"
- load_balancer_id:
- description: The load balancer UUID this listener belongs to.
- type: str
- sample: "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"
- loadbalancers:
- description: A list of load balancer IDs..
- type: list
- sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
- provisioning_status:
- description: The provisioning status of the listener.
- type: str
- sample: "ACTIVE"
- operating_status:
- description: The operating status of the listener.
- type: str
- sample: "ONLINE"
- is_admin_state_up:
- description: The administrative state of the listener.
- type: bool
- sample: true
- protocol:
- description: The protocol for the listener.
- type: str
- sample: "HTTP"
- protocol_port:
- description: The protocol port number for the listener.
- type: int
- sample: 80
- timeout_client_data:
- description: Client inactivity timeout in milliseconds.
- type: int
- sample: 50000
- timeout_member_data:
- description: Member inactivity timeout in milliseconds.
- type: int
- sample: 50000
+ description: Dictionary describing the listener.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ allowed_cidrs:
+ description: List of IPv4 or IPv6 CIDRs.
+ type: list
+ alpn_protocols:
+ description: List of ALPN protocols.
+ type: list
+ connection_limit:
+      description: The maximum number of connections permitted for this
+                   listener.
+ type: str
+ created_at:
+ description: Timestamp when the listener was created.
+ type: str
+ default_pool:
+ description: Default pool to which the requests will be routed.
+ type: str
+ default_pool_id:
+ description: ID of default pool. Must have compatible protocol with
+ listener.
+ type: str
+ default_tls_container_ref:
+ description: A reference to a container of TLS secrets.
+ type: str
+ description:
+ description: The listener description.
+ type: str
+ sample: "description"
+ id:
+ description: Unique UUID.
+ type: str
+ sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
+ insert_headers:
+      description: Dictionary of additional headers to insert into the HTTP header.
+ type: dict
+ is_admin_state_up:
+ description: The administrative state of the listener.
+ type: bool
+ sample: true
+ l7_policies:
+ description: A list of L7 policy objects.
+ type: list
+ load_balancer_id:
+ description: The load balancer UUID this listener belongs to.
+ type: str
+ sample: "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"
+ load_balancers:
+ description: A list of load balancer IDs.
+ type: list
+ sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
+ name:
+ description: Name given to the listener.
+ type: str
+ sample: "test"
+ operating_status:
+ description: The operating status of the listener.
+ type: str
+ sample: "ONLINE"
+ project_id:
+ description: The ID of the project owning this resource.
+ type: str
+ protocol:
+ description: The protocol for the listener.
+ type: str
+ sample: "HTTP"
+ protocol_port:
+ description: The protocol port number for the listener.
+ type: int
+ sample: 80
+ provisioning_status:
+ description: The provisioning status of the listener.
+ type: str
+ sample: "ACTIVE"
+ sni_container_refs:
+ description: A list of references to TLS secrets.
+ type: list
+ tags:
+ description: A list of associated tags.
+ type: list
+ timeout_client_data:
+ description: Client inactivity timeout in milliseconds.
+ type: int
+ sample: 50000
+ timeout_member_connect:
+ description: Backend member connection timeout in milliseconds.
+ type: int
+ timeout_member_data:
+ description: Member inactivity timeout in milliseconds.
+ type: int
+ sample: 50000
+ timeout_tcp_inspect:
+ description: Time, in milliseconds, to wait for additional TCP packets
+ for content inspection.
+ type: int
+ tls_ciphers:
+ description: Stores a cipher string in OpenSSL format.
+ type: str
+ tls_versions:
+ description: A list of TLS protocols to be used by the listener.
+ type: list
+ updated_at:
+ description: Timestamp when the listener was last updated.
+ type: str
'''
-EXAMPLES = '''
-# Create a listener, wait for the loadbalancer to be active.
-- openstack.cloud.lb_listener:
+EXAMPLES = r'''
+- name: Create a listener, wait for the loadbalancer to be active
+ openstack.cloud.lb_listener:
cloud: mycloud
- endpoint_type: admin
- state: present
+ load_balancer: test-loadbalancer
name: test-listener
- loadbalancer: test-loadbalancer
protocol: HTTP
protocol_port: 8080
-
-# Create a listener, do not wait for the loadbalancer to be active.
-- openstack.cloud.lb_listener:
- cloud: mycloud
- endpoint_type: admin
state: present
- name: test-listener
- loadbalancer: test-loadbalancer
- protocol: HTTP
- protocol_port: 8080
- wait: no
-# Delete a listener
-- openstack.cloud.lb_listener:
+- name: Delete a listener
+ openstack.cloud.lb_listener:
cloud: mycloud
- endpoint_type: admin
- state: absent
+ load_balancer: test-loadbalancer
name: test-listener
- loadbalancer: test-loadbalancer
+ state: absent
-# Create a listener, increase timeouts for connection persistence (for SSH for example).
-- openstack.cloud.lb_listener:
+- name: Create a listener, increase timeouts for connection persistence
+ openstack.cloud.lb_listener:
cloud: mycloud
- endpoint_type: admin
- state: present
+ load_balancer: test-loadbalancer
name: test-listener
- loadbalancer: test-loadbalancer
protocol: TCP
protocol_port: 22
+ state: present
timeout_client_data: 1800000
timeout_member_data: 1800000
'''
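
Since this rewrite adds update handling and check mode support (supports_check_mode=True), re-running a task with changed updatable attributes such as the data timeouts, description or TLS references now modifies an existing listener in place, and running the same task with check_mode: true only reports whether a change would occur. A minimal sketch, assuming the mycloud credentials and the test-listener/test-loadbalancer objects created in the examples above:

- name: Update timeouts on an existing listener
  openstack.cloud.lb_listener:
    cloud: mycloud
    load_balancer: test-loadbalancer
    name: test-listener
    protocol: HTTP
    protocol_port: 8080
    state: present
    timeout_client_data: 120000
    timeout_member_data: 120000
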
-import time
-
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class LoadbalancerListenerModule(OpenStackModule):
+class LoadBalancerListenerModule(OpenStackModule):
argument_spec = dict(
+ default_tls_container_ref=dict(),
+ description=dict(),
+ is_admin_state_up=dict(type='bool'),
+ load_balancer=dict(aliases=['loadbalancer']),
name=dict(required=True),
+ protocol=dict(default='HTTP'),
+ protocol_port=dict(type='int'),
+ sni_container_refs=dict(type='list', elements='str'),
state=dict(default='present', choices=['absent', 'present']),
- loadbalancer=dict(required=True),
- protocol=dict(default='HTTP',
- choices=['HTTP', 'HTTPS', 'TCP', 'TERMINATED_HTTPS', 'UDP', 'SCTP']),
- protocol_port=dict(default=80, type='int', required=False),
- timeout_client_data=dict(default=50000, type='int', required=False),
- timeout_member_data=dict(default=50000, type='int', required=False),
+ timeout_client_data=dict(type='int'),
+ timeout_member_data=dict(type='int'),
+ )
+ module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('load_balancer',)),
+ ],
+ supports_check_mode=True,
)
- module_kwargs = dict()
-
- def _lb_wait_for_status(self, lb, status, failures, interval=5):
- """Wait for load balancer to be in a particular provisioning status."""
- timeout = self.params['timeout']
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- lb = self.conn.load_balancer.get_load_balancer(lb.id)
- if lb.provisioning_status == status:
- return None
- if lb.provisioning_status in failures:
- self.fail_json(
- msg="Load Balancer %s transitioned to failure state %s" %
- (lb.id, lb.provisioning_status)
- )
-
- time.sleep(interval)
- total_sleep += interval
-
- self.fail_json(
- msg="Timeout waiting for Load Balancer %s to transition to %s" %
- (lb.id, status)
- )
def run(self):
- loadbalancer = self.params['loadbalancer']
- loadbalancer_id = None
-
- changed = False
- listener = self.conn.load_balancer.find_listener(
- name_or_id=self.params['name'])
-
- if self.params['state'] == 'present':
- if not listener:
- lb = self.conn.load_balancer.find_load_balancer(loadbalancer)
- if not lb:
- self.fail_json(
- msg='load balancer %s is not found' % loadbalancer
- )
- loadbalancer_id = lb.id
-
- listener = self.conn.load_balancer.create_listener(
- name=self.params['name'],
- loadbalancer_id=loadbalancer_id,
- protocol=self.params['protocol'],
- protocol_port=self.params['protocol_port'],
- timeout_client_data=self.params['timeout_client_data'],
- timeout_member_data=self.params['timeout_member_data'],
- )
- changed = True
-
- if not self.params['wait']:
- self.exit_json(
- changed=changed, listener=listener.to_dict(),
- id=listener.id)
-
- if self.params['wait']:
- # Check in case the listener already exists.
- lb = self.conn.load_balancer.find_load_balancer(loadbalancer)
- if not lb:
- self.fail_json(
- msg='load balancer %s is not found' % loadbalancer
- )
- self._lb_wait_for_status(lb, "ACTIVE", ["ERROR"])
-
- self.exit_json(
- changed=changed, listener=listener.to_dict(), id=listener.id)
- elif self.params['state'] == 'absent':
- if not listener:
- changed = False
- else:
- self.conn.load_balancer.delete_listener(listener)
- changed = True
-
- if self.params['wait']:
- # Wait for the load balancer to be active after deleting
- # the listener.
- lb = self.conn.load_balancer.find_load_balancer(loadbalancer)
- if not lb:
- self.fail_json(
- msg='load balancer %s is not found' % loadbalancer
- )
- self._lb_wait_for_status(lb, "ACTIVE", ["ERROR"])
-
- self.exit_json(changed=changed)
+ state = self.params['state']
+
+ listener = self._find()
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, listener))
+
+ if state == 'present' and not listener:
+ # Create listener
+ listener = self._create()
+ self.exit_json(changed=True,
+ listener=listener.to_dict(computed=False))
+
+ elif state == 'present' and listener:
+ # Update listener
+ update = self._build_update(listener)
+ if update:
+ listener = self._update(listener, update)
+
+ self.exit_json(changed=bool(update),
+ listener=listener.to_dict(computed=False))
+
+ elif state == 'absent' and listener:
+ # Delete listener
+ self._delete(listener)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not listener:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, listener):
+ update = {}
+
+ non_updateable_keys = [k for k in ['protocol', 'protocol_port']
+ if self.params[k] is not None
+ and self.params[k] != listener[k]]
+
+ load_balancer_name_or_id = self.params['load_balancer']
+ load_balancer = self.conn.load_balancer.find_load_balancer(
+ load_balancer_name_or_id, ignore_missing=False)
+ # Field load_balancer_id is not returned from self.conn.load_balancer.\
+        # find_listener() so use load_balancers instead.
+ if listener['load_balancers'] != [dict(id=load_balancer.id)]:
+ non_updateable_keys.append('load_balancer')
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in ['default_tls_container_ref',
+ 'description',
+ 'is_admin_state_up',
+ 'sni_container_refs',
+ 'timeout_client_data',
+ 'timeout_member_data']
+ if self.params[k] is not None
+ and self.params[k] != listener[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['default_tls_container_ref', 'description',
+ 'is_admin_state_up', 'name', 'protocol',
+ 'protocol_port', 'sni_container_refs',
+ 'timeout_client_data', 'timeout_member_data']
+ if self.params[k] is not None)
+
+ load_balancer_name_or_id = self.params['load_balancer']
+ load_balancer = self.conn.load_balancer.find_load_balancer(
+ load_balancer_name_or_id, ignore_missing=False)
+ kwargs['load_balancer_id'] = load_balancer.id
+
+ listener = self.conn.load_balancer.create_listener(**kwargs)
+
+ if self.params['wait']:
+ self.conn.load_balancer.wait_for_load_balancer(
+ listener.load_balancer_id,
+ wait=self.params['timeout'])
+
+ return listener
+
+ def _delete(self, listener):
+ self.conn.load_balancer.delete_listener(listener.id)
+
+ if self.params['wait']:
+ # Field load_balancer_id is not returned from self.conn.\
+ # load_balancer.find_listener() so use load_balancers instead.
+ if not listener.load_balancers \
+ or len(listener.load_balancers) != 1:
+ raise AssertionError("A single load-balancer is expected")
+
+ self.conn.load_balancer.wait_for_load_balancer(
+ listener.load_balancers[0]['id'],
+ wait=self.params['timeout'])
+
+ def _find(self):
+ name = self.params['name']
+ return self.conn.load_balancer.find_listener(name_or_id=name)
+
+ def _update(self, listener, update):
+ attributes = update.get('attributes')
+ if attributes:
+ listener = self.conn.load_balancer.update_listener(listener.id,
+ **attributes)
+
+ if self.params['wait']:
+ # Field load_balancer_id is not returned from self.conn.\
+ # load_balancer.find_listener() so use load_balancers instead.
+ if not listener.load_balancers \
+ or len(listener.load_balancers) != 1:
+ raise AssertionError("A single load-balancer is expected")
+
+ self.conn.load_balancer.wait_for_load_balancer(
+ listener.load_balancers[0]['id'],
+ wait=self.params['timeout'])
+
+ return listener
+
+ def _will_change(self, state, listener):
+ if state == 'present' and not listener:
+ return True
+ elif state == 'present' and listener:
+ return bool(self._build_update(listener))
+ elif state == 'absent' and listener:
+ return True
+ else:
+ # state == 'absent' and not listener:
+ return False
def main():
- module = LoadbalancerListenerModule()
+ module = LoadBalancerListenerModule()
module()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/lb_member.py b/ansible_collections/openstack/cloud/plugins/modules/lb_member.py
index 264f2b8e6..99c32e53f 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/lb_member.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/lb_member.py
@@ -1,233 +1,377 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2018 Catalyst Cloud Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: lb_member
-short_description: Add/Delete a member for a pool in load balancer from OpenStack Cloud
+short_description: Manage members in an OpenStack load-balancer pool
author: OpenStack Ansible SIG
description:
- - Add or Remove a member for a pool from the OpenStack load-balancer service.
+  - Add, update or remove a member from an OpenStack load-balancer pool.
options:
- name:
- description:
- - Name that has to be given to the member
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- pool:
- description:
- - The name or id of the pool that this member belongs to.
- required: true
- type: str
- protocol_port:
- description:
- - The protocol port number for the member.
- default: 80
- type: int
- address:
- description:
- - The IP address of the member.
- type: str
- subnet_id:
- description:
- - The subnet ID the member service is accessible from.
- type: str
- wait:
- description:
- - If the module should wait for the load balancer to be ACTIVE.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the load balancer to get
- into ACTIVE state.
- default: 180
- type: int
- monitor_address:
- description:
- - IP address used to monitor this member
- type: str
- monitor_port:
- description:
- - Port used to monitor this member
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ address:
+ description:
+ - The IP address of the member.
+ - Required when I(state) is C(present).
+ - This attribute cannot be updated.
+ type: str
+ monitor_address:
+ description:
+ - IP address used to monitor this member.
+ type: str
+ monitor_port:
+ description:
+ - Port used to monitor this member.
+ type: int
+ name:
+ description:
+ - Name that has to be given to the member.
+ required: true
+ type: str
+ pool:
+ description:
+ - The name or id of the pool that this member belongs to.
+ - This attribute cannot be updated.
+ required: true
+ type: str
+ protocol_port:
+ description:
+ - The protocol port number for the member.
+ - Required when I(state) is C(present).
+ - This attribute cannot be updated.
+ type: int
+ state:
+ description:
+ - Should the resource be C(present) or C(absent).
+ choices: [present, absent]
+ default: present
+ type: str
+ subnet_id:
+ description:
+ - The subnet ID the member service is accessible from.
+ - This attribute cannot be updated.
+ type: str
+ weight:
+ description:
+ - The weight of a member determines the portion of requests or
+ connections it services compared to the other members of the pool.
+ - For example, a member with a weight of 10 receives five times as many
+ requests as a member with a weight of 2. A value of 0 means the member
+ does not receive new connections but continues to service existing
+ connections. A valid value is from 0 to 256.
+ - "Octavia's default for I(weight) is C(1)."
+ type: int
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-RETURN = '''
-id:
- description: The member UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
+RETURN = r'''
member:
- description: Dictionary describing the member.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Unique UUID.
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
- name:
- description: Name given to the member.
- type: str
- sample: "test"
- description:
- description: The member description.
- type: str
- sample: "description"
- provisioning_status:
- description: The provisioning status of the member.
- type: str
- sample: "ACTIVE"
- operating_status:
- description: The operating status of the member.
- type: str
- sample: "ONLINE"
- is_admin_state_up:
- description: The administrative state of the member.
- type: bool
- sample: true
- protocol_port:
- description: The protocol port number for the member.
- type: int
- sample: 80
- subnet_id:
- description: The subnet ID the member service is accessible from.
- type: str
- sample: "489247fa-9c25-11e8-9679-00224d6b7bc1"
- address:
- description: The IP address of the backend member server.
- type: str
- sample: "192.168.2.10"
+ description: Dictionary describing the load-balancer pool member.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ address:
+ description: The IP address of the backend member server.
+ type: str
+ backup:
+ description: A bool value that indicates whether the member is a backup
+ or not. Backup members only receive traffic when all
+ non-backup members are down.
+ type: bool
+ created_at:
+ description: Timestamp when the member was created.
+ type: str
+ id:
+ description: Unique UUID.
+ type: str
+ is_admin_state_up:
+ description: The administrative state of the member.
+ type: bool
+ monitor_address:
+ description: IP address used to monitor this member.
+ type: str
+ monitor_port:
+ description: Port used to monitor this member.
+ type: int
+ name:
+ description: Name given to the member.
+ type: str
+ operating_status:
+ description: Operating status of the member.
+ type: str
+ project_id:
+ description: The ID of the project this member is associated with.
+ type: str
+ protocol_port:
+ description: The protocol port number for the member.
+ type: int
+ provisioning_status:
+ description: The provisioning status of the member.
+ type: str
+ subnet_id:
+ description: The subnet ID the member service is accessible from.
+ type: str
+ tags:
+ description: A list of associated tags.
+ type: list
+ updated_at:
+ description: Timestamp when the member was last updated.
+ type: str
+ weight:
+ description: A positive integer value that indicates the relative portion
+ of traffic that this member should receive from the pool.
+ For example, a member with a weight of 10 receives five
+                   times as much traffic as a member with a weight of 2.
+ type: int
+pool:
+ description: Dictionary describing the load-balancer pool.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ alpn_protocols:
+ description: List of ALPN protocols.
+ type: list
+ created_at:
+ description: Timestamp when the pool was created.
+ type: str
+ description:
+ description: The pool description.
+ type: str
+ health_monitor_id:
+ description: Health Monitor ID.
+ type: str
+ id:
+ description: Unique UUID.
+ type: str
+ is_admin_state_up:
+ description: The administrative state of the pool.
+ type: bool
+ lb_algorithm:
+ description: The load balancing algorithm for the pool.
+ type: str
+ listener_id:
+ description: The listener ID the pool belongs to.
+ type: str
+ listeners:
+ description: A list of listener IDs.
+ type: list
+ loadbalancer_id:
+ description: The load balancer ID the pool belongs to. This field is set
+ when the pool does not belong to any listener in the load
+ balancer.
+ type: str
+ loadbalancers:
+ description: A list of load balancer IDs.
+ type: list
+ members:
+ description: A list of member IDs.
+ type: list
+ name:
+ description: Name given to the pool.
+ type: str
+ operating_status:
+ description: The operating status of the pool.
+ type: str
+ project_id:
+ description: The ID of the project.
+ type: str
+ protocol:
+ description: The protocol for the pool.
+ type: str
+ provisioning_status:
+ description: The provisioning status of the pool.
+ type: str
+ session_persistence:
+ description: A JSON object specifying the session persistence for the
+ pool.
+ type: dict
+ tags:
+ description: A list of associated tags.
+ type: list
+ tls_ciphers:
+ description: Stores a string of cipher strings in OpenSSL format.
+ type: str
+ tls_enabled:
+ description: Use TLS for connections to backend member servers.
+ type: bool
+ tls_versions:
+      description: A list of TLS protocol versions to be used by the pool.
+ type: list
+ updated_at:
+ description: Timestamp when the pool was updated.
+ type: str
'''
-EXAMPLES = '''
-# Create a member, wait for the member to be created.
-- openstack.cloud.lb_member:
+EXAMPLES = r'''
+- name: Create member in a load-balancer pool
+ openstack.cloud.lb_member:
+ address: 192.168.10.3
cloud: mycloud
- endpoint_type: admin
- state: present
name: test-member
pool: test-pool
- address: 192.168.10.3
protocol_port: 8080
+ state: present
-# Delete a listener
-- openstack.cloud.lb_member:
+- name: Delete member from a load-balancer pool
+ openstack.cloud.lb_member:
cloud: mycloud
- endpoint_type: admin
- state: absent
name: test-member
pool: test-pool
+ state: absent
'''
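
The I(weight) option documented above directly controls traffic distribution between pool members. With the hypothetical members below, and assuming both are healthy and the mycloud/test-pool names from the examples exist, web1 would receive roughly five times as many connections as web2. A minimal sketch:

- name: Add a heavily weighted member
  openstack.cloud.lb_member:
    address: 192.168.10.10
    cloud: mycloud
    name: web1
    pool: test-pool
    protocol_port: 8080
    state: present
    weight: 10

- name: Add a lightly weighted member
  openstack.cloud.lb_member:
    address: 192.168.10.11
    cloud: mycloud
    name: web2
    pool: test-pool
    protocol_port: 8080
    state: present
    weight: 2
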
-import time
-
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class LoadbalancerMemberModule(OpenStackModule):
+class LoadBalancerMemberModule(OpenStackModule):
argument_spec = dict(
+ address=dict(),
+ monitor_address=dict(),
+ monitor_port=dict(type='int'),
name=dict(required=True),
- state=dict(default='present', choices=['absent', 'present']),
pool=dict(required=True),
- address=dict(default=None),
- protocol_port=dict(default=80, type='int'),
- subnet_id=dict(default=None),
- monitor_address=dict(default=None),
- monitor_port=dict(default=None, type='int')
+ protocol_port=dict(type='int'),
+ state=dict(default='present', choices=['absent', 'present']),
+ subnet_id=dict(),
+ weight=dict(type='int'),
+ )
+ module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('address', 'protocol_port',)),
+ ],
+ supports_check_mode=True,
)
- module_kwargs = dict()
-
- def _wait_for_member_status(self, pool_id, member_id, status,
- failures, interval=5):
- timeout = self.params['timeout']
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- member = self.conn.load_balancer.get_member(member_id, pool_id)
- provisioning_status = member.provisioning_status
- if provisioning_status == status:
- return member
- if provisioning_status in failures:
- self.fail_json(
- msg="Member %s transitioned to failure state %s" %
- (member_id, provisioning_status)
- )
-
- time.sleep(interval)
- total_sleep += interval
-
- self.fail_json(
- msg="Timeout waiting for member %s to transition to %s" %
- (member_id, status)
- )
def run(self):
- name = self.params['name']
- pool = self.params['pool']
+ state = self.params['state']
+
+ member, pool = self._find()
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, member, pool))
+
+ if state == 'present' and not member:
+ # Create member
+ member = self._create(pool)
+ self.exit_json(changed=True,
+ member=member.to_dict(computed=False),
+ pool=pool.to_dict(computed=False))
+
+ elif state == 'present' and member:
+ # Update member
+ update = self._build_update(member, pool)
+ if update:
+ member = self._update(member, pool, update)
+
+ self.exit_json(changed=bool(update),
+ member=member.to_dict(computed=False),
+ pool=pool.to_dict(computed=False))
+
+ elif state == 'absent' and member:
+ # Delete member
+ self._delete(member, pool)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not member:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, member, pool):
+ update = {}
- changed = False
+ non_updateable_keys = [k for k in ['address', 'name', 'protocol_port',
+ 'subnet_id']
+ if self.params[k] is not None
+ and self.params[k] != member[k]]
- pool_ret = self.conn.load_balancer.find_pool(name_or_id=pool)
- if not pool_ret:
- self.fail_json(msg='pool %s is not found' % pool)
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
- pool_id = pool_ret.id
- member = self.conn.load_balancer.find_member(name, pool_id)
+ attributes = dict((k, self.params[k])
+ for k in ['monitor_address', 'monitor_port',
+ 'weight']
+ if self.params[k] is not None
+ and self.params[k] != member[k])
- if self.params['state'] == 'present':
- if not member:
- member = self.conn.load_balancer.create_member(
- pool_ret,
- address=self.params['address'],
- name=name,
- protocol_port=self.params['protocol_port'],
- subnet_id=self.params['subnet_id'],
- monitor_address=self.params['monitor_address'],
- monitor_port=self.params['monitor_port']
- )
- changed = True
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self, pool):
+ kwargs = dict((k, self.params[k])
+ for k in ['address', 'monitor_address', 'monitor_port',
+ 'name', 'protocol_port', 'subnet_id', 'weight']
+ if self.params[k] is not None)
+
+ member = self.conn.load_balancer.create_member(pool.id, **kwargs)
+
+ if self.params['wait']:
+ member = self.sdk.resource.wait_for_status(
+ self.conn.load_balancer, member,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'],
+ attribute='provisioning_status')
+
+ return member
+
+ def _delete(self, member, pool):
+ self.conn.load_balancer.delete_member(member.id, pool.id)
+
+ if self.params['wait']:
+ for count in self.sdk.utils.iterate_timeout(
+ timeout=self.params['timeout'],
+ message="Timeout waiting for load-balancer member to be absent"
+ ):
+ if self.conn.load_balancer.\
+ find_member(member.id, pool.id) is None:
+ break
+
+ def _find(self):
+ name = self.params['name']
+ pool_name_or_id = self.params['pool']
- if not self.params['wait']:
- self.exit_json(
- changed=changed, member=member.to_dict(), id=member.id)
+ pool = self.conn.load_balancer.find_pool(name_or_id=pool_name_or_id,
+ ignore_missing=False)
+ member = self.conn.load_balancer.find_member(name, pool.id)
- if self.params['wait']:
- member = self._wait_for_member_status(
- pool_id, member.id, "ACTIVE", ["ERROR"])
+ return member, pool
- self.exit_json(
- changed=changed, member=member.to_dict(), id=member.id)
+ def _update(self, member, pool, update):
+ attributes = update.get('attributes')
+ if attributes:
+ member = self.conn.load_balancer.update_member(member.id, pool.id,
+ **attributes)
+ if self.params['wait']:
+ member = self.sdk.resource.wait_for_status(
+ self.conn.load_balancer, member,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'],
+ attribute='provisioning_status')
- elif self.params['state'] == 'absent':
- if member:
- self.conn.load_balancer.delete_member(member, pool_ret)
- changed = True
+ return member
- self.exit_json(changed=changed)
+ def _will_change(self, state, member, pool):
+ if state == 'present' and not member:
+ return True
+ elif state == 'present' and member:
+ return bool(self._build_update(member, pool))
+ elif state == 'absent' and member:
+ return True
+ else:
+ # state == 'absent' and not member:
+ return False
def main():
- module = LoadbalancerMemberModule()
+ module = LoadBalancerMemberModule()
module()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/lb_pool.py b/ansible_collections/openstack/cloud/plugins/modules/lb_pool.py
index 6f73ea1ce..cd9f72846 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/lb_pool.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/lb_pool.py
@@ -1,261 +1,332 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2018 Catalyst Cloud Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: lb_pool
-short_description: Add/Delete a pool in the load balancing service from OpenStack Cloud
+short_description: Manage a load-balancer pool in an OpenStack cloud
author: OpenStack Ansible SIG
description:
- - Add or Remove a pool from the OpenStack load-balancer service.
+  - Add, update or remove a load-balancer pool from an OpenStack cloud.
options:
- name:
- description:
- - Name that has to be given to the pool
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- loadbalancer:
- description:
- - The name or id of the load balancer that this pool belongs to.
- Either loadbalancer or listener must be specified for pool creation.
- type: str
- listener:
- description:
- - The name or id of the listener that this pool belongs to.
- Either loadbalancer or listener must be specified for pool creation.
- type: str
- protocol:
- description:
- - The protocol for the pool.
- choices: [HTTP, HTTPS, PROXY, TCP, UDP]
- default: HTTP
- type: str
- lb_algorithm:
- description:
- - The load balancing algorithm for the pool.
- choices: [LEAST_CONNECTIONS, ROUND_ROBIN, SOURCE_IP]
- default: ROUND_ROBIN
- type: str
- wait:
- description:
- - If the module should wait for the pool to be ACTIVE.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the pool to get
- into ACTIVE state.
- default: 180
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - A human-readable description for the load-balancer pool.
+ type: str
+ lb_algorithm:
+ description:
+ - The load balancing algorithm for the pool.
+ - For example, I(lb_algorithm) could be C(LEAST_CONNECTIONS),
+ C(ROUND_ROBIN), C(SOURCE_IP) or C(SOURCE_IP_PORT).
+ default: ROUND_ROBIN
+ type: str
+ listener:
+ description:
+ - The name or id of the listener that this pool belongs to.
+ - Either I(listener) or I(loadbalancer) must be specified for pool
+ creation.
+ - This attribute cannot be updated.
+ type: str
+ loadbalancer:
+ description:
+ - The name or id of the load balancer that this pool belongs to.
+ - Either I(listener) or I(loadbalancer) must be specified for pool
+ creation.
+ - This attribute cannot be updated.
+ type: str
+ name:
+ description:
+ - Name that has to be given to the pool.
+ - This attribute cannot be updated.
+ required: true
+ type: str
+ protocol:
+ description:
+ - The protocol for the pool.
+ - For example, I(protocol) could be C(HTTP), C(HTTPS), C(PROXY),
+        C(PROXYV2), C(SCTP), C(TCP) or C(UDP).
+ - This attribute cannot be updated.
+ default: HTTP
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-RETURN = '''
-id:
- description: The pool UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-listener:
- description: Dictionary describing the pool.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Unique UUID.
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
- name:
- description: Name given to the pool.
- type: str
- sample: "test"
- description:
- description: The pool description.
- type: str
- sample: "description"
- loadbalancers:
- description: A list of load balancer IDs.
- type: list
- sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
- listeners:
- description: A list of listener IDs.
- type: list
- sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
- members:
- description: A list of member IDs.
- type: list
- sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
- loadbalancer_id:
- description: The load balancer ID the pool belongs to. This field is set when the pool doesn't belong to any listener in the load balancer.
- type: str
- sample: "7c4be3f8-9c2f-11e8-83b3-44a8422643a4"
- listener_id:
- description: The listener ID the pool belongs to.
- type: str
- sample: "956aa716-9c2f-11e8-83b3-44a8422643a4"
- provisioning_status:
- description: The provisioning status of the pool.
- type: str
- sample: "ACTIVE"
- operating_status:
- description: The operating status of the pool.
- type: str
- sample: "ONLINE"
- is_admin_state_up:
- description: The administrative state of the pool.
- type: bool
- sample: true
- protocol:
- description: The protocol for the pool.
- type: str
- sample: "HTTP"
- lb_algorithm:
- description: The load balancing algorithm for the pool.
- type: str
- sample: "ROUND_ROBIN"
+RETURN = r'''
+pool:
+ description: Dictionary describing the load-balancer pool.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ alpn_protocols:
+ description: List of ALPN protocols.
+ type: list
+ created_at:
+ description: Timestamp when the pool was created.
+ type: str
+ description:
+ description: The pool description.
+ type: str
+ health_monitor_id:
+ description: Health Monitor ID.
+ type: str
+ id:
+ description: Unique UUID.
+ type: str
+ is_admin_state_up:
+ description: The administrative state of the pool.
+ type: bool
+ lb_algorithm:
+ description: The load balancing algorithm for the pool.
+ type: str
+ listener_id:
+ description: The listener ID the pool belongs to.
+ type: str
+ listeners:
+ description: A list of listener IDs.
+ type: list
+ loadbalancer_id:
+ description: The load balancer ID the pool belongs to. This field is set
+ when the pool does not belong to any listener in the load
+ balancer.
+ type: str
+ loadbalancers:
+ description: A list of load balancer IDs.
+ type: list
+ members:
+ description: A list of member IDs.
+ type: list
+ name:
+ description: Name given to the pool.
+ type: str
+ operating_status:
+ description: The operating status of the pool.
+ type: str
+ project_id:
+ description: The ID of the project.
+ type: str
+ protocol:
+ description: The protocol for the pool.
+ type: str
+ provisioning_status:
+ description: The provisioning status of the pool.
+ type: str
+ session_persistence:
+ description: A JSON object specifying the session persistence for the
+ pool.
+ type: dict
+ tags:
+ description: A list of associated tags.
+ type: list
+ tls_ciphers:
+ description: Stores a string of cipher strings in OpenSSL format.
+ type: str
+ tls_enabled:
+ description: Use TLS for connections to backend member servers.
+ type: bool
+ tls_versions:
+      description: A list of TLS protocol versions to be used by the pool.
+ type: list
+ updated_at:
+ description: Timestamp when the pool was updated.
+ type: str
'''
-EXAMPLES = '''
-# Create a pool, wait for the pool to be active.
-- openstack.cloud.lb_pool:
+EXAMPLES = r'''
+- name: Create a load-balancer pool
+ openstack.cloud.lb_pool:
cloud: mycloud
- endpoint_type: admin
- state: present
- name: test-pool
+ lb_algorithm: ROUND_ROBIN
loadbalancer: test-loadbalancer
+ name: test-pool
protocol: HTTP
- lb_algorithm: ROUND_ROBIN
+ state: present
-# Delete a pool
-- openstack.cloud.lb_pool:
+- name: Delete a load-balancer pool
+ openstack.cloud.lb_pool:
cloud: mycloud
- endpoint_type: admin
- state: absent
name: test-pool
+ state: absent
'''
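
Because I(listener) and I(loadbalancer) are mutually exclusive and at least one of them is required when I(state) is C(present), a pool can just as well be anchored to an existing listener. A minimal sketch with a hypothetical listener named test-listener:

- name: Create a pool behind an existing listener
  openstack.cloud.lb_pool:
    cloud: mycloud
    lb_algorithm: LEAST_CONNECTIONS
    listener: test-listener
    name: test-listener-pool
    protocol: HTTP
    state: present
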
-import time
-
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class LoadbalancerPoolModule(OpenStackModule):
+class LoadBalancerPoolModule(OpenStackModule):
argument_spec = dict(
+ description=dict(),
+ lb_algorithm=dict(default='ROUND_ROBIN'),
+ listener=dict(),
+ loadbalancer=dict(),
name=dict(required=True),
+ protocol=dict(default='HTTP'),
state=dict(default='present', choices=['absent', 'present']),
- loadbalancer=dict(default=None),
- listener=dict(default=None),
- protocol=dict(default='HTTP',
- choices=['HTTP', 'HTTPS', 'TCP', 'UDP', 'PROXY']),
- lb_algorithm=dict(
- default='ROUND_ROBIN',
- choices=['ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP']
- )
)
module_kwargs = dict(
- mutually_exclusive=[['loadbalancer', 'listener']]
+ required_if=[
+ ('state', 'present', ('listener', 'loadbalancer'), True),
+ ],
+ mutually_exclusive=[
+ ('listener', 'loadbalancer')
+ ],
+ supports_check_mode=True,
)
- def _wait_for_pool_status(self, pool_id, status, failures,
- interval=5):
- timeout = self.params['timeout']
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- pool = self.conn.load_balancer.get_pool(pool_id)
- provisioning_status = pool.provisioning_status
- if provisioning_status == status:
- return pool
- if provisioning_status in failures:
- self.fail_json(
- msg="pool %s transitioned to failure state %s" %
- (pool_id, provisioning_status)
- )
-
- time.sleep(interval)
- total_sleep += interval
-
- self.fail_json(
- msg="timeout waiting for pool %s to transition to %s" %
- (pool_id, status)
- )
-
def run(self):
- loadbalancer = self.params['loadbalancer']
- listener = self.params['listener']
-
- changed = False
- pool = self.conn.load_balancer.find_pool(name_or_id=self.params['name'])
-
- if self.params['state'] == 'present':
- if not pool:
- loadbalancer_id = None
- if not (loadbalancer or listener):
- self.fail_json(
- msg="either loadbalancer or listener must be provided"
- )
-
- if loadbalancer:
- lb = self.conn.load_balancer.find_load_balancer(loadbalancer)
- if not lb:
- self.fail_json(
- msg='load balancer %s is not found' % loadbalancer)
- loadbalancer_id = lb.id
-
- listener_id = None
- if listener:
- listener_ret = self.conn.load_balancer.find_listener(listener)
- if not listener_ret:
- self.fail_json(
- msg='listener %s is not found' % listener)
- listener_id = listener_ret.id
-
- pool = self.conn.load_balancer.create_pool(
- name=self.params['name'],
- loadbalancer_id=loadbalancer_id,
- listener_id=listener_id,
- protocol=self.params['protocol'],
- lb_algorithm=self.params['lb_algorithm']
- )
- changed = True
-
- if not self.params['wait']:
- self.exit_json(
- changed=changed, pool=pool.to_dict(), id=pool.id)
-
- if self.params['wait']:
- pool = self._wait_for_pool_status(
- pool.id, "ACTIVE", ["ERROR"])
-
- self.exit_json(
- changed=changed, pool=pool.to_dict(), id=pool.id)
-
- elif self.params['state'] == 'absent':
- if pool:
- self.conn.load_balancer.delete_pool(pool)
- changed = True
-
- self.exit_json(changed=changed)
+ state = self.params['state']
+
+ pool = self._find()
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, pool))
+
+ if state == 'present' and not pool:
+ # Create pool
+ pool = self._create()
+ self.exit_json(changed=True,
+ pool=pool.to_dict(computed=False))
+
+ elif state == 'present' and pool:
+ # Update pool
+ update = self._build_update(pool)
+ if update:
+ pool = self._update(pool, update)
+
+ self.exit_json(changed=bool(update),
+ pool=pool.to_dict(computed=False))
+
+ elif state == 'absent' and pool:
+ # Delete pool
+ self._delete(pool)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not pool:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, pool):
+ update = {}
+
+ non_updateable_keys = [k for k in ['protocol']
+ if self.params[k] is not None
+ and self.params[k] != pool[k]]
+
+ listener_name_or_id = self.params['listener']
+ if listener_name_or_id:
+ listener = self.conn.load_balancer.find_listener(
+ listener_name_or_id, ignore_missing=False)
+ # Field listener_id is not returned from self.conn.load_balancer.\
+            # find_pool() so use listeners instead.
+ if pool['listeners'] != [dict(id=listener.id)]:
+ non_updateable_keys.append('listener_id')
+
+ loadbalancer_name_or_id = self.params['loadbalancer']
+ if loadbalancer_name_or_id:
+ loadbalancer = self.conn.load_balancer.find_load_balancer(
+ loadbalancer_name_or_id, ignore_missing=False)
+            # Field loadbalancer_id is not returned from self.conn.\
+            # load_balancer.find_pool() so use loadbalancers instead.
+            if pool['loadbalancers'] != [dict(id=loadbalancer.id)]:
+ non_updateable_keys.append('loadbalancer_id')
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in ['description', 'lb_algorithm']
+ if self.params[k] is not None
+ and self.params[k] != pool[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['description', 'name', 'protocol',
+ 'lb_algorithm']
+ if self.params[k] is not None)
+
+ listener_name_or_id = self.params['listener']
+ if listener_name_or_id:
+ listener = self.conn.load_balancer.find_listener(
+ listener_name_or_id, ignore_missing=False)
+ kwargs['listener_id'] = listener.id
+
+ loadbalancer_name_or_id = self.params['loadbalancer']
+ if loadbalancer_name_or_id:
+ loadbalancer = self.conn.load_balancer.find_load_balancer(
+ loadbalancer_name_or_id, ignore_missing=False)
+ kwargs['loadbalancer_id'] = loadbalancer.id
+
+ pool = self.conn.load_balancer.create_pool(**kwargs)
+
+ if self.params['wait']:
+ pool = self.sdk.resource.wait_for_status(
+ self.conn.load_balancer, pool,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'],
+ attribute='provisioning_status')
+
+ return pool
+
+ def _delete(self, pool):
+ self.conn.load_balancer.delete_pool(pool.id)
+
+ if self.params['wait']:
+ for count in self.sdk.utils.iterate_timeout(
+ timeout=self.params['timeout'],
+ message="Timeout waiting for load-balancer pool to be absent"
+ ):
+ if self.conn.load_balancer.\
+ find_pool(pool.id) is None:
+ break
+
+ def _find(self):
+ name = self.params['name']
+ return self.conn.load_balancer.find_pool(name_or_id=name)
+
+ def _update(self, pool, update):
+ attributes = update.get('attributes')
+ if attributes:
+ pool = self.conn.load_balancer.update_pool(pool.id, **attributes)
+
+ if self.params['wait']:
+ pool = self.sdk.resource.wait_for_status(
+ self.conn.load_balancer, pool,
+ status='active',
+ failures=['error'],
+ wait=self.params['timeout'],
+ attribute='provisioning_status')
+
+ return pool
+
+ def _will_change(self, state, pool):
+ if state == 'present' and not pool:
+ return True
+ elif state == 'present' and pool:
+ return bool(self._build_update(pool))
+ elif state == 'absent' and pool:
+ return True
+ else:
+ # state == 'absent' and not pool:
+ return False
def main():
- module = LoadbalancerPoolModule()
+ module = LoadBalancerPoolModule()
module()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/loadbalancer.py b/ansible_collections/openstack/cloud/plugins/modules/loadbalancer.py
index 336da966c..d2addb731 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/loadbalancer.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/loadbalancer.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2018 Catalyst Cloud Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -6,15 +7,55 @@
DOCUMENTATION = '''
---
module: loadbalancer
-short_description: Add/Delete load balancer from OpenStack Cloud
+short_description: Manage Octavia load-balancer in an OpenStack cloud
author: OpenStack Ansible SIG
description:
- - Add or Remove load balancer from the OpenStack load-balancer
- service(Octavia). Load balancer update is not supported for now.
+  - Add, update or remove an Octavia load-balancer from an OpenStack cloud.
options:
+ assign_floating_ip:
+ description:
+ - Allocate floating ip address and associate with the VIP automatically.
+ - Deprecated, use M(openstack.cloud.floating_ip) instead.
+ type: bool
+ default: false
+ aliases: ['auto_public_ip']
+ delete_floating_ip:
+ description:
+ - When I(state) is C(present) and I(delete_floating_ip) is C(true), then
+ any floating ip address associated with the VIP will be deleted.
+ - When I(state) is C(absent) and I(delete_floating_ip) is C(true), then
+ any floating ip address associated with the VIP will be deleted along
+ with the load balancer.
+ - Deprecated, use M(openstack.cloud.floating_ip) instead.
+ type: bool
+ default: false
+ aliases: ['delete_public_ip']
+ description:
+ description:
+ - A human-readable description for the load-balancer.
+ type: str
+ flavor:
+ description:
+ - The flavor of the load balancer.
+ - This attribute cannot be updated.
+ type: str
+ floating_ip_address:
+ description:
+ - Floating ip address aka public ip address associated with the VIP.
+ - Deprecated, use M(openstack.cloud.floating_ip) instead.
+ type: str
+ aliases: ['public_ip_address']
+ floating_ip_network:
+ description:
+      - Name or ID of a Neutron external network on which the floating ip
+        address will be created.
+ - Deprecated, use M(openstack.cloud.floating_ip) instead.
+ type: str
+ aliases: ['public_network']
name:
description:
- The name of the load balancer.
+ - This attribute cannot be updated.
required: true
type: str
state:
@@ -23,663 +64,578 @@ options:
choices: [present, absent]
default: present
type: str
- flavor:
+ vip_address:
description:
- - The flavor of the load balancer.
+ - IP address of the load balancer virtual IP.
+ - This attribute cannot be updated.
type: str
vip_network:
description:
- The name or id of the network for the virtual IP of the load balancer.
- One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
- for creation.
- type: str
- vip_subnet:
- description:
- - The name or id of the subnet for the virtual IP of the load balancer.
- One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
+ - One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
for creation.
+ - This attribute cannot be updated.
type: str
vip_port:
description:
- The name or id of the load balancer virtual IP port. One of
- I(vip_network), I(vip_subnet), or I(vip_port) must be specified for
- creation.
- type: str
- vip_address:
- description:
- - IP address of the load balancer virtual IP.
- type: str
- public_ip_address:
- description:
- - Public IP address associated with the VIP.
+ - One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
+ for creation.
+ - This attribute cannot be updated.
type: str
- auto_public_ip:
- description:
- - Allocate a public IP address and associate with the VIP automatically.
- type: bool
- default: 'no'
- public_network:
+ vip_subnet:
description:
- - The name or ID of a Neutron external network.
+ - The name or id of the subnet for the virtual IP of the load balancer.
+ - One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
+ for creation.
+ - This attribute cannot be updated.
type: str
- delete_public_ip:
- description:
- - When C(state=absent) and this option is true, any public IP address
- associated with the VIP will be deleted along with the load balancer.
- type: bool
- default: 'no'
- listeners:
- description:
- - A list of listeners that attached to the load balancer.
- suboptions:
- name:
- description:
- - The listener name or ID.
- protocol:
- description:
- - The protocol for the listener.
- default: HTTP
- protocol_port:
- description:
- - The protocol port number for the listener.
- default: 80
- allowed_cidrs:
- description:
- - A list of IPv4, IPv6 or mix of both CIDRs to be allowed access to the listener. The default is all allowed.
- When a list of CIDRs is provided, the default switches to deny all.
- Ignored on unsupported Octavia versions (less than 2.12)
- default: []
- pool:
- description:
- - The pool attached to the listener.
- suboptions:
- name:
- description:
- - The pool name or ID.
- protocol:
- description:
- - The protocol for the pool.
- default: HTTP
- lb_algorithm:
- description:
- - The load balancing algorithm for the pool.
- default: ROUND_ROBIN
- members:
- description:
- - A list of members that added to the pool.
- suboptions:
- name:
- description:
- - The member name or ID.
- address:
- description:
- - The IP address of the member.
- protocol_port:
- description:
- - The protocol port number for the member.
- default: 80
- subnet:
- description:
- - The name or ID of the subnet the member service is
- accessible from.
- elements: dict
- type: list
- wait:
- description:
- - If the module should wait for the load balancer to be created or
- deleted.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait.
- default: 180
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-RETURN = '''
-id:
- description: The load balancer UUID.
- returned: On success when C(state=present)
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-loadbalancer:
- description: Dictionary describing the load balancer.
- returned: On success when C(state=present)
- type: complex
- contains:
- id:
- description: Unique UUID.
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
- name:
- description: Name given to the load balancer.
- type: str
- sample: "lingxian_test"
- vip_network_id:
- description: Network ID the load balancer virtual IP port belongs in.
- type: str
- sample: "f171db43-56fd-41cf-82d7-4e91d741762e"
- vip_subnet_id:
- description: Subnet ID the load balancer virtual IP port belongs in.
- type: str
- sample: "c53e3c70-9d62-409a-9f71-db148e7aa853"
- vip_port_id:
- description: The load balancer virtual IP port ID.
- type: str
- sample: "2061395c-1c01-47ab-b925-c91b93df9c1d"
- vip_address:
- description: The load balancer virtual IP address.
- type: str
- sample: "192.168.2.88"
- public_vip_address:
- description: The load balancer public VIP address.
- type: str
- sample: "10.17.8.254"
- provisioning_status:
- description: The provisioning status of the load balancer.
- type: str
- sample: "ACTIVE"
- operating_status:
- description: The operating status of the load balancer.
- type: str
- sample: "ONLINE"
- is_admin_state_up:
- description: The administrative state of the load balancer.
- type: bool
- sample: true
- listeners:
- description: The associated listener IDs, if any.
- type: list
- sample: [{"id": "7aa1b380-beec-459c-a8a7-3a4fb6d30645"}, {"id": "692d06b8-c4f8-4bdb-b2a3-5a263cc23ba6"}]
- pools:
- description: The associated pool IDs, if any.
- type: list
- sample: [{"id": "27b78d92-cee1-4646-b831-e3b90a7fa714"}, {"id": "befc1fb5-1992-4697-bdb9-eee330989344"}]
+RETURN = r'''
+floating_ip:
+ description: Dictionary describing the floating ip address attached to the
+ load-balancer.
+ type: dict
+ returned: On success when I(state) is C(present) and I(assign_floating_ip) is
+ C(true).
+ contains:
+ created_at:
+ description: Timestamp at which the floating IP was assigned.
+ type: str
+ description:
+ description: The description of a floating IP.
+ type: str
+ dns_domain:
+ description: The DNS domain.
+ type: str
+ dns_name:
+ description: The DNS name.
+ type: str
+ fixed_ip_address:
+ description: The fixed IP address associated with a floating IP address.
+ type: str
+ floating_ip_address:
+ description: The IP address of a floating IP.
+ type: str
+ floating_network_id:
+ description: The id of the network associated with a floating IP.
+ type: str
+ id:
+ description: Id of the floating ip.
+ type: str
+ name:
+ description: Name of the floating ip.
+ type: str
+ port_details:
+ description: |
+        The details of the port that this floating IP is associated
+        with. Present if the C(fip-port-details) extension is loaded.
+ type: dict
+ port_id:
+ description: The port ID floating ip associated with.
+ type: str
+ project_id:
+ description: The ID of the project this floating IP is associated with.
+ type: str
+ qos_policy_id:
+ description: The ID of the QoS policy attached to the floating IP.
+ type: str
+ revision_number:
+ description: Revision number.
+ type: str
+ router_id:
+ description: The id of the router floating ip associated with.
+ type: str
+ status:
+ description: |
+ The status of a floating IP, which can be 'ACTIVE' or 'DOWN'.
+ type: str
+ subnet_id:
+ description: The id of the subnet the floating ip associated with.
+ type: str
+ tags:
+ description: List of tags.
+ type: list
+ elements: str
+ updated_at:
+ description: Timestamp at which the floating IP was last updated.
+ type: str
+load_balancer:
+ description: Dictionary describing the load-balancer.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ additional_vips:
+ description: Additional VIPs.
+ type: str
+ availability_zone:
+ description: Name of the target Octavia availability zone.
+ type: str
+ created_at:
+ description: Timestamp when the load balancer was created.
+ type: str
+ description:
+ description: The load balancer description.
+ type: str
+ flavor_id:
+ description: The load balancer flavor ID.
+ type: str
+ id:
+ description: Unique UUID.
+ type: str
+ is_admin_state_up:
+ description: The administrative state of the load balancer.
+ type: bool
+ listeners:
+ description: The associated listener IDs, if any.
+ type: list
+ name:
+ description: Name given to the load balancer.
+ type: str
+ operating_status:
+ description: The operating status of the load balancer.
+ type: str
+ pools:
+ description: The associated pool IDs, if any.
+ type: list
+ project_id:
+ description: The ID of the project this load balancer is associated with.
+ type: str
+ provider:
+ description: Provider name for the load balancer.
+ type: str
+ provisioning_status:
+ description: The provisioning status of the load balancer.
+ type: str
+ tags:
+ description: A list of associated tags.
+      type: list
+ updated_at:
+ description: Timestamp when the load balancer was last updated.
+ type: str
+ vip_address:
+ description: The load balancer virtual IP address.
+ type: str
+ vip_network_id:
+ description: Network ID the load balancer virtual IP port belongs in.
+ type: str
+ vip_port_id:
+ description: The load balancer virtual IP port ID.
+ type: str
+ vip_qos_policy_id:
+ description: VIP qos policy id.
+ type: str
+ vip_subnet_id:
+ description: Subnet ID the load balancer virtual IP port belongs in.
+ type: str
'''
-EXAMPLES = '''
-# Create a load balancer by specifying the VIP subnet.
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- state: present
+EXAMPLES = r'''
+- name: Create a load balancer
+ openstack.cloud.loadbalancer:
+ cloud: devstack
name: my_lb
- vip_subnet: my_subnet
- timeout: 150
-
-# Create a load balancer by specifying the VIP network and the IP address.
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
state: present
+ vip_subnet: my_subnet
+
+- name: Create another load balancer
+ openstack.cloud.loadbalancer:
+ cloud: devstack
name: my_lb
- vip_network: my_network
+ state: present
vip_address: 192.168.0.11
+ vip_network: my_network
-# Create a load balancer together with its sub-resources in the 'all in one'
-# way. A public IP address is also allocated to the load balancer VIP.
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- name: lingxian_test
- state: present
- vip_subnet: kong_subnet
- auto_public_ip: yes
- public_network: public
- listeners:
- - name: lingxian_80
- protocol: TCP
- protocol_port: 80
- pool:
- name: lingxian_80_pool
- protocol: TCP
- members:
- - name: mywebserver1
- address: 192.168.2.81
- protocol_port: 80
- subnet: webserver_subnet
- - name: lingxian_8080
- protocol: TCP
- protocol_port: 8080
- pool:
- name: lingxian_8080-pool
- protocol: TCP
- members:
- - name: mywebserver2
- address: 192.168.2.82
- protocol_port: 8080
- wait: yes
- timeout: 600
-
-# Delete a load balancer(and all its related resources)
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- state: absent
+- name: Delete a load balancer and all its related resources
+ openstack.cloud.loadbalancer:
+ cloud: devstack
name: my_lb
-
-# Delete a load balancer(and all its related resources) together with the
-# public IP address(if any) attached to it.
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
state: absent
+
+- name: Delete a load balancer, its related resources and its floating ip
+ openstack.cloud.loadbalancer:
+ cloud: devstack
+ delete_floating_ip: true
name: my_lb
- delete_public_ip: yes
+ state: absent
'''
-import time
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class LoadBalancerModule(OpenStackModule):
- def _wait_for_pool(self, pool, provisioning_status, operating_status, failures, interval=5):
- """Wait for pool to be in a particular provisioning and operating status."""
- timeout = self.params['timeout'] # reuse loadbalancer timeout
-
- total_sleep = 0
- if failures is None:
- failures = []
+ argument_spec = dict(
+ assign_floating_ip=dict(default=False, type='bool',
+ aliases=['auto_public_ip']),
+ delete_floating_ip=dict(default=False, type='bool',
+ aliases=['delete_public_ip']),
+ description=dict(),
+ flavor=dict(),
+ floating_ip_address=dict(aliases=['public_ip_address']),
+ floating_ip_network=dict(aliases=['public_network']),
+ name=dict(required=True),
+ state=dict(default='present', choices=['absent', 'present']),
+ vip_address=dict(),
+ vip_network=dict(),
+ vip_port=dict(),
+ vip_subnet=dict(),
+ )
+ module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('vip_network', 'vip_subnet', 'vip_port'),
+ True)
+ ],
+ mutually_exclusive=[
+ ('assign_floating_ip', 'delete_floating_ip'),
+ ],
+ supports_check_mode=True,
+ )
- while total_sleep < timeout:
- pool = self.conn.load_balancer.find_pool(name_or_id=pool.id)
- if pool:
- if pool.provisioning_status == provisioning_status and pool.operating_status == operating_status:
- return None
- if pool.provisioning_status in failures:
- self.fail_json(
- msg="Pool %s transitioned to failure state %s" %
- (pool.id, pool.provisioning_status)
- )
+ def run(self):
+ state = self.params['state']
+
+ load_balancer = self._find()
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, load_balancer))
+
+ if state == 'present' and not load_balancer:
+ # Create load_balancer
+ load_balancer, floating_ip = self._create()
+ self.exit_json(
+ changed=True,
+ load_balancer=load_balancer.to_dict(computed=False),
+ **(dict(floating_ip=floating_ip.to_dict(computed=False))
+ if floating_ip is not None else dict()))
+
+ elif state == 'present' and load_balancer:
+ # Update load_balancer
+ update, floating_ip = self._build_update(load_balancer)
+ if update:
+ load_balancer, floating_ip = self._update(load_balancer,
+ update)
+
+ self.exit_json(
+ changed=bool(update),
+ load_balancer=load_balancer.to_dict(computed=False),
+ **(dict(floating_ip=floating_ip.to_dict(computed=False))
+ if floating_ip is not None else dict()))
+
+ elif state == 'absent' and load_balancer:
+ # Delete load_balancer
+ self._delete(load_balancer)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not load_balancer:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, load_balancer):
+ update = {}
+
+ non_updateable_keys = [k for k in ['name', 'vip_address']
+ if self.params[k] is not None
+ and self.params[k] != load_balancer[k]]
+
+ flavor_name_or_id = self.params['flavor']
+ if flavor_name_or_id is not None:
+ flavor = self.conn.load_balancer.find_flavor(
+ flavor_name_or_id, ignore_missing=False)
+ if load_balancer['flavor_id'] != flavor.id:
+ non_updateable_keys.append('flavor_id')
+
+ vip_network_name_or_id = self.params['vip_network']
+ if vip_network_name_or_id is not None:
+ network = self.conn.network.find_network(
+ vip_network_name_or_id, ignore_missing=False)
+ if load_balancer['vip_network_id'] != network.id:
+ non_updateable_keys.append('vip_network_id')
+
+ vip_subnet_name_or_id = self.params['vip_subnet']
+ if vip_subnet_name_or_id is not None:
+ subnet = self.conn.network.find_subnet(
+ vip_subnet_name_or_id, ignore_missing=False)
+ if load_balancer['vip_subnet_id'] != subnet.id:
+ non_updateable_keys.append('vip_subnet_id')
+
+ vip_port_name_or_id = self.params['vip_port']
+ if vip_port_name_or_id is not None:
+ port = self.conn.network.find_port(
+ vip_port_name_or_id, ignore_missing=False)
+ if load_balancer['vip_port_id'] != port.id:
+ non_updateable_keys.append('vip_port_id')
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in ['description']
+ if self.params[k] is not None
+ and self.params[k] != load_balancer[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ floating_ip, floating_ip_update = \
+ self._build_update_floating_ip(load_balancer)
+
+ return {**update, **floating_ip_update}, floating_ip
+
+ def _build_update_floating_ip(self, load_balancer):
+ assign_floating_ip = self.params['assign_floating_ip']
+ delete_floating_ip = self.params['delete_floating_ip']
+
+ floating_ip_address = self.params['floating_ip_address']
+ if floating_ip_address is not None \
+ and (not assign_floating_ip and not delete_floating_ip):
+ self.fail_json(msg="assign_floating_ip or delete_floating_ip must"
+ " be true when floating_ip_address is set")
+
+ floating_ip_network = self.params['floating_ip_network']
+ if floating_ip_network is not None \
+ and (not assign_floating_ip and not delete_floating_ip):
+ self.fail_json(msg="assign_floating_ip or delete_floating_ip must"
+ " be true when floating_ip_network is set")
+
+ ips = list(self.conn.network.ips(
+ port_id=load_balancer.vip_port_id,
+ fixed_ip_address=load_balancer.vip_address))
+
+ if len(ips) > 1:
+ self.fail_json(msg="Only a single floating ip address"
+ " per load-balancer is supported")
+
+ if delete_floating_ip or not assign_floating_ip:
+ if not ips:
+ return None, {}
+
+ if len(ips) != 1:
+ raise AssertionError("A single floating ip is expected")
+
+ ip = ips[0]
+
+ return ip, {'delete_floating_ip': ip}
+
+ # else assign_floating_ip
+
+ if not ips:
+ return None, dict(
+ assign_floating_ip=dict(
+ floating_ip_address=floating_ip_address,
+ floating_ip_network=floating_ip_network))
+
+ if len(ips) != 1:
+ raise AssertionError("A single floating ip is expected")
+
+ ip = ips[0]
+
+ if floating_ip_network is not None:
+ network = self.conn.network.find_network(floating_ip_network,
+ ignore_missing=False)
+ if ip.floating_network_id != network.id:
+ return ip, dict(
+ assign_floating_ip=dict(
+ floating_ip_address=floating_ip_address,
+ floating_ip_network=floating_ip_network),
+ delete_floating_ip=ip)
+
+ if floating_ip_address is not None \
+ and floating_ip_address != ip.floating_ip_address:
+ return ip, dict(
+ assign_floating_ip=dict(
+ floating_ip_address=floating_ip_address,
+ floating_ip_network=floating_ip_network),
+ delete_floating_ip=ip)
+
+ return ip, {}
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['description', 'name', 'vip_address']
+ if self.params[k] is not None)
+
+ flavor_name_or_id = self.params['flavor']
+ if flavor_name_or_id is not None:
+ flavor = self.conn.load_balancer.find_flavor(
+ flavor_name_or_id, ignore_missing=False)
+ kwargs['flavor_id'] = flavor.id
+
+ vip_network_name_or_id = self.params['vip_network']
+ if vip_network_name_or_id is not None:
+ network = self.conn.network.find_network(
+ vip_network_name_or_id, ignore_missing=False)
+ kwargs['vip_network_id'] = network.id
+
+ vip_subnet_name_or_id = self.params['vip_subnet']
+ if vip_subnet_name_or_id is not None:
+ subnet = self.conn.network.find_subnet(
+ vip_subnet_name_or_id, ignore_missing=False)
+ kwargs['vip_subnet_id'] = subnet.id
+
+ vip_port_name_or_id = self.params['vip_port']
+ if vip_port_name_or_id is not None:
+ port = self.conn.network.find_port(
+ vip_port_name_or_id, ignore_missing=False)
+ kwargs['vip_port_id'] = port.id
+
+ load_balancer = self.conn.load_balancer.create_load_balancer(**kwargs)
+
+ if self.params['wait']:
+ load_balancer = self.conn.load_balancer.wait_for_load_balancer(
+ load_balancer.id,
+ wait=self.params['timeout'])
+
+ floating_ip, update = self._build_update_floating_ip(load_balancer)
+ if update:
+ load_balancer, floating_ip = \
+ self._update_floating_ip(load_balancer, update)
+
+ return load_balancer, floating_ip
+
+ def _delete(self, load_balancer):
+ if self.params['delete_floating_ip']:
+ ips = list(self.conn.network.ips(
+ port_id=load_balancer.vip_port_id,
+ fixed_ip_address=load_balancer.vip_address))
+ else:
+ ips = []
+
+ # With cascade=False, deletion of the load-balancer
+ # would always fail if there are sub-resources.
+ self.conn.load_balancer.delete_load_balancer(load_balancer.id,
+ cascade=True)
+
+ if self.params['wait']:
+ for count in self.sdk.utils.iterate_timeout(
+ timeout=self.params['timeout'],
+ message="Timeout waiting for load-balancer to be absent"
+ ):
+ if self.conn.load_balancer.\
+ find_load_balancer(load_balancer.id) is None:
+ break
+
+ for ip in ips:
+ self.conn.network.delete_ip(ip)
+
+ def _find(self):
+ name = self.params['name']
+ return self.conn.load_balancer.find_load_balancer(name_or_id=name)
+
+ def _update(self, load_balancer, update):
+ attributes = update.get('attributes')
+ if attributes:
+ load_balancer = \
+ self.conn.load_balancer.update_load_balancer(load_balancer.id,
+ **attributes)
+
+ if self.params['wait']:
+ load_balancer = self.conn.load_balancer.wait_for_load_balancer(
+ load_balancer.id,
+ wait=self.params['timeout'])
+
+ load_balancer, floating_ip = \
+ self._update_floating_ip(load_balancer, update)
+
+ return load_balancer, floating_ip
+
+ def _update_floating_ip(self, load_balancer, update):
+ floating_ip = None
+ delete_floating_ip = update.get('delete_floating_ip')
+ if delete_floating_ip:
+ self.conn.network.delete_ip(delete_floating_ip.id)
+
+ assign_floating_ip = update.get('assign_floating_ip')
+ if assign_floating_ip:
+ floating_ip_address = assign_floating_ip['floating_ip_address']
+ floating_ip_network = assign_floating_ip['floating_ip_network']
+
+ if floating_ip_network is not None:
+ network = self.conn.network.find_network(floating_ip_network,
+ ignore_missing=False)
else:
- if provisioning_status == "DELETED":
- return None
- else:
- self.fail_json(
- msg="Pool %s transitioned to DELETED" % pool.id
- )
-
- time.sleep(interval)
- total_sleep += interval
+ network = None
- def _wait_for_lb(self, lb, status, failures, interval=5):
- """Wait for load balancer to be in a particular provisioning status."""
- timeout = self.params['timeout']
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- lb = self.conn.load_balancer.find_load_balancer(lb.id)
+ if floating_ip_address is not None:
+ kwargs = ({'floating_network_id': network.id}
+ if network is not None else {})
+ ip = self.conn.network.find_ip(floating_ip_address, **kwargs)
+ else:
+ ip = None
- if lb:
- if lb.provisioning_status == status:
- return None
- if lb.provisioning_status in failures:
+ if ip:
+ if ip['port_id'] is not None:
self.fail_json(
- msg="Load Balancer %s transitioned to failure state %s" %
- (lb.id, lb.provisioning_status)
- )
- else:
- if status == "DELETED":
- return None
+ msg="Floating ip {0} is associated to another fixed ip"
+ " address {1} already".format(
+ ip.floating_ip_address, ip.fixed_ip_address))
+
+ # Associate floating ip
+ floating_ip = self.conn.network.update_ip(
+ ip.id, fixed_ip_address=load_balancer.vip_address,
+ port_id=load_balancer.vip_port_id)
+
+ elif floating_ip_address: # and not ip
+ # Create new floating ip
+ kwargs = ({'floating_network_id': network.id}
+ if network is not None else {})
+ floating_ip = self.conn.network.create_ip(
+ fixed_ip_address=load_balancer.vip_address,
+ floating_ip_address=floating_ip_address,
+ port_id=load_balancer.vip_port_id,
+ **kwargs)
+
+ elif network:
+ # List disassociated floating ips on network
+ ips = [ip
+ for ip in
+ self.conn.network.ips(floating_network_id=network.id)
+ if ip['port_id'] is None]
+ if ips:
+ # Associate first disassociated floating ip
+ ip = ips[0]
+ floating_ip = self.conn.network.update_ip(
+ ip.id, fixed_ip_address=load_balancer.vip_address,
+ port_id=load_balancer.vip_port_id)
else:
- self.fail_json(
- msg="Load Balancer %s transitioned to DELETED" % lb.id
- )
+ # No disassociated floating ips
+ # Create new floating ip on network
+ floating_ip = self.conn.network.create_ip(
+ fixed_ip_address=load_balancer.vip_address,
+ floating_network_id=network.id,
+ port_id=load_balancer.vip_port_id)
- time.sleep(interval)
- total_sleep += interval
-
- self.fail_json(
- msg="Timeout waiting for Load Balancer %s to transition to %s" %
- (lb.id, status)
- )
-
- argument_spec = dict(
- name=dict(required=True),
- flavor=dict(required=False),
- state=dict(default='present', choices=['absent', 'present']),
- vip_network=dict(required=False),
- vip_subnet=dict(required=False),
- vip_port=dict(required=False),
- vip_address=dict(required=False),
- listeners=dict(type='list', default=[], elements='dict'),
- public_ip_address=dict(required=False, default=None),
- auto_public_ip=dict(required=False, default=False, type='bool'),
- public_network=dict(required=False),
- delete_public_ip=dict(required=False, default=False, type='bool'),
- )
- module_kwargs = dict(supports_check_mode=True)
-
- def run(self):
- flavor = self.params['flavor']
- vip_network = self.params['vip_network']
- vip_subnet = self.params['vip_subnet']
- vip_port = self.params['vip_port']
- listeners = self.params['listeners']
- public_vip_address = self.params['public_ip_address']
- allocate_fip = self.params['auto_public_ip']
- delete_fip = self.params['delete_public_ip']
- public_network = self.params['public_network']
-
- vip_network_id = None
- vip_subnet_id = None
- vip_port_id = None
- flavor_id = None
-
- try:
- max_microversion = 1
- max_majorversion = 2
- changed = False
- lb = self.conn.load_balancer.find_load_balancer(
- name_or_id=self.params['name'])
-
- if self.params['state'] == 'present':
- if lb and self.ansible.check_mode:
- self.exit_json(changed=False)
- if lb:
- self.exit_json(changed=False)
- ver_data = self.conn.load_balancer.get_all_version_data()
- region = list(ver_data.keys())[0]
- interface_type = list(ver_data[region].keys())[0]
- versions = ver_data[region][interface_type]['load-balancer']
- for ver in versions:
- if ver['status'] == 'CURRENT':
- curversion = ver['version'].split(".")
- max_majorversion = int(curversion[0])
- max_microversion = int(curversion[1])
-
- if not lb:
- if self.ansible.check_mode:
- self.exit_json(changed=True)
-
- if not (vip_network or vip_subnet or vip_port):
- self.fail_json(
- msg="One of vip_network, vip_subnet, or vip_port must "
- "be specified for load balancer creation"
- )
-
- if flavor:
- _flavor = self.conn.load_balancer.find_flavor(flavor)
- if not _flavor:
- self.fail_json(
- msg='flavor %s not found' % flavor
- )
- flavor_id = _flavor.id
-
- if vip_network:
- network = self.conn.get_network(vip_network)
- if not network:
- self.fail_json(
- msg='network %s is not found' % vip_network
- )
- vip_network_id = network.id
- if vip_subnet:
- subnet = self.conn.get_subnet(vip_subnet)
- if not subnet:
- self.fail_json(
- msg='subnet %s is not found' % vip_subnet
- )
- vip_subnet_id = subnet.id
- if vip_port:
- port = self.conn.get_port(vip_port)
-
- if not port:
- self.fail_json(
- msg='port %s is not found' % vip_port
- )
- vip_port_id = port.id
- lbargs = {"name": self.params['name'],
- "vip_network_id": vip_network_id,
- "vip_subnet_id": vip_subnet_id,
- "vip_port_id": vip_port_id,
- "vip_address": self.params['vip_address']
- }
- if flavor_id is not None:
- lbargs["flavor_id"] = flavor_id
-
- lb = self.conn.load_balancer.create_load_balancer(**lbargs)
-
- changed = True
-
- if not listeners and not self.params['wait']:
- self.exit_json(
- changed=changed,
- loadbalancer=lb.to_dict(),
- id=lb.id
- )
-
- self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
-
- for listener_def in listeners:
- listener_name = listener_def.get("name")
- pool_def = listener_def.get("pool")
-
- if not listener_name:
- self.fail_json(msg='listener name is required')
-
- listener = self.conn.load_balancer.find_listener(
- name_or_id=listener_name
- )
-
- if not listener:
- self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
-
- protocol = listener_def.get("protocol", "HTTP")
- protocol_port = listener_def.get("protocol_port", 80)
- allowed_cidrs = listener_def.get("allowed_cidrs", [])
- listenerargs = {"name": listener_name,
- "loadbalancer_id": lb.id,
- "protocol": protocol,
- "protocol_port": protocol_port
- }
- if max_microversion >= 12 and max_majorversion >= 2:
- listenerargs['allowed_cidrs'] = allowed_cidrs
- listener = self.conn.load_balancer.create_listener(**listenerargs)
- changed = True
-
- # Ensure pool in the listener.
- if pool_def:
- pool_name = pool_def.get("name")
- members = pool_def.get('members', [])
-
- if not pool_name:
- self.fail_json(msg='pool name is required')
-
- pool = self.conn.load_balancer.find_pool(name_or_id=pool_name)
-
- if not pool:
- self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
-
- protocol = pool_def.get("protocol", "HTTP")
- lb_algorithm = pool_def.get("lb_algorithm",
- "ROUND_ROBIN")
-
- pool = self.conn.load_balancer.create_pool(
- name=pool_name,
- listener_id=listener.id,
- protocol=protocol,
- lb_algorithm=lb_algorithm
- )
- self._wait_for_pool(pool, "ACTIVE", "ONLINE", ["ERROR"])
- changed = True
-
- # Ensure members in the pool
- for member_def in members:
- member_name = member_def.get("name")
- if not member_name:
- self.fail_json(msg='member name is required')
-
- member = self.conn.load_balancer.find_member(member_name,
- pool.id
- )
-
- if not member:
- self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
-
- address = member_def.get("address")
- if not address:
- self.fail_json(
- msg='member address for member %s is '
- 'required' % member_name
- )
-
- subnet_id = member_def.get("subnet")
- if subnet_id:
- subnet = self.conn.get_subnet(subnet_id)
- if not subnet:
- self.fail_json(
- msg='subnet %s for member %s is not '
- 'found' % (subnet_id, member_name)
- )
- subnet_id = subnet.id
-
- protocol_port = member_def.get("protocol_port", 80)
-
- member = self.conn.load_balancer.create_member(
- pool,
- name=member_name,
- address=address,
- protocol_port=protocol_port,
- subnet_id=subnet_id
- )
- self._wait_for_pool(pool, "ACTIVE", "ONLINE", ["ERROR"])
- changed = True
-
- # Associate public ip to the load balancer VIP. If
- # public_vip_address is provided, use that IP, otherwise, either
- # find an available public ip or create a new one.
- fip = None
- orig_public_ip = None
- new_public_ip = None
- if public_vip_address or allocate_fip:
- ips = self.conn.network.ips(
- port_id=lb.vip_port_id,
- fixed_ip_address=lb.vip_address
- )
- ips = list(ips)
- if ips:
- orig_public_ip = ips[0]
- new_public_ip = orig_public_ip.floating_ip_address
-
- if public_vip_address and public_vip_address != orig_public_ip:
- fip = self.conn.network.find_ip(public_vip_address)
-
- if not fip:
- self.fail_json(
- msg='Public IP %s is unavailable' % public_vip_address
- )
-
- # Release origin public ip first
- self.conn.network.update_ip(
- orig_public_ip,
- fixed_ip_address=None,
- port_id=None
- )
-
- # Associate new public ip
- self.conn.network.update_ip(
- fip,
- fixed_ip_address=lb.vip_address,
- port_id=lb.vip_port_id
- )
-
- new_public_ip = public_vip_address
- changed = True
- elif allocate_fip and not orig_public_ip:
- fip = self.conn.network.find_available_ip()
- if not fip:
- if not public_network:
- self.fail_json(msg="Public network is not provided")
-
- pub_net = self.conn.network.find_network(public_network)
- if not pub_net:
- self.fail_json(
- msg='Public network %s not found' %
- public_network
- )
- fip = self.conn.network.create_ip(
- floating_network_id=pub_net.id
- )
-
- self.conn.network.update_ip(
- fip,
- fixed_ip_address=lb.vip_address,
- port_id=lb.vip_port_id
- )
-
- new_public_ip = fip.floating_ip_address
- changed = True
-
- # Include public_vip_address in the result.
- lb = self.conn.load_balancer.find_load_balancer(name_or_id=lb.id)
- lb_dict = lb.to_dict()
- lb_dict.update({"public_vip_address": new_public_ip})
-
- self.exit_json(
- changed=changed,
- loadbalancer=lb_dict,
- id=lb.id
- )
- elif self.params['state'] == 'absent':
- changed = False
- public_vip_address = None
-
- if lb:
- if self.ansible.check_mode:
- self.exit_json(changed=True)
- if delete_fip:
- ips = self.conn.network.ips(
- port_id=lb.vip_port_id,
- fixed_ip_address=lb.vip_address
- )
- ips = list(ips)
- if ips:
- public_vip_address = ips[0]
-
- # Deleting load balancer with `cascade=False` does not make
- # sense because the deletion will always fail if there are
- # sub-resources.
- self.conn.load_balancer.delete_load_balancer(lb, cascade=True)
- changed = True
-
- if self.params['wait']:
- self._wait_for_lb(lb, "DELETED", ["ERROR"])
-
- if delete_fip and public_vip_address:
- self.conn.network.delete_ip(public_vip_address)
- changed = True
- elif self.ansible.check_mode:
- self.exit_json(changed=False)
-
- self.exit_json(changed=changed)
- except Exception as e:
- self.fail_json(msg=str(e))
+ else:
+ # Find disassociated floating ip
+ ip = self.conn.network.find_available_ip()
+
+ if ip:
+ # Associate disassociated floating ip
+ floating_ip = self.conn.network.update_ip(
+ ip.id, fixed_ip_address=load_balancer.vip_address,
+ port_id=load_balancer.vip_port_id)
+ else:
+ # Create new floating ip
+ floating_ip = self.conn.network.create_ip(
+ fixed_ip_address=load_balancer.vip_address,
+ port_id=load_balancer.vip_port_id)
+
+ return load_balancer, floating_ip
+
+ def _will_change(self, state, load_balancer):
+ if state == 'present' and not load_balancer:
+ return True
+ elif state == 'present' and load_balancer:
+ return bool(self._build_update(load_balancer)[0])
+ elif state == 'absent' and load_balancer:
+ return True
+ else:
+ # state == 'absent' and not load_balancer:
+ return False
def main():
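For reference, a minimal playbook sketch exercising the rewritten module's floating IP handling, assembled from the argument_spec above; the cloud, subnet and network names are illustrative assumptions and not part of the module's shipped examples:

- name: Create a load balancer and assign a floating IP to its VIP
  openstack.cloud.loadbalancer:
    cloud: devstack               # cloud entry in clouds.yaml (assumed)
    name: my_lb
    state: present
    vip_subnet: my_subnet         # one of vip_network, vip_subnet or vip_port is required
    assign_floating_ip: true      # alias of the old auto_public_ip option
    floating_ip_network: public   # alias of the old public_network option
    wait: true
    timeout: 600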
diff --git a/ansible_collections/openstack/cloud/plugins/modules/network.py b/ansible_collections/openstack/cloud/plugins/modules/network.py
index 780d49ba7..a5ad4f08e 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/network.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/network.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
@@ -10,7 +11,7 @@ module: network
short_description: Creates/removes networks from OpenStack
author: OpenStack Ansible SIG
description:
- - Add or remove network from OpenStack.
+ - Add, update or remove network from OpenStack.
options:
name:
description:
@@ -21,17 +22,14 @@ options:
description:
- Whether this network is shared or not.
type: bool
- default: 'no'
admin_state_up:
description:
- Whether the state should be marked as up or down.
type: bool
- default: 'yes'
external:
description:
- Whether this network is externally accessible.
type: bool
- default: 'no'
state:
description:
- Indicate desired state of the resource.
@@ -61,25 +59,21 @@ options:
description:
- Whether port security is enabled on the network or not.
Network will use OpenStack defaults if this option is
- not utilised. Requires openstacksdk>=0.18.
+ not utilised.
type: bool
- mtu_size:
+ mtu:
description:
- The maximum transmission unit (MTU) value to address fragmentation.
Network will use OpenStack defaults if this option is
- not provided. Requires openstacksdk>=0.18.
+ not provided.
type: int
- aliases: ['mtu']
+ aliases: ['mtu_size']
dns_domain:
description:
- - The DNS domain value to set. Requires openstacksdk>=0.29.
+ - The DNS domain value to set.
Network will use Openstack defaults if this option is
not provided.
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -94,67 +88,96 @@ EXAMPLES = '''
'''
RETURN = '''
+id:
+ description: Id of network
+ returned: On success when network exists.
+ type: str
network:
description: Dictionary describing the network.
- returned: On success when I(state) is 'present'.
- type: complex
+ returned: On success when network exists.
+ type: dict
contains:
+ availability_zone_hints:
+ description: Availability zone hints
+ type: str
+ availability_zones:
+ description: Availability zones
+ type: str
+ created_at:
+ description: Created at timestamp
+ type: str
+ description:
+ description: Description
+ type: str
+ dns_domain:
+ description: Dns domain
+ type: str
id:
- description: Network ID.
+ description: Id
+ type: str
+ ipv4_address_scope_id:
+ description: Ipv4 address scope id
+ type: str
+ ipv6_address_scope_id:
+ description: Ipv6 address scope id
+ type: str
+ is_admin_state_up:
+ description: Is admin state up
+ type: str
+ is_default:
+ description: Is default
+ type: str
+ is_port_security_enabled:
+ description: Is port security enabled
+ type: str
+ is_router_external:
+ description: Is router external
+ type: str
+ is_shared:
+ description: Is shared
+ type: str
+ is_vlan_transparent:
+ description: Is vlan transparent
+ type: str
+ mtu:
+ description: Mtu
type: str
- sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
name:
- description: Network name.
+ description: Name
+ type: str
+ project_id:
+ description: Project id
+ type: str
+ provider_network_type:
+ description: Provider network type
+ type: str
+ provider_physical_network:
+ description: Provider physical network
+ type: str
+ provider_segmentation_id:
+ description: Provider segmentation id
+ type: str
+ qos_policy_id:
+ description: Qos policy id
+ type: str
+ revision_number:
+ description: Revision number
+ type: str
+ segments:
+ description: Segments
type: str
- sample: "ext_network"
- shared:
- description: Indicates whether this network is shared across all tenants.
- type: bool
- sample: false
status:
- description: Network status.
+ description: Status
+ type: str
+ subnet_ids:
+ description: Subnet ids
+ type: str
+ tags:
+ description: Tags
+ type: str
+ updated_at:
+ description: Updated at timestamp
type: str
- sample: "ACTIVE"
- mtu:
- description: The MTU of a network resource.
- type: int
- sample: 0
- dns_domain:
- description: The DNS domain of a network resource.
- type: str
- sample: "sample.openstack.org."
- admin_state_up:
- description: The administrative state of the network.
- type: bool
- sample: true
- port_security_enabled:
- description: The port security status
- type: bool
- sample: true
- router:external:
- description: Indicates whether this network is externally accessible.
- type: bool
- sample: true
- tenant_id:
- description: The tenant ID.
- type: str
- sample: "06820f94b9f54b119636be2728d216fc"
- subnets:
- description: The associated subnets.
- type: list
- sample: []
- "provider:physical_network":
- description: The physical network where this network object is implemented.
- type: str
- sample: my_vlan_net
- "provider:network_type":
- description: The type of physical network that maps to this network resource.
- type: str
- sample: vlan
- "provider:segmentation_id":
- description: An isolated segment on the physical network.
- type: str
- sample: 101
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -164,17 +187,17 @@ class NetworkModule(OpenStackModule):
argument_spec = dict(
name=dict(required=True),
- shared=dict(default=False, type='bool'),
- admin_state_up=dict(default=True, type='bool'),
- external=dict(default=False, type='bool'),
- provider_physical_network=dict(required=False),
- provider_network_type=dict(required=False),
- provider_segmentation_id=dict(required=False, type='int'),
+ shared=dict(type='bool'),
+ admin_state_up=dict(type='bool'),
+ external=dict(type='bool'),
+ provider_physical_network=dict(),
+ provider_network_type=dict(),
+ provider_segmentation_id=dict(type='int'),
state=dict(default='present', choices=['absent', 'present']),
- project=dict(default=None),
- port_security_enabled=dict(type='bool', min_ver='0.18.0'),
- mtu_size=dict(required=False, type='int', min_ver='0.18.0', aliases=['mtu']),
- dns_domain=dict(required=False, min_ver='0.29.0')
+ project=dict(),
+ port_security_enabled=dict(type='bool'),
+ mtu=dict(type='int', aliases=['mtu_size']),
+ dns_domain=dict()
)
def run(self):
@@ -189,50 +212,88 @@ class NetworkModule(OpenStackModule):
provider_segmentation_id = self.params['provider_segmentation_id']
project = self.params['project']
- kwargs = self.check_versioned(
- mtu_size=self.params['mtu_size'], port_security_enabled=self.params['port_security_enabled'],
- dns_domain=self.params['dns_domain']
- )
+ kwargs = {}
+ for arg in ('port_security_enabled', 'mtu', 'dns_domain'):
+ if self.params[arg] is not None:
+ kwargs[arg] = self.params[arg]
if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
+ proj = self.conn.identity.find_project(project,
+ ignore_missing=False)
project_id = proj['id']
- filters = {'tenant_id': project_id}
+ net_kwargs = {'project_id': project_id}
else:
project_id = None
- filters = None
- net = self.conn.get_network(name, filters=filters)
+ net_kwargs = {}
+ net = self.conn.network.find_network(name, **net_kwargs)
if state == 'present':
+ if provider_physical_network:
+ kwargs['provider_physical_network'] = provider_physical_network
+ if provider_network_type:
+ kwargs['provider_network_type'] = provider_network_type
+ if provider_segmentation_id:
+ kwargs['provider_segmentation_id'] = provider_segmentation_id
+
+ if project_id is not None:
+ kwargs['project_id'] = project_id
+
+ if shared is not None:
+ kwargs["shared"] = shared
+ if admin_state_up is not None:
+ kwargs["admin_state_up"] = admin_state_up
+ if external is not None:
+ kwargs["is_router_external"] = external
+
if not net:
- provider = {}
- if provider_physical_network:
- provider['physical_network'] = provider_physical_network
- if provider_network_type:
- provider['network_type'] = provider_network_type
- if provider_segmentation_id:
- provider['segmentation_id'] = provider_segmentation_id
-
- if project_id is not None:
- net = self.conn.create_network(name, shared, admin_state_up,
- external, provider, project_id,
- **kwargs)
- else:
- net = self.conn.create_network(name, shared, admin_state_up,
- external, provider,
- **kwargs)
+ net = self.conn.network.create_network(name=name, **kwargs)
changed = True
else:
changed = False
- self.exit(changed=changed, network=net, id=net['id'])
+ update_kwargs = {}
+ # Check we are not trying to update an properties that cannot
+ # be modified
+ non_updatables = [
+ "provider_network_type",
+ "provider_physical_network",
+ ]
+ for arg in non_updatables:
+ if arg in kwargs and kwargs[arg] != net[arg]:
+ self.fail_json(
+ msg="The following parameters cannot be updated: "
+ "%s. You will need to use state: absent and "
+ "recreate." % ', '.join(non_updatables)
+ )
+
+ # Filter the args passed to the update call down to those that
+ # have been modified and are updatable. Adapted from:
+ # https://github.com/openstack/openstacksdk/blob/1ce15c9a8758b4d978eb5239bae100ddc13c8875/openstack/cloud/_network.py#L559-L561
+ for arg in ["shared", "admin_state_up", "is_router_external",
+ "mtu", "port_security_enabled", "dns_domain",
+ "provider_segmentation_id"]:
+ if (
+ arg in kwargs
+ # ensure user wants something specific
+ and kwargs[arg] is not None
+ # and this is not what we have right now
+ and kwargs[arg] != net[arg]
+ ):
+ update_kwargs[arg] = kwargs[arg]
+
+ if update_kwargs:
+ net = self.conn.network.update_network(
+ net.id, **update_kwargs
+ )
+ changed = True
+
+ net = net.to_dict(computed=False)
+ self.exit(changed=changed, network=net, id=net['id'])
elif state == 'absent':
if not net:
self.exit(changed=False)
else:
- self.conn.delete_network(name)
+ self.conn.network.delete_network(net['id'])
self.exit(changed=True)
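A short task sketch for the update path added above, using only parameters from the new argument_spec; the network name and values are illustrative assumptions:

- name: Create or update a network
  openstack.cloud.network:
    cloud: devstack            # cloud entry in clouds.yaml (assumed)
    name: my_network
    state: present
    mtu: 1450                  # replaces the old mtu_size name (kept as an alias)
    dns_domain: example.org.
    port_security_enabled: true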
diff --git a/ansible_collections/openstack/cloud/plugins/modules/networks_info.py b/ansible_collections/openstack/cloud/plugins/modules/networks_info.py
index 251af3e72..67883de77 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/networks_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/networks_info.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -10,8 +11,6 @@ short_description: Retrieve information about one or more OpenStack networks.
author: OpenStack Ansible SIG
description:
- Retrieve information about one or more networks from OpenStack.
- - This module was called C(openstack.cloud.networks_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.networks_info) module no longer returns C(ansible_facts)!
options:
name:
description:
@@ -24,10 +23,6 @@ options:
this dictionary may be additional dictionaries.
required: false
type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -44,7 +39,7 @@ EXAMPLES = '''
- name: Show openstack networks
debug:
- msg: "{{ result.openstack_networks }}"
+ msg: "{{ result.networks }}"
- name: Gather information about a previously created network by name
openstack.cloud.networks_info:
@@ -58,7 +53,7 @@ EXAMPLES = '''
- name: Show openstack networks
debug:
- msg: "{{ result.openstack_networks }}"
+ msg: "{{ result.networks }}"
- name: Gather information about a previously created network with filter
# Note: name and filters parameters are Not mutually exclusive
@@ -77,67 +72,119 @@ EXAMPLES = '''
- name: Show openstack networks
debug:
- msg: "{{ result.openstack_networks }}"
+ msg: "{{ result.networks }}"
'''
RETURN = '''
-openstack_networks:
+networks:
description: has all the openstack information about the networks
- returned: always, but can be null
- type: complex
+ returned: always, but can be an empty list
+ type: list
+ elements: dict
contains:
+ availability_zone_hints:
+ description: Availability zone hints
+ type: str
+ availability_zones:
+ description: Availability zones
+ type: str
+ created_at:
+ description: Created at timestamp
+ type: str
+ description:
+ description: Description
+ type: str
+ dns_domain:
+ description: Dns domain
+ type: str
id:
- description: Unique UUID.
- returned: success
+ description: Id
+ type: str
+ ipv4_address_scope_id:
+ description: Ipv4 address scope id
+ type: str
+ ipv6_address_scope_id:
+ description: Ipv6 address scope id
+ type: str
+ is_admin_state_up:
+ description: Is admin state up
+ type: str
+ is_default:
+ description: Is default
+ type: str
+ is_port_security_enabled:
+ description: Is port security enabled
+ type: str
+ is_router_external:
+ description: Is router external
+ type: str
+ is_shared:
+ description: Is shared
+ type: str
+ is_vlan_transparent:
+ description: Is vlan transparent
+ type: str
+ mtu:
+ description: Mtu
type: str
name:
- description: Name given to the network.
- returned: success
+ description: Name
+ type: str
+ project_id:
+ description: Project id
+ type: str
+ provider_network_type:
+ description: Provider network type
+ type: str
+ provider_physical_network:
+ description: Provider physical network
+ type: str
+ provider_segmentation_id:
+ description: Provider segmentation id
+ type: str
+ qos_policy_id:
+ description: Qos policy id
+ type: str
+ revision_number:
+ description: Revision number
+ type: str
+ segments:
+ description: Segments
type: str
status:
- description: Network status.
- returned: success
- type: str
- subnets:
- description: Subnet(s) included in this network.
- returned: success
- type: list
- elements: str
- tenant_id:
- description: Tenant id associated with this network.
- returned: success
- type: str
- shared:
- description: Network shared flag.
- returned: success
- type: bool
+ description: Status
+ type: str
+ subnet_ids:
+ description: Subnet ids
+ type: str
+ tags:
+ description: Tags
+ type: str
+ updated_at:
+ description: Updated at timestamp
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class NetworkInfoModule(OpenStackModule):
-
- deprecated_names = ('networks_facts', 'openstack.cloud.networks_facts')
-
argument_spec = dict(
- name=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None)
+ name=dict(),
+ filters=dict(type='dict')
)
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
-
- kwargs = self.check_versioned(
- filters=self.params['filters']
- )
- if self.params['name']:
- kwargs['name_or_id'] = self.params['name']
+ kwargs = {
+ 'filters': self.params['filters'],
+ 'name_or_id': self.params['name']
+ }
networks = self.conn.search_networks(**kwargs)
-
- self.exit(changed=False, openstack_networks=networks)
+ networks = [i.to_dict(computed=False) for i in networks]
+ self.exit(changed=False, networks=networks)
def main():
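A brief usage sketch for the renamed return key; the cloud name is an illustrative assumption:

- name: Gather information about all networks
  openstack.cloud.networks_info:
    cloud: devstack
  register: result

- name: Show network names
  debug:
    msg: "{{ result.networks | map(attribute='name') | list }}"   # openstack_networks was renamed to networks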
diff --git a/ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policies_info.py b/ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policies_info.py
index b451bc264..f25e9d7fa 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policies_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policies_info.py
@@ -1,235 +1,184 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# (c) 2021, Ashraf Hasson <ahasson@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
DOCUMENTATION = r'''
---
module: neutron_rbac_policies_info
-short_description: Fetch Neutron policies.
+short_description: Fetch Neutron RBAC policies.
author: OpenStack Ansible SIG
description:
- - Get RBAC policies against a network, security group or a QoS Policy for one or more projects.
- - If a C(policy_id) was not provided, this module will attempt to fetch all available policies.
- - Accepts same arguments as OpenStackSDK network proxy C(find_rbac_policy) and C(rbac_policies) functions which are ultimately passed over to C(RBACPolicy)
- - All parameters passed in to this module act as a filter for when no C(policy_id) was provided, otherwise they're ignored.
- - Returns None if no matching policy was found as opposed to failing.
-
+ - Fetch RBAC policies against a network, security group or a QoS Policy for
+ one or more projects.
options:
- policy_id:
+ action:
description:
- - The RBAC policy ID
- - If provided, all other filters are ignored
+ - Action for the RBAC policy.
+ - Can be either of the following options C(access_as_shared) or
+ C(access_as_external).
+ - Logically AND'ed with other filters.
+ choices: ['access_as_shared', 'access_as_external']
type: str
object_id:
description:
- - The object ID (the subject of the policy) to which the RBAC rules applies
- - This would be the ID of a network, security group or a qos policy
- - Mutually exclusive with the C(object_type)
+ - The object ID (the subject of the policy) to which the RBAC rules
+ apply.
+ - This is an ID of a network, security group or a qos policy.
+ - Mutually exclusive with the C(object_type).
type: str
object_type:
description:
- - Can be one of the following object types C(network), C(security_group) or C(qos_policy)
- - Mutually exclusive with the C(object_id)
+ - Type of the object that this RBAC policy affects.
+ - Can be one of the following object types C(network), C(security_group)
+ or C(qos_policy).
+ - Mutually exclusive with the C(object_id).
choices: ['network', 'security_group', 'qos_policy']
type: str
- target_project_id:
- description:
- - Filters the RBAC rules based on the target project id
- - Logically AND'ed with other filters
- - Mutually exclusive with C(project_id)
- type: str
- project_id:
+ policy_id:
description:
- - Filters the RBAC rules based on the project id to which the object belongs to
- - Logically AND'ed with other filters
- - Mutually exclusive with C(target_project_id)
+ - The RBAC policy ID.
+ - If C(policy_id) is not provided, all available policies will be
+ fetched.
+ - If C(policy_id) is provided, all other filters are ignored.
type: str
project:
description:
- - Filters the RBAC rules based on the project name
- - Logically AND'ed with other filters
+ - ID or name of the project to which C(object_id) belongs.
+ - Filters the RBAC rules based on the project name.
+ - Logically AND'ed with other filters.
type: str
- action:
+ aliases: ['project_id']
+ target_project_id:
description:
- - Can be either of the following options C(access_as_shared) | C(access_as_external)
- - Logically AND'ed with other filters
- choices: ['access_as_shared', 'access_as_external']
+ - The ID of the project on which this RBAC policy will be enforced.
+ - Filters the RBAC rules based on the target project id.
+ - Logically AND'ed with other filters.
type: str
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = r'''
-# Gather all rbac policies for a project
-- name: Get all rbac policies for {{ project }}
+- name: Get all rbac policies for a project
openstack.cloud.neutron_rbac_policies_info:
- project_id: "{{ project.id }}"
+ project: one_project
'''
RETURN = r'''
-# return value can either be plural or signular depending on what was passed in as parameters
-policies:
- description:
- - List of rbac policies, this could also be returned as a singular element, i.e., 'policy'
- type: complex
+rbac_policies:
+ description: List of Neutron RBAC policies.
+ type: list
+ elements: dict
returned: always
contains:
- object_id:
+ action:
description:
- - The UUID of the object to which the RBAC rules apply
+ - The access model specified by the RBAC rules
type: str
- sample: "7422172b-2961-475c-ac68-bd0f2a9960ad"
- target_project_id:
+ sample: "access_as_shared"
+ id:
description:
- - The UUID of the target project
+ - The ID of the RBAC rule/policy
type: str
- sample: "c201a689c016435c8037977166f77368"
- project_id:
+ sample: "4154ce0c-71a7-4d87-a905-09762098ddb9"
+ name:
description:
- - The UUID of the project to which access is granted
+ - The name of the RBAC rule; usually null
type: str
- sample: "84b8774d595b41e89f3dfaa1fd76932c"
+ sample: null
+ object_id:
+ description:
+ - The UUID of the object to which the RBAC rules apply
+ type: str
+ sample: "7422172b-2961-475c-ac68-bd0f2a9960ad"
object_type:
description:
- The object type to which the RBACs apply
type: str
sample: "network"
- action:
+ project_id:
description:
- - The access model specified by the RBAC rules
+ - The UUID of the project to which access is granted
type: str
- sample: "access_as_shared"
- id:
+ sample: "84b8774d595b41e89f3dfaa1fd76932c"
+ target_project_id:
description:
- - The ID of the RBAC rule/policy
+ - The UUID of the target project
type: str
- sample: "4154ce0c-71a7-4d87-a905-09762098ddb9"
- name:
+ sample: "c201a689c016435c8037977166f77368"
+ tenant_id:
description:
- - The name of the RBAC rule; usually null
+ - The UUID of the project to which access is granted. Deprecated.
type: str
- sample: null
- location:
- description:
- - A dictionary of the project details to which access is granted
- type: dict
- sample: >-
- {
- "cloud": "devstack",
- "region_name": "",
- "zone": null,
- "project": {
- "id": "84b8774d595b41e89f3dfaa1fd76932c",
- "name": null,
- "domain_id": null,
- "domain_name": null
- }
- }
+ sample: "84b8774d595b41e89f3dfaa1fd76932c"
+policies:
+ description: Same as C(rbac_policies), kept for backward compatibility.
+ returned: always
+ type: list
+ elements: dict
'''
-import re
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class NeutronRbacPoliciesInfo(OpenStackModule):
+class NeutronRBACPoliciesInfo(OpenStackModule):
argument_spec = dict(
+ action=dict(choices=['access_as_external', 'access_as_shared']),
+ object_id=dict(),
+ object_type=dict(choices=['security_group', 'qos_policy', 'network']),
policy_id=dict(),
- object_id=dict(), # ID of the object that this RBAC policy affects.
- object_type=dict(choices=['security_group', 'qos_policy', 'network']), # Type of the object that this RBAC policy affects.
- target_project_id=dict(), # The ID of the project this RBAC will be enforced.
- project_id=dict(), # The owner project ID.
- project=dict(),
- action=dict(choices=['access_as_external', 'access_as_shared']), # Action for the RBAC policy.
+ project=dict(aliases=['project_id']),
+ target_project_id=dict(),
)
module_kwargs = dict(
+ mutually_exclusive=[
+ ('object_id', 'object_type'),
+ ],
supports_check_mode=True,
)
- def _filter_policies_by(self, policies, key, value):
- filtered = []
- regexp = re.compile(r"location\.project\.([A-Za-z]+)")
- if regexp.match(key):
- attribute = key.split('.')[-1]
- for p in policies:
- if p['location']['project'][attribute] == value:
- filtered.append(p)
- else:
- for p in policies:
- if getattr(p, key) == value:
- filtered.append(p)
-
- return filtered
-
- def _get_rbac_policies(self):
- object_type = self.params.get('object_type')
- project_id = self.params.get('project_id')
- action = self.params.get('action')
-
- search_attributes = {}
- if object_type is not None:
- search_attributes['object_type'] = object_type
- if project_id is not None:
- search_attributes['project_id'] = project_id
- if action is not None:
- search_attributes['action'] = action
-
- try:
- policies = []
- generator = self.conn.network.rbac_policies(**search_attributes)
- for p in generator:
- policies.append(p)
- except self.sdk.exceptions.OpenStackCloudException as ex:
- self.fail_json(msg='Failed to get RBAC policies: {0}'.format(str(ex)))
-
- return policies
-
def run(self):
- policy_id = self.params.get('policy_id')
- object_id = self.params.get('object_id')
- object_type = self.params.get('object_type')
- project_id = self.params.get('project_id')
- project = self.params.get('project')
- target_project_id = self.params.get('target_project_id')
-
- if self.ansible.check_mode:
- self.exit_json(changed=False)
-
- if policy_id is not None:
- try:
- policy = self.conn.network.get_rbac_policy(policy_id)
- self.exit_json(changed=False, policy=policy)
- except self.sdk.exceptions.ResourceNotFound:
- self.exit_json(changed=False, policy=None)
- except self.sdk.exceptions.OpenStackCloudException as ex:
- self.fail_json(msg='Failed to get RBAC policy: {0}'.format(str(ex)))
+ project_name_or_id = self.params['project']
+ project = None
+ if project_name_or_id is not None:
+ project = self.conn.identity.find_project(project_name_or_id)
+ if not project:
+ self.exit_json(changed=False, rbac_policies=[], policies=[])
+
+ policy_id = self.params['policy_id']
+ if policy_id:
+ policy = self.conn.network.find_rbac_policy(policy_id)
+ policies = [policy] if policy else []
else:
- if object_id is not None and object_type is not None:
- self.fail_json(msg='object_id and object_type are mutually exclusive, please specify one of the two.')
- if project_id is not None and target_project_id is not None:
- self.fail_json(msg='project_id and target_project_id are mutually exclusive, please specify one of the two.')
+ kwargs = dict((k, self.params[k])
+ for k in ['action', 'object_type']
+ if self.params[k] is not None)
+
+ if project:
+ kwargs['project_id'] = project.id
+
+ policies = list(self.conn.network.rbac_policies(**kwargs))
- filtered_policies = self._get_rbac_policies()
+ for k in ['object_id', 'target_project_id']:
+ if self.params[k] is not None:
+ policies = [p for p in policies if p[k] == self.params[k]]
- if project is not None:
- filtered_policies = self._filter_policies_by(filtered_policies, 'location.project.name', project)
- if object_id is not None:
- filtered_policies = self._filter_policies_by(filtered_policies, 'object_id', object_id)
- if target_project_id is not None:
- filtered_policies = self._filter_policies_by(filtered_policies, 'target_project_id', target_project_id)
+ if project:
+ policies = [p for p in policies
+ if p['location']['project']['id'] == project.id]
- self.exit_json(policies=filtered_policies, changed=False)
+ policies = [p.to_dict(computed=False) for p in policies]
+ self.exit_json(changed=False,
+ rbac_policies=policies,
+ policies=policies)
def main():
- module = NeutronRbacPoliciesInfo()
+ module = NeutronRBACPoliciesInfo()
module()
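A minimal sketch combining the documented filters; the cloud name is an illustrative assumption:

- name: List RBAC policies sharing networks
  openstack.cloud.neutron_rbac_policies_info:
    cloud: devstack
    object_type: network        # one of network, security_group, qos_policy
    action: access_as_shared
  register: result

- name: Show policy ids
  debug:
    msg: "{{ result.rbac_policies | map(attribute='id') | list }}"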
diff --git a/ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policy.py b/ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policy.py
index f5162e08d..024df2a98 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policy.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/neutron_rbac_policy.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# (c) 2021, Ashraf Hasson <ahasson@redhat.com>
@@ -7,50 +8,62 @@
DOCUMENTATION = r'''
---
module: neutron_rbac_policy
-short_description: Create or delete a Neutron policy to apply a RBAC rule against an object.
+short_description: Create or delete a Neutron RBAC policy.
author: OpenStack Ansible SIG
description:
- - Create a policy to apply a RBAC rule against a network, security group or a QoS Policy or update/delete an existing policy.
- - If a C(policy_id) was provided but not found, this module will attempt to create a new policy rather than error out when updating an existing rule.
- - Accepts same arguments as OpenStackSDK network proxy C(find_rbac_policy) and C(rbac_policies) functions which are ultimately passed over to C(RBACPolicy)
-
+ - Create, update or delete a policy to apply a RBAC rule against a network,
+ security group or QoS Policy.
options:
- policy_id:
+ action:
+ description:
+ - Action for the RBAC policy.
+ - Can be either of the following options C(access_as_shared) or
+ C(access_as_external).
+ - Cannot be changed when updating an existing policy.
+ - Required when creating a RBAC policy rule, ignored when deleting a
+ policy.
+ choices: ['access_as_shared', 'access_as_external']
+ type: str
+ id:
description:
- - The RBAC policy ID
- - Required when deleting or updating an existing RBAC policy rule, ignored otherwise
+ - The RBAC policy ID.
+ - Required when deleting or updating an existing RBAC policy rule,
+ ignored otherwise.
+ - If I(id) is provided but a policy with this ID cannot be found,
+ an error will be raised.
type: str
+ aliases: ['policy_id']
object_id:
description:
- - The object ID (the subject of the policy) to which the RBAC rule applies
- - Cannot be changed when updating an existing policy
- - Required when creating a RBAC policy rule, ignored when deleting a policy
+ - The object ID (the subject of the policy) to which the RBAC rule
+ applies.
+ - Cannot be changed when updating an existing policy.
+ - Required when creating a RBAC policy rule, ignored when deleting a
+ policy.
type: str
object_type:
description:
- - Can be one of the following object types C(network), C(security_group) or C(qos_policy)
- - Cannot be changed when updating an existing policy
- - Required when creating a RBAC policy rule, ignored when deleting a policy
+ - Type of the object that this RBAC policy affects.
+ - Can be one of the following object types C(network), C(security_group)
+ or C(qos_policy).
+ - Cannot be changed when updating an existing policy.
+ - Required when creating a RBAC policy rule, ignored when deleting a
+ policy.
choices: ['network', 'security_group', 'qos_policy']
type: str
- target_project_id:
- description:
- - The project to which access to be allowed or revoked/disallowed
- - Can be specified/changed when updating an existing policy
- - Required when creating or updating a RBAC policy rule, ignored when deleting a policy
- type: str
project_id:
description:
- - The project to which the object_id belongs
- - Cannot be changed when updating an existing policy
- - Required when creating a RBAC policy rule, ignored when deleting a policy
+ - The ID of the project to which C(object_id) belongs.
+ - Cannot be changed when updating an existing policy.
+ - Required when creating a RBAC policy rule, ignored when deleting a
+ policy.
type: str
- action:
+ target_project_id:
description:
- - Can be either of the following options C(access_as_shared) | C(access_as_external)
- - Cannot be changed when updating an existing policy
- - Required when creating a RBAC policy rule, ignored when deleting a policy
- choices: ['access_as_shared', 'access_as_external']
+ - The ID of the project to which access is to be allowed or
+ revoked (disallowed).
+ - Required when creating or updating a RBAC policy rule, ignored when
+ deleting a policy.
type: str
state:
description:
@@ -58,249 +71,221 @@ options:
choices: ['present', 'absent']
default: present
type: str
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = r'''
-# Ensure network RBAC policy exists
-- name: Create a new network RBAC policy
+- name: Create or update RBAC policy
neutron_rbac_policy:
object_id: '7422172b-2961-475c-ac68-bd0f2a9960ad'
object_type: 'network'
- target_project_id: 'a12f9ce1de0645e0a0b01c2e679f69ec'
project_id: '84b8774d595b41e89f3dfaa1fd76932d'
+ target_project_id: 'a12f9ce1de0645e0a0b01c2e679f69ec'
-# Update network RBAC policy
-- name: Update an existing network RBAC policy
- neutron_rbac_policy:
- policy_id: 'f625242a-6a73-47ac-8d1f-91440b2c617f'
- target_project_id: '163c89e065a94e069064e551e15daf0e'
-
-# Delete an existing RBAC policy
- name: Delete RBAC policy
openstack.cloud.openstack.neutron_rbac_policy:
- policy_id: 'f625242a-6a73-47ac-8d1f-91440b2c617f'
+ id: 'f625242a-6a73-47ac-8d1f-91440b2c617f'
state: absent
'''
RETURN = r'''
-policy:
- description:
- - A hash representing the policy
- type: complex
+rbac_policy:
+ description: A dictionary describing the RBAC policy.
returned: always
+ type: dict
contains:
- object_id:
+ action:
description:
- - The UUID of the object to which the RBAC rules apply
+ - The access model specified by the RBAC rules
type: str
- sample: "7422172b-2961-475c-ac68-bd0f2a9960ad"
- target_project_id:
+ sample: "access_as_shared"
+ id:
description:
- - The UUID of the target project
+ - The ID of the RBAC rule/policy
type: str
- sample: "c201a689c016435c8037977166f77368"
- project_id:
+ sample: "4154ce0c-71a7-4d87-a905-09762098ddb9"
+ name:
description:
- - The UUID of the project to which access is granted
+ - The name of the RBAC rule; usually null
type: str
- sample: "84b8774d595b41e89f3dfaa1fd76932c"
+ sample: null
+ object_id:
+ description:
+ - The UUID of the object to which the RBAC rules apply
+ type: str
+ sample: "7422172b-2961-475c-ac68-bd0f2a9960ad"
object_type:
description:
- The object type to which the RBACs apply
type: str
sample: "network"
- action:
+ project_id:
description:
- - The access model specified by the RBAC rules
+ - The UUID of the project to which access is granted
type: str
- sample: "access_as_shared"
- id:
+ sample: "84b8774d595b41e89f3dfaa1fd76932c"
+ target_project_id:
description:
- - The ID of the RBAC rule/policy
+ - The UUID of the target project
type: str
- sample: "4154ce0c-71a7-4d87-a905-09762098ddb9"
- name:
+ sample: "c201a689c016435c8037977166f77368"
+ tenant_id:
description:
- - The name of the RBAC rule; usually null
+ - The UUID of the project to which access is granted. Deprecated.
type: str
- sample: null
- location:
- description:
- - A dictionary of the project details to which access is granted
- type: dict
- sample: >-
- {
- "cloud": "devstack",
- "region_name": "",
- "zone": null,
- "project": {
- "id": "84b8774d595b41e89f3dfaa1fd76932c",
- "name": null,
- "domain_id": null,
- "domain_name": null
- }
- }
+ sample: "84b8774d595b41e89f3dfaa1fd76932c"
+policy:
+ description: Same as C(rbac_policy), kept for backward compatibility.
+ returned: always
+ type: dict
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class NeutronRbacPolicy(OpenStackModule):
+class NeutronRBACPolicy(OpenStackModule):
argument_spec = dict(
- policy_id=dict(),
- object_id=dict(), # ID of the object that this RBAC policy affects.
- object_type=dict(choices=['security_group', 'qos_policy', 'network']), # Type of the object that this RBAC policy affects.
- target_project_id=dict(), # The ID of the project this RBAC will be enforced.
- project_id=dict(), # The owner project ID.
- action=dict(choices=['access_as_external', 'access_as_shared']), # Action for the RBAC policy.
- state=dict(default='present', choices=['absent', 'present'])
+ action=dict(choices=['access_as_external', 'access_as_shared']),
+ id=dict(aliases=['policy_id']),
+ object_id=dict(),
+ object_type=dict(choices=['security_group', 'qos_policy', 'network']),
+ project_id=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ target_project_id=dict(),
)
module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ('target_project_id',)),
+ ('state', 'absent', ('id',)),
+ ],
supports_check_mode=True,
)
- def _delete_rbac_policy(self, policy):
- """
- Delete an existing RBAC policy
- returns: the "Changed" state
- """
-
- if policy is None:
- self.fail_json(msg='Must specify policy_id for delete')
-
- try:
- self.conn.network.delete_rbac_policy(policy.id)
- except self.sdk.exceptions.OpenStackCloudException as ex:
- self.fail_json(msg='Failed to delete RBAC policy: {0}'.format(str(ex)))
-
- return True
-
- def _create_rbac_policy(self):
- """
- Creates a new RBAC policy
- returns: the "Changed" state of the RBAC policy
- """
-
- object_id = self.params.get('object_id')
- object_type = self.params.get('object_type')
- target_project_id = self.params.get('target_project_id')
- project_id = self.params.get('project_id')
- action = self.params.get('action')
-
- attributes = {
- 'object_id': object_id,
- 'object_type': object_type,
- 'target_project_id': target_project_id,
- 'project_id': project_id,
- 'action': action
- }
-
- if not all(attributes.values()):
- self.fail_json(msg='Missing one or more required parameter for creating a RBAC policy')
-
- try:
- search_attributes = dict(attributes)
- del search_attributes['object_id']
- del search_attributes['target_project_id']
- policies = self.conn.network.rbac_policies(**search_attributes)
- for p in policies:
- if p.object_id == object_id and p.target_project_id == target_project_id:
- return (False, p)
-
- # if no matching policy exists, attempt to create one
- policy = self.conn.network.create_rbac_policy(**attributes)
- except self.sdk.exceptions.OpenStackCloudException as ex:
- self.fail_json(msg='Failed to create RBAC policy: {0}'.format(str(ex)))
-
- return (True, policy)
-
- def _update_rbac_policy(self, policy):
- """
- Updates an existing RBAC policy
- returns: the "Changed" state of the RBAC policy
- """
-
- object_id = self.params.get('object_id')
- object_type = self.params.get('object_type')
- target_project_id = self.params.get('target_project_id')
- project_id = self.params.get('project_id')
- action = self.params.get('action')
-
- allowed_attributes = {
- 'rbac_policy': policy.id,
- 'target_project_id': target_project_id
- }
-
- disallowed_attributes = {
- 'object_id': object_id,
- 'object_type': object_type,
- 'project_id': project_id,
- 'action': action
- }
-
- if not all(allowed_attributes.values()):
- self.fail_json(msg='Missing one or more required parameter for updating a RBAC policy')
-
- if any(disallowed_attributes.values()):
- self.fail_json(msg='Cannot change disallowed parameters while updating a RBAC policy: ["object_id", "object_type", "project_id", "action"]')
-
- try:
- policy = self.conn.network.update_rbac_policy(**allowed_attributes)
- except self.sdk.exceptions.OpenStackCloudException as ex:
- self.fail_json(msg='Failed to update the RBAC policy: {0}'.format(str(ex)))
-
- return (True, policy)
-
- def _policy_state_change(self, policy):
+ def run(self):
state = self.params['state']
- if state == 'present':
- if not policy:
- return True
- if state == 'absent' and policy:
- return True
- return False
- def run(self):
- policy_id = self.params.get('policy_id')
- state = self.params.get('state')
-
- if policy_id is not None:
- try:
- policy = self.conn.network.get_rbac_policy(policy_id)
- except self.sdk.exceptions.ResourceNotFound:
- policy = None
- except self.sdk.exceptions.OpenStackCloudException as ex:
- self.fail_json(msg='Failed to get RBAC policy: {0}'.format(str(ex)))
- else:
- policy = None
+ policy = self._find()
if self.ansible.check_mode:
- self.exit_json(changed=self._policy_state_change(policy), policy=policy)
-
- if state == 'absent':
- if policy is None and policy_id:
- self.exit_json(changed=False)
- if policy_id is None:
- self.fail_json(msg='Must specify policy_id when state is absent')
- if policy is not None:
- changed = self._delete_rbac_policy(policy)
- self.exit_json(changed=changed)
- # state == 'present'
+ self.exit_json(changed=self._will_change(state, policy))
+
+ if state == 'present' and not policy:
+ # Create policy
+ policy = self._create()
+ self.exit_json(changed=True,
+ rbac_policy=policy.to_dict(computed=False),
+ policy=policy.to_dict(computed=False))
+
+ elif state == 'present' and policy:
+ # Update policy
+ update = self._build_update(policy)
+ if update:
+ policy = self._update(policy, update)
+
+ self.exit_json(changed=bool(update),
+ rbac_policy=policy.to_dict(computed=False),
+ policy=policy.to_dict(computed=False))
+
+ elif state == 'absent' and policy:
+ # Delete policy
+ self._delete(policy)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not policy:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, policy):
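+ # Only target_project_id can be updated in place; the module fails
+ # when any other attribute differs from the existing policy.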
+ update = {}
+
+ non_updateable_keys = [k for k in ['object_id', 'object_type',
+ 'project_id', 'action']
+ if self.params[k] is not None
+ and self.params[k] != policy[k]]
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in ['target_project_id']
+ if self.params[k] is not None
+ and self.params[k] != policy[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['object_id', 'object_type',
+ 'target_project_id', 'project_id',
+ 'action']
+ if self.params[k] is not None)
+
+ return self.conn.network.create_rbac_policy(**kwargs)
+
+ def _delete(self, policy):
+ self.conn.network.delete_rbac_policy(policy.id)
+
+ def _find(self):
+ id = self.params['id']
+
+ if id is not None:
+ return self.conn.network.find_rbac_policy(id)
+
+ matches = self._find_matches()
+ if len(matches) > 1:
+ self.fail_json(msg='Found more than one RBAC policy'
+ ' matching the given parameters.')
+ elif len(matches) == 1:
+ return matches[0]
+ else: # len(matches) == 0
+ return None
+
+ def _find_matches(self):
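+ # Without an id, a policy can only be identified by its attributes:
+ # list policies by action, object_type and project_id, then match on
+ # object_id or target_project_id.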
+ missing_keys = [k for k in ['action', 'object_id', 'object_type',
+ 'project_id', 'target_project_id']
+ if self.params[k] is None]
+ if missing_keys:
+ self.fail_json(msg='Missing parameter(s) for finding'
+ ' a matching RBAC policy: {0}'
+ .format(', '.join(missing_keys)))
+
+ kwargs = dict((k, self.params[k])
+ for k in ['action', 'object_type', 'project_id'])
+
+ policies = self.conn.network.rbac_policies(**kwargs)
+
+ return [p for p in policies
+ if any(p[k] == self.params[k]
+ for k in ['object_id', 'target_project_id'])]
+
+ def _update(self, policy, update):
+ attributes = update.get('attributes')
+ if attributes:
+ policy = self.conn.network.update_rbac_policy(policy.id,
+ **attributes)
+
+ return policy
+
+ def _will_change(self, state, policy):
+ if state == 'present' and not policy:
+ return True
+ elif state == 'present' and policy:
+ return bool(self._build_update(policy))
+ elif state == 'absent' and policy:
+ return True
else:
- if policy is None:
- (changed, new_policy) = self._create_rbac_policy()
- else:
- (changed, new_policy) = self._update_rbac_policy(policy)
-
- self.exit_json(changed=changed, policy=new_policy)
+ # state == 'absent' and not policy:
+ return False
def main():
- module = NeutronRbacPolicy()
+ module = NeutronRBACPolicy()
module()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/object.py b/ansible_collections/openstack/cloud/plugins/modules/object.py
index 4a22604ed..e5b930f81 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/object.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/object.py
@@ -1,118 +1,342 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: object
-short_description: Create or Delete objects and containers from OpenStack
+short_description: Create or delete Swift objects in OpenStack clouds
author: OpenStack Ansible SIG
description:
- - Create or Delete objects and containers from OpenStack
+ - Create or delete Swift objects in OpenStack clouds
options:
- container:
- description:
- - The name of the container in which to create the object
- required: true
- type: str
- name:
- description:
- - Name to be give to the object. If omitted, operations will be on
- the entire container
- required: false
- type: str
- filename:
- description:
- - Path to local file to be uploaded.
- required: false
- type: str
- container_access:
- description:
- - desired container access level.
- required: false
- choices: ['private', 'public']
- default: private
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ container:
+ description:
+ - The name (and ID) of the container in which to create the object.
+ - This container will not be created if it does not exist already.
+ required: true
+ type: str
+ data:
+ description:
+ - The content to upload to the object.
+ - Mutually exclusive with I(filename).
+ - This attribute cannot be updated.
+ type: str
+ filename:
+ description:
+ - The path to the local file whose contents will be uploaded.
+ - Mutually exclusive with I(data).
+ type: str
+ name:
+ description:
+ - Name (and ID) of the object.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the object should be C(present) or C(absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
+'''
+
+RETURN = r'''
+object:
+ description: Dictionary describing the object.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ accept_ranges:
+ description: The type of ranges that the object accepts.
+ type: str
+ access_control_allow_origin:
+ description: CORS for RAX (deviating from standard)
+ type: str
+ content_disposition:
+ description: If set, specifies the override behavior for the browser.
+ For example, this header might specify that the browser use
+ a download program to save this file rather than show the
+ file, which is the default. If not set, this header is not
+ returned by this operation.
+ type: str
+ content_encoding:
+ description: If set, the value of the Content-Encoding metadata.
+ If not set, this header is not returned by this operation.
+ type: str
+ content_length:
+ description: HEAD operations do not return content. However, in this
+ operation the value in the Content-Length header is not the
+ size of the response body. Instead it contains the size of
+ the object, in bytes.
+ type: str
+ content_type:
+ description: The MIME type of the object.
+ type: str
+ copy_from:
+ description: If set, this is the name of an object used to create the new
+ object by copying the X-Copy-From object. The value is in
+ form {container}/{object}. You must UTF-8-encode and then
+ URL-encode the names of the container and object before you
+ include them in the header. Using PUT with X-Copy-From has
+ the same effect as using the COPY operation to copy an
+ object.
+ type: str
+ delete_after:
+ description: Specifies the number of seconds after which the object is
+ removed. Internally, the Object Storage system stores this
+ value in the X-Delete-At metadata item.
+ type: int
+ delete_at:
+ description: If set, the time when the object will be deleted by the
+ system in the format of a UNIX Epoch timestamp. If not set,
+ this header is not returned by this operation.
+ type: str
+ etag:
+ description: For objects smaller than 5 GB, this value is the MD5
+ checksum of the object content. The value is not quoted.
+ For manifest objects, this value is the MD5 checksum of the
+ concatenated string of MD5 checksums and ETags for each of
+ the segments in the manifest, and not the MD5 checksum of
+ the content that was downloaded. Also the value is enclosed
+ in double-quote characters.
+ It is strongly recommended to compute the MD5 checksum of
+ the response body as it is received and compare this value
+ with the one in the ETag header. If they differ, the content
+ was corrupted, so retry the operation.
+ type: str
+ expires_at:
+ description: Used with temporary URLs to specify the expiry time of the
+ signature. For more information about temporary URLs, see
+ OpenStack Object Storage API v1 Reference.
+ type: str
+ id:
+ description: ID of the object. Equal to C(name).
+ type: str
+ if_match:
+ description: See U(http://www.ietf.org/rfc/rfc2616.txt).
+ type: list
+ if_modified_since:
+ description: See U(http://www.ietf.org/rfc/rfc2616.txt).
+ type: str
+ if_none_match:
+ description: "In combination with C(Expect: 100-Continue), specify an
+ C(If-None-Match: *) header to query whether the server
+ already has a copy of the object before any data is sent."
+ type: list
+ if_unmodified_since:
+ description: See U(http://www.ietf.org/rfc/rfc2616.txt).
+ type: str
+ is_content_type_detected:
+ description: If set to true, Object Storage guesses the content type
+ based on the file extension and ignores the value sent in
+ the Content-Type header, if present.
+ type: bool
+ is_newest:
+ description: If set to True, Object Storage queries all replicas to
+ return the most recent one. If you omit this header, Object
+ Storage responds faster after it finds one valid replica.
+ Because setting this header to True is more expensive for
+ the back end, use it only when it is absolutely needed.
+ type: bool
+ is_static_large_object:
+ description: Set to True if this object is a static large object manifest
+ object.
+ type: bool
+ last_modified_at:
+ description: The date and time that the object was created or the last
+ time that the metadata was changed.
+ type: str
+ manifest:
+ description: If present, this is a dynamic large object manifest object.
+ The value is the container and object name prefix of the
+ segment objects in the form container/prefix.
+ type: str
+ multipart_manifest:
+ description: If you include the multipart-manifest=get query parameter
+ and the object is a large object, the object contents are
+ not returned. Instead, the manifest is returned in the
+ X-Object-Manifest response header for dynamic large objects
+ or in the response body for static large objects.
+ type: str
+ name:
+ description: Name of the object.
+ returned: success
+ type: str
+ object_manifest:
+ description: If set, this is a dynamic large object manifest object.
+ The value is the container and object name prefix of the
+ segment objects in the form container/prefix.
+ type: str
+ range:
+ description: TODO.
+ type: dict
+ signature:
+ description: Used with temporary URLs to sign the request. For more
+ information about temporary URLs, see OpenStack Object
+ Storage API v1 Reference.
+ type: str
+ symlink_target:
+ description: If present, this is a symlink object. The value is the
+ relative path of the target object in the format
+ <container>/<object>.
+ type: str
+ symlink_target_account:
+ description: If present, and X-Symlink-Target is present, then this is a
+ cross-account symlink to an object in the account specified
+ in the value.
+ type: str
+ timestamp:
+ description: The timestamp of the transaction.
+ type: str
+ transfer_encoding:
+ description: Set to chunked to enable chunked transfer encoding. If used,
+ do not set the Content-Length header to a non-zero value.
+ type: str
'''
-EXAMPLES = '''
-- name: "Create a object named 'fstab' in the 'config' container"
+EXAMPLES = r'''
+- name: Create an object named 'fstab' in the 'config' container
openstack.cloud.object:
cloud: mordred
- state: present
- name: fstab
container: config
filename: /etc/fstab
+ name: fstab
+ state: present
- name: Delete a container called config and all of its contents
openstack.cloud.object:
cloud: rax-iad
- state: absent
container: config
+ state: absent
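+
+# An additional illustrative task: content may also be passed inline through
+# the data option instead of uploading a local file; 'motd' and the data
+# value are placeholders.
+- name: Create an object named 'motd' from inline data
+ openstack.cloud.object:
+ cloud: mordred
+ container: config
+ data: "Welcome aboard"
+ name: motd
+ state: present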
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class SwiftObjectModule(OpenStackModule):
+class ObjectModule(OpenStackModule):
argument_spec = dict(
- name=dict(required=False, default=None),
container=dict(required=True),
- filename=dict(required=False, default=None),
- container_access=dict(default='private', choices=['private', 'public']),
+ data=dict(),
+ filename=dict(),
+ name=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
)
- module_kwargs = dict()
-
- def process_object(
- self, container, name, filename, container_access, **kwargs
- ):
- changed = False
- container_obj = self.conn.get_container(container)
- if kwargs['state'] == 'present':
- if not container_obj:
- container_obj = self.conn.create_container(container)
- changed = True
- if self.conn.get_container_access(container) != container_access:
- self.conn.set_container_access(container, container_access)
- changed = True
- if name:
- if self.conn.is_object_stale(container, name, filename):
- self.conn.create_object(container, name, filename)
- changed = True
- else:
- if container_obj:
- if name:
- if self.conn.get_object_metadata(container, name):
- self.conn.delete_object(container, name)
- changed = True
- else:
- self.conn.delete_container(container)
- changed = True
- return changed
+
+ module_kwargs = dict(
+ mutually_exclusive=[
+ ('data', 'filename'),
+ ],
+ required_if=[
+ ('state', 'present', ('data', 'filename'), True),
+ ],
+ supports_check_mode=True
+ )
def run(self):
- changed = self.process_object(**self.params)
+ state = self.params['state']
+ object = self._find()
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, object))
+
+ if state == 'present' and not object:
+ # Create object
+ object = self._create()
+ self.exit_json(changed=True,
+ # metadata is not returned by
+ # to_dict(computed=False) so return it explicitly
+ object=dict(metadata=object.metadata,
+ **object.to_dict(computed=False)))
+
+ elif state == 'present' and object:
+ # Update object
+ update = self._build_update(object)
+ if update:
+ object = self._update(object, update)
- self.exit_json(changed=changed)
+ self.exit_json(changed=bool(update),
+ # metadata is not returned by
+ # to_dict(computed=False) so return it explicitly
+ object=dict(metadata=object.metadata,
+ **object.to_dict(computed=False)))
+
+ elif state == 'absent' and object:
+ # Delete object
+ self._delete(object)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not object:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, object):
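+ # Only the object's content can be updated; is_object_stale() compares
+ # checksums of the local file and the stored object.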
+ update = {}
+
+ container_name = self.params['container']
+
+ filename = self.params['filename']
+ if filename is not None:
+ if self.conn.object_store.is_object_stale(container_name,
+ object.id, filename):
+ update['filename'] = filename
+
+ return update
+
+ def _create(self):
+ name = self.params['name']
+ container_name = self.params['container']
+
+ kwargs = dict((k, self.params[k])
+ for k in ['data', 'filename']
+ if self.params[k] is not None)
+
+ return self.conn.object_store.create_object(container_name, name,
+ **kwargs)
+
+ def _delete(self, object):
+ container_name = self.params['container']
+ self.conn.object_store.delete_object(object.id,
+ container=container_name)
+
+ def _find(self):
+ name_or_id = self.params['name']
+ container_name = self.params['container']
+ # openstacksdk has no object_store.find_object() function
+ try:
+ return self.conn.object_store.get_object(name_or_id,
+ container=container_name)
+ except self.sdk.exceptions.ResourceNotFound:
+ return None
+
+ def _update(self, object, update):
+ filename = update.get('filename')
+ if filename:
+ container_name = self.params['container']
+ object = self.conn.object_store.create_object(container_name,
+ object.id,
+ filename=filename)
+
+ return object
+
+ def _will_change(self, state, object):
+ if state == 'present' and not object:
+ return True
+ elif state == 'present' and object:
+ return bool(self._build_update(object))
+ elif state == 'absent' and object:
+ return True
+ else:
+ # state == 'absent' and not object:
+ return False
def main():
- module = SwiftObjectModule()
+ module = ObjectModule()
module()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/object_container.py b/ansible_collections/openstack/cloud/plugins/modules/object_container.py
index 23ed38e54..188224d7d 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/object_container.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/object_container.py
@@ -1,109 +1,205 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 by Open Telekom Cloud, operated by T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: object_container
-short_description: Manage Swift container.
+short_description: Manage a Swift container.
author: OpenStack Ansible SIG
description:
- - Manage Swift container.
+ - Create, update and delete a Swift container.
options:
- container:
- description: Name of a container in Swift.
- type: str
- required: true
- metadata:
- description:
- - Key/value pairs to be set as metadata on the container.
- - If a container doesn't exist, it will be created.
- - Both custom and system metadata can be set.
- - Custom metadata are keys and values defined by the user.
- - The system metadata keys are content_type, content_encoding, content_disposition, delete_after,\
- delete_at, is_content_type_detected
- type: dict
- required: false
- keys:
- description: Keys from 'metadata' to be deleted.
- type: list
- elements: str
- required: false
delete_with_all_objects:
description:
- - Whether the container should be deleted with all objects or not.
- - Without this parameter set to "true", an attempt to delete a container that contains objects will fail.
+ - Whether the container should be deleted recursively,
+ i.e. including all of its objects.
+ - If I(delete_with_all_objects) is set to C(false), an attempt to
+ delete a container which contains objects will fail.
type: bool
default: False
- required: false
+ delete_metadata_keys:
+ description:
+ - Keys from I(metadata) to be deleted.
+ - "I(metadata) has precedence over I(delete_metadata_keys): If any
+ key is present in both options, then it will be created or updated,
+ not deleted."
+ - Metadata keys are case-insensitive.
+ type: list
+ elements: str
+ aliases: ['keys']
+ metadata:
+ description:
+ - Key value pairs to be set as metadata on the container.
+ - Both custom and system metadata can be set.
+ - Custom metadata are keys and values defined by the user.
+ - I(metadata) is the same as setting properties in openstackclient with
+ C(openstack container set --property ...).
+ - Metadata keys are case-insensitive.
+ type: dict
+ name:
+ description:
+ - Name (and ID) of a Swift container.
+ type: str
+ required: true
+ aliases: ['container']
+ read_ACL:
+ description:
+ - The ACL that grants read access.
+ - For example, use C(.r:*,.rlistings) for public access
+ and C('') for private access.
+ type: str
+ write_ACL:
+ description:
+ - The ACL that grants write access.
+ type: str
state:
- description: Whether resource should be present or absent.
+ description:
+ - Whether the container should be C(present) or C(absent).
default: 'present'
choices: ['present', 'absent']
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-RETURN = '''
+RETURN = r'''
container:
- description: Specifies the container.
- returned: On success when C(state=present)
+ description: Dictionary describing the Swift container.
+ returned: On success when I(state) is C(present).
type: dict
- sample:
- {
- "bytes": 5449,
- "bytes_used": 5449,
- "content_type": null,
- "count": 1,
- "id": "otc",
- "if_none_match": null,
- "is_content_type_detected": null,
- "is_newest": null,
- "meta_temp_url_key": null,
- "meta_temp_url_key_2": null,
- "name": "otc",
- "object_count": 1,
- "read_ACL": null,
- "sync_key": null,
- "sync_to": null,
- "timestamp": null,
- "versions_location": null,
- "write_ACL": null
- }
+ contains:
+ bytes:
+ description: The total number of bytes that are stored in Object Storage
+ for the container.
+ type: int
+ sample: 5449
+ bytes_used:
+ description: The count of bytes used in total.
+ type: int
+ sample: 5449
+ content_type:
+ description: The MIME type of the list of names.
+ type: str
+ sample: null
+ count:
+ description: The number of objects in the container.
+ type: int
+ sample: 1
+ history_location:
+ description: Enables versioning on the container.
+ type: str
+ sample: null
+ id:
+ description: The ID of the container. Equals I(name).
+ type: str
+ sample: "otc"
+ if_none_match:
+ description: "In combination with C(Expect: 100-Continue), specify an
+ C(If-None-Match: *) header to query whether the server
+ already has a copy of the object before any data is sent."
+ type: str
+ sample: null
+ is_content_type_detected:
+ description: If set to C(true), Object Storage guesses the content type
+ based on the file extension and ignores the value sent in
+ the Content-Type header, if present.
+ type: bool
+ sample: null
+ is_newest:
+ description: If set to True, Object Storage queries all replicas to
+ return the most recent one. If you omit this header, Object
+ Storage responds faster after it finds one valid replica.
+ Because setting this header to True is more expensive for
+ the back end, use it only when it is absolutely needed.
+ type: bool
+ sample: null
+ meta_temp_url_key:
+ description: The secret key value for temporary URLs. If not set,
+ this header is not returned by this operation.
+ type: str
+ sample: null
+ meta_temp_url_key_2:
+ description: A second secret key value for temporary URLs. If not set,
+ this header is not returned by this operation.
+ type: str
+ sample: null
+ name:
+ description: The name of the container.
+ type: str
+ sample: "otc"
+ object_count:
+ description: The number of objects.
+ type: int
+ sample: 1
+ read_ACL:
+ description: The ACL that grants read access. If not set, this header is
+ not returned by this operation.
+ type: str
+ sample: null
+ storage_policy:
+ description: Storage policy used by the container. It is not possible to
+ change the policy of an existing container.
+ type: str
+ sample: null
+ sync_key:
+ description: The secret key for container synchronization. If not set,
+ this header is not returned by this operation.
+ type: str
+ sample: null
+ sync_to:
+ description: The destination for container synchronization. If not set,
+ this header is not returned by this operation.
+ type: str
+ sample: null
+ timestamp:
+ description: The timestamp of the transaction.
+ type: str
+ sample: null
+ versions_location:
+ description: Enables versioning on this container. The value is the name
+ of another container. You must UTF-8-encode and then
+ URL-encode the name before you include it in the header. To
+ disable versioning, set the header to an empty string.
+ type: str
+ sample: null
+ write_ACL:
+ description: The ACL that grants write access. If not set, this header is
+ not returned by this operation.
+ type: str
+ sample: null
'''
-EXAMPLES = '''
-# Create empty container
- - openstack.cloud.object_container:
- container: "new-container"
+EXAMPLES = r'''
+- name: Create empty container with public access
+ openstack.cloud.object_container:
+ name: "new-container"
state: present
-
-# Set metadata for container
- - openstack.cloud.object_container:
- container: "new-container"
- metadata: "Cache-Control='no-cache'"
-
-# Delete some keys from metadata of a container
- - openstack.cloud.object_container:
- container: "new-container"
- keys:
- - content_type
-
-# Delete container
- - openstack.cloud.object_container:
- container: "new-container"
+ read_ACL: ".r:*,.rlistings"
+
+- name: Set metadata for container
+ openstack.cloud.object_container:
+ name: "new-container"
+ metadata:
+ 'Cache-Control': 'no-cache'
+ 'foo': 'bar'
+
+- name: Delete metadata keys of a container
+ openstack.cloud.object_container:
+ name: "new-container"
+ delete_metadata_keys:
+ - foo
+
+- name: Delete container
+ openstack.cloud.object_container:
+ name: "new-container"
state: absent
-# Delete container and its objects
- - openstack.cloud.object_container:
- container: "new-container"
+- name: Delete container and all its objects
+ openstack.cloud.object_container:
+ name: "new-container"
delete_with_all_objects: true
state: absent
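+
+# An additional illustrative task: read_ACL and write_ACL can also be changed
+# on an existing container; the ACL value is a placeholder.
+- name: Grant write access on an existing container
+ openstack.cloud.object_container:
+ name: "new-container"
+ write_ACL: "otherproject:otheruser"
+ state: present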
'''
@@ -114,88 +210,148 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ContainerModule(OpenStackModule):
argument_spec = dict(
- container=dict(type='str', required=True),
- metadata=dict(type='dict', required=False),
- keys=dict(type='list', required=False, elements='str', no_log=False),
- state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
- delete_with_all_objects=dict(type='bool', default=False, required=False)
+ delete_metadata_keys=dict(type='list', elements='str',
+ no_log=False, # := noqa no-log-needed
+ aliases=['keys']),
+ delete_with_all_objects=dict(type='bool', default=False),
+ metadata=dict(type='dict'),
+ name=dict(required=True, aliases=['container']),
+ read_ACL=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ write_ACL=dict(),
)
- def create(self, container):
+ module_kwargs = dict(
+ supports_check_mode=True
+ )
- data = {}
- if self._container_exist(container):
+ def run(self):
+ state = self.params['state']
+ container = self._find()
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, container))
+
+ if state == 'present' and not container:
+ # Create container
+ container = self._create()
+ self.exit_json(changed=True,
+ # metadata is not returned by
+ # to_dict(computed=False) so return it explicitly
+ container=dict(metadata=container.metadata,
+ **container.to_dict(computed=False)))
+
+ elif state == 'present' and container:
+ # Update container
+ update = self._build_update(container)
+ if update:
+ container = self._update(container, update)
+
+ self.exit_json(changed=bool(update),
+ # metadata is not returned by
+ # to_dict(computed=False) so return it explicitly
+ container=dict(metadata=container.metadata,
+ **container.to_dict(computed=False)))
+
+ elif state == 'absent' and container:
+ # Delete container
+ self._delete(container)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not container:
+ # Do nothing
self.exit_json(changed=False)
- container_data = self.conn.object_store.create_container(name=container).to_dict()
- container_data.pop('location')
- data['container'] = container_data
- self.exit_json(changed=True, **data)
-
- def delete(self, container):
-
- delete_with_all_objects = self.params['delete_with_all_objects']
-
- changed = False
- if self._container_exist(container):
- objects = []
- for raw in self.conn.object_store.objects(container):
- dt = raw.to_dict()
- dt.pop('location')
- objects.append(dt)
- if len(objects) > 0:
- if delete_with_all_objects:
- for obj in objects:
- self.conn.object_store.delete_object(container=container, obj=obj['id'])
- else:
- self.fail_json(msg="Container has objects")
- self.conn.object_store.delete_container(container=container)
- changed = True
-
- self.exit(changed=changed)
-
- def set_metadata(self, container, metadata):
-
- data = {}
-
- if not self._container_exist(container):
- new_container = self.conn.object_store.create_container(name=container).to_dict()
-
- new_container = self.conn.object_store.set_container_metadata(container, **metadata).to_dict()
- new_container.pop('location')
- data['container'] = new_container
- self.exit(changed=True, **data)
-
- def delete_metadata(self, container, keys):
-
- if not self._container_exist(container):
- self.fail_json(msg="Container doesn't exist")
+ def _build_update(self, container):
+ update = {}
- self.conn.object_store.delete_container_metadata(container=container, keys=keys)
- self.exit(changed=True)
-
- def _container_exist(self, container):
+ metadata = self.params['metadata']
+ if metadata is not None:
+ # Swift metadata keys must be treated as case-insensitive
+ old_metadata = dict((k.lower(), v)
+ for k, v in (container.metadata or {}).items())
+ new_metadata = dict((k, v) for k, v in metadata.items()
+ if k.lower() not in old_metadata
+ or v != old_metadata[k.lower()])
+ if new_metadata:
+ update['metadata'] = new_metadata
+
+ delete_metadata_keys = self.params['delete_metadata_keys']
+ if delete_metadata_keys is not None:
+ for key in delete_metadata_keys:
+ if (container.metadata is not None
+ and key.lower() in [k.lower()
+ for k in container.metadata.keys()]):
+ update['delete_metadata_keys'] = delete_metadata_keys
+ break
+
+ attributes = dict((k, self.params[k])
+ for k in ['read_ACL', 'write_ACL']
+ if self.params[k] is not None
+ and self.params[k] != container[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['metadata', 'name', 'read_ACL', 'write_ACL']
+ if self.params[k] is not None)
+
+ return self.conn.object_store.create_container(**kwargs)
+
+ def _delete(self, container):
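+ # Swift refuses to delete a container which still holds objects, so
+ # purge them first when delete_with_all_objects is set.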
+ if self.params['delete_with_all_objects']:
+ for object in self.conn.object_store.objects(container.name):
+ self.conn.object_store.delete_object(obj=object.name,
+ container=container.name)
+
+ self.conn.object_store.delete_container(container=container.name)
+
+ def _find(self):
+ name_or_id = self.params['name']
+ # openstacksdk has no object_store.find_container() function
try:
- self.conn.object_store.get_container_metadata(container)
- return True
+ return self.conn.object_store.get_container_metadata(name_or_id)
except self.sdk.exceptions.ResourceNotFound:
- return False
+ return None
+
+ def _update(self, container, update):
+ delete_metadata_keys = update.get('delete_metadata_keys')
+ if delete_metadata_keys:
+ self.conn.object_store.delete_container_metadata(
+ container=container.name, keys=delete_metadata_keys)
+ # object_store.delete_container_metadata() does not delete keys
+ # from metadata dictionary so reload container
+ container = \
+ self.conn.object_store.get_container_metadata(container.name)
+
+ # metadata has higher precedence than delete_metadata_keys
+ # and thus is applied afterwards
+ metadata = update.get('metadata')
+ if metadata:
+ container = self.conn.object_store.set_container_metadata(
+ container.name, refresh=True, **metadata)
- def run(self):
+ attributes = update.get('attributes')
+ if attributes:
+ container = self.conn.object_store.set_container_metadata(
+ container.name, refresh=True, **attributes)
- container = self.params['container']
- state = self.params['state']
- metadata = self.params['metadata']
- keys = self.params['keys']
-
- if state == 'absent':
- self.delete(container)
- if metadata:
- self.set_metadata(container, metadata)
- if keys:
- self.delete_metadata(container, keys)
+ return container
- self.create(container)
+ def _will_change(self, state, container):
+ if state == 'present' and not container:
+ return True
+ elif state == 'present' and container:
+ return bool(self._build_update(container))
+ elif state == 'absent' and container:
+ return True
+ else:
+ # state == 'absent' and not container:
+ return False
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_auth.py b/ansible_collections/openstack/cloud/plugins/modules/os_auth.py
deleted file mode 100644
index 1f2c516e4..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_auth.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: auth
-short_description: Retrieve an auth token
-author: OpenStack Ansible SIG
-description:
- - Retrieve an auth token from an OpenStack Cloud
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Authenticate to the cloud and retrieve the service catalog
- openstack.cloud.auth:
- cloud: rax-dfw
-
-- name: Show service catalog
- debug:
- var: service_catalog
-'''
-
-RETURN = '''
-auth_token:
- description: Openstack API Auth Token
- returned: success
- type: str
-service_catalog:
- description: A dictionary of available API endpoints
- returned: success
- type: dict
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class AuthModule(OpenStackModule):
- argument_spec = dict()
- module_kwargs = dict()
-
- def run(self):
- self.exit_json(
- changed=False,
- ansible_facts=dict(
- auth_token=self.conn.auth_token,
- service_catalog=self.conn.service_catalog))
-
-
-def main():
- module = AuthModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_client_config.py b/ansible_collections/openstack/cloud/plugins/modules/os_client_config.py
deleted file mode 100644
index 94036e499..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_client_config.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: config
-short_description: Get OpenStack Client config
-description:
- - Get I(openstack) client config data from clouds.yaml or environment
-notes:
- - Facts are placed in the C(openstack.clouds) variable.
-options:
- clouds:
- description:
- - List of clouds to limit the return list to. No value means return
- information on all configured clouds
- required: false
- default: []
- type: list
- elements: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-author: OpenStack Ansible SIG
-'''
-
-EXAMPLES = '''
-- name: Get list of clouds that do not support security groups
- openstack.cloud.config:
-
-- debug:
- var: "{{ item }}"
- with_items: "{{ openstack.clouds | rejectattr('secgroup_source', 'none') | list }}"
-
-- name: Get the information back just about the mordred cloud
- openstack.cloud.config:
- clouds:
- - mordred
-'''
-
-try:
- import openstack.config
- from openstack import exceptions
- HAS_OPENSTACKSDK = True
-except ImportError:
- HAS_OPENSTACKSDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def main():
- module = AnsibleModule(argument_spec=dict(
- clouds=dict(required=False, type='list', default=[], elements='str'),
- ))
-
- if not HAS_OPENSTACKSDK:
- module.fail_json(msg='openstacksdk is required for this module')
-
- p = module.params
-
- try:
- config = openstack.config.OpenStackConfig()
- clouds = []
- for cloud in config.get_all_clouds():
- if not p['clouds'] or cloud.name in p['clouds']:
- cloud.config['name'] = cloud.name
- clouds.append(cloud.config)
- module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
- except exceptions.ConfigException as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_coe_cluster.py b/ansible_collections/openstack/cloud/plugins/modules/os_coe_cluster.py
deleted file mode 100644
index feb202a3b..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_coe_cluster.py
+++ /dev/null
@@ -1,292 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2018 Catalyst IT Ltd.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: coe_cluster
-short_description: Add/Remove COE cluster from OpenStack Cloud
-author: OpenStack Ansible SIG
-description:
- - Add or Remove COE cluster from the OpenStack Container Infra service.
-options:
- cluster_template_id:
- description:
- - The template ID of cluster template.
- required: true
- type: str
- discovery_url:
- description:
- - Url used for cluster node discovery
- type: str
- docker_volume_size:
- description:
- - The size in GB of the docker volume
- type: int
- flavor_id:
- description:
- - The flavor of the minion node for this ClusterTemplate
- type: str
- keypair:
- description:
- - Name of the keypair to use.
- type: str
- labels:
- description:
- - One or more key/value pairs
- type: raw
- master_flavor_id:
- description:
- - The flavor of the master node for this ClusterTemplate
- type: str
- master_count:
- description:
- - The number of master nodes for this cluster
- default: 1
- type: int
- name:
- description:
- - Name that has to be given to the cluster template
- required: true
- type: str
- node_count:
- description:
- - The number of nodes for this cluster
- default: 1
- type: int
- state:
- description:
- - Indicate desired state of the resource.
- choices: [present, absent]
- default: present
- type: str
- timeout:
- description:
- - Timeout for creating the cluster in minutes. Default to 60 mins
- if not set
- default: 60
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-id:
- description: The cluster UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-cluster:
- description: Dictionary describing the cluster.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- api_address:
- description:
- - Api address of cluster master node
- type: str
- sample: https://172.24.4.30:6443
- cluster_template_id:
- description: The cluster_template UUID
- type: str
- sample: '7b1418c8-cea8-48fc-995d-52b66af9a9aa'
- coe_version:
- description:
- - Version of the COE software currently running in this cluster
- type: str
- sample: v1.11.1
- container_version:
- description:
- - "Version of the container software. Example: docker version."
- type: str
- sample: 1.12.6
- created_at:
- description:
- - The time in UTC at which the cluster is created
- type: str
- sample: "2018-08-16T10:29:45+00:00"
- create_timeout:
- description:
- - Timeout for creating the cluster in minutes. Default to 60 if
- not set.
- type: int
- sample: 60
- discovery_url:
- description:
- - Url used for cluster node discovery
- type: str
- sample: https://discovery.etcd.io/a42ee38e7113f31f4d6324f24367aae5
- faults:
- description:
- - Fault info collected from the Heat resources of this cluster
- type: dict
- sample: {'0': 'ResourceInError: resources[0].resources...'}
- flavor_id:
- description:
- - The flavor of the minion node for this cluster
- type: str
- sample: c1.c1r1
- keypair:
- description:
- - Name of the keypair to use.
- type: str
- sample: mykey
- labels:
- description: One or more key/value pairs
- type: dict
- sample: {'key1': 'value1', 'key2': 'value2'}
- master_addresses:
- description:
- - IP addresses of cluster master nodes
- type: list
- sample: ['172.24.4.5']
- master_count:
- description:
- - The number of master nodes for this cluster.
- type: int
- sample: 1
- master_flavor_id:
- description:
- - The flavor of the master node for this cluster
- type: str
- sample: c1.c1r1
- name:
- description:
- - Name that has to be given to the cluster
- type: str
- sample: k8scluster
- node_addresses:
- description:
- - IP addresses of cluster slave nodes
- type: list
- sample: ['172.24.4.8']
- node_count:
- description:
- - The number of master nodes for this cluster.
- type: int
- sample: 1
- stack_id:
- description:
- - Stack id of the Heat stack
- type: str
- sample: '07767ec6-85f5-44cb-bd63-242a8e7f0d9d'
- status:
- description: Status of the cluster from the heat stack
- type: str
- sample: 'CREATE_COMLETE'
- status_reason:
- description:
- - Status reason of the cluster from the heat stack
- type: str
- sample: 'Stack CREATE completed successfully'
- updated_at:
- description:
- - The time in UTC at which the cluster is updated
- type: str
- sample: '2018-08-16T10:39:25+00:00'
- id:
- description:
- - Unique UUID for this cluster
- type: str
- sample: '86246a4d-a16c-4a58-9e96ad7719fe0f9d'
-'''
-
-EXAMPLES = '''
-# Create a new Kubernetes cluster
-- openstack.cloud.coe_cluster:
- name: k8s
- cluster_template_id: k8s-ha
- keypair: mykey
- master_count: 3
- node_count: 5
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class CoeClusterModule(OpenStackModule):
- argument_spec = dict(
- cluster_template_id=dict(required=True),
- discovery_url=dict(default=None),
- docker_volume_size=dict(type='int'),
- flavor_id=dict(default=None),
- keypair=dict(default=None, no_log=False),
- labels=dict(default=None, type='raw'),
- master_count=dict(type='int', default=1),
- master_flavor_id=dict(default=None),
- name=dict(required=True),
- node_count=dict(type='int', default=1),
- state=dict(default='present', choices=['absent', 'present']),
- timeout=dict(type='int', default=60),
- )
- module_kwargs = dict()
-
- def _parse_labels(self, labels):
- if isinstance(labels, str):
- labels_dict = {}
- for kv_str in labels.split(","):
- k, v = kv_str.split("=")
- labels_dict[k] = v
- return labels_dict
- if not labels:
- return {}
- return labels
-
- def run(self):
- params = self.params.copy()
-
- state = self.params['state']
- name = self.params['name']
- cluster_template_id = self.params['cluster_template_id']
-
- kwargs = dict(
- discovery_url=self.params['discovery_url'],
- docker_volume_size=self.params['docker_volume_size'],
- flavor_id=self.params['flavor_id'],
- keypair=self.params['keypair'],
- labels=self._parse_labels(params['labels']),
- master_count=self.params['master_count'],
- master_flavor_id=self.params['master_flavor_id'],
- node_count=self.params['node_count'],
- create_timeout=self.params['timeout'],
- )
-
- changed = False
- cluster = self.conn.get_coe_cluster(
- name_or_id=name, filters={'cluster_template_id': cluster_template_id})
-
- if state == 'present':
- if not cluster:
- cluster = self.conn.create_coe_cluster(
- name, cluster_template_id=cluster_template_id, **kwargs)
- changed = True
- else:
- changed = False
-
- # NOTE (brtknr): At present, create_coe_cluster request returns
- # cluster_id as `uuid` whereas get_coe_cluster request returns the
- # same field as `id`. This behaviour may change in the future
- # therefore try `id` first then `uuid`.
- cluster_id = cluster.get('id', cluster.get('uuid'))
- cluster['id'] = cluster['uuid'] = cluster_id
- self.exit_json(changed=changed, cluster=cluster, id=cluster_id)
- elif state == 'absent':
- if not cluster:
- self.exit_json(changed=False)
- else:
- self.conn.delete_coe_cluster(name)
- self.exit_json(changed=True)
-
-
-def main():
- module = CoeClusterModule()
- module()
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_coe_cluster_template.py b/ansible_collections/openstack/cloud/plugins/modules/os_coe_cluster_template.py
deleted file mode 100644
index 0596f39b7..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_coe_cluster_template.py
+++ /dev/null
@@ -1,388 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2018 Catalyst IT Ltd.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: coe_cluster_template
-short_description: Add/Remove COE cluster template from OpenStack Cloud
-author: OpenStack Ansible SIG
-description:
- - Add or Remove COE cluster template from the OpenStack Container Infra
- service.
-options:
- coe:
- description:
- - The Container Orchestration Engine for this clustertemplate
- choices: [kubernetes, swarm, mesos]
- type: str
- required: true
- dns_nameserver:
- description:
- - The DNS nameserver address
- default: '8.8.8.8'
- type: str
- docker_storage_driver:
- description:
- - Docker storage driver
- choices: [devicemapper, overlay, overlay2]
- type: str
- docker_volume_size:
- description:
- - The size in GB of the docker volume
- type: int
- external_network_id:
- description:
- - The external network to attach to the Cluster
- type: str
- fixed_network:
- description:
- - The fixed network name to attach to the Cluster
- type: str
- fixed_subnet:
- description:
- - The fixed subnet name to attach to the Cluster
- type: str
- flavor_id:
- description:
- - The flavor of the minion node for this ClusterTemplate
- type: str
- floating_ip_enabled:
- description:
- - Indicates whether created clusters should have a floating ip or not
- type: bool
- default: true
- keypair_id:
- description:
- - Name or ID of the keypair to use.
- type: str
- image_id:
- description:
- - Image id the cluster will be based on
- type: str
- required: true
- labels:
- description:
- - One or more key/value pairs
- type: raw
- http_proxy:
- description:
- - Address of a proxy that will receive all HTTP requests and relay them
- The format is a URL including a port number
- type: str
- https_proxy:
- description:
- - Address of a proxy that will receive all HTTPS requests and relay
- them. The format is a URL including a port number
- type: str
- master_flavor_id:
- description:
- - The flavor of the master node for this ClusterTemplate
- type: str
- master_lb_enabled:
- description:
- - Indicates whether created clusters should have a load balancer
- for master nodes or not
- type: bool
- default: 'no'
- name:
- description:
- - Name that has to be given to the cluster template
- required: true
- type: str
- network_driver:
- description:
- - The name of the driver used for instantiating container networks
- choices: [flannel, calico, docker]
- type: str
- no_proxy:
- description:
- - A comma separated list of IPs for which proxies should not be
- used in the cluster
- type: str
- public:
- description:
- - Indicates whether the ClusterTemplate is public or not
- type: bool
- default: 'no'
- registry_enabled:
- description:
- - Indicates whether the docker registry is enabled
- type: bool
- default: 'no'
- server_type:
- description:
- - Server type for this ClusterTemplate
- choices: [vm, bm]
- default: vm
- type: str
- state:
- description:
- - Indicate desired state of the resource.
- choices: [present, absent]
- default: present
- type: str
- tls_disabled:
- description:
- - Indicates whether the TLS should be disabled
- type: bool
- default: 'no'
- volume_driver:
- description:
- - The name of the driver used for instantiating container volumes
- choices: [cinder, rexray]
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-id:
- description: The cluster UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-cluster_template:
- description: Dictionary describing the template.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- coe:
- description: The Container Orchestration Engine for this clustertemplate
- type: str
- sample: kubernetes
- dns_nameserver:
- description: The DNS nameserver address
- type: str
- sample: '8.8.8.8'
- docker_storage_driver:
- description: Docker storage driver
- type: str
- sample: devicemapper
- docker_volume_size:
- description: The size in GB of the docker volume
- type: int
- sample: 5
- external_network_id:
- description: The external network to attach to the Cluster
- type: str
- sample: public
- fixed_network:
- description: The fixed network name to attach to the Cluster
- type: str
- sample: 07767ec6-85f5-44cb-bd63-242a8e7f0d9d
- fixed_subnet:
- description:
- - The fixed subnet name to attach to the Cluster
- type: str
- sample: 05567ec6-85f5-44cb-bd63-242a8e7f0d9d
- flavor_id:
- description:
- - The flavor of the minion node for this ClusterTemplate
- type: str
- sample: c1.c1r1
- floating_ip_enabled:
- description:
- - Indicates whether created clusters should have a floating ip or not
- type: bool
- sample: true
- keypair_id:
- description:
- - Name or ID of the keypair to use.
- type: str
- sample: mykey
- image_id:
- description:
- - Image id the cluster will be based on
- type: str
- sample: 05567ec6-85f5-44cb-bd63-242a8e7f0e9d
- labels:
- description: One or more key/value pairs
- type: dict
- sample: {'key1': 'value1', 'key2': 'value2'}
- http_proxy:
- description:
- - Address of a proxy that will receive all HTTP requests and relay them
- The format is a URL including a port number
- type: str
- sample: http://10.0.0.11:9090
- https_proxy:
- description:
- - Address of a proxy that will receive all HTTPS requests and relay
- them. The format is a URL including a port number
- type: str
- sample: https://10.0.0.10:8443
- master_flavor_id:
- description:
- - The flavor of the master node for this ClusterTemplate
- type: str
- sample: c1.c1r1
- master_lb_enabled:
- description:
- - Indicates whether created clusters should have a load balancer
- for master nodes or not
- type: bool
- sample: true
- name:
- description:
- - Name that has to be given to the cluster template
- type: str
- sample: k8scluster
- network_driver:
- description:
- - The name of the driver used for instantiating container networks
- type: str
- sample: calico
- no_proxy:
- description:
- - A comma separated list of IPs for which proxies should not be
- used in the cluster
- type: str
- sample: 10.0.0.4,10.0.0.5
- public:
- description:
- - Indicates whether the ClusterTemplate is public or not
- type: bool
- sample: false
- registry_enabled:
- description:
- - Indicates whether the docker registry is enabled
- type: bool
- sample: false
- server_type:
- description:
- - Server type for this ClusterTemplate
- type: str
- sample: vm
- tls_disabled:
- description:
- - Indicates whether the TLS should be disabled
- type: bool
- sample: false
- volume_driver:
- description:
- - The name of the driver used for instantiating container volumes
- type: str
- sample: cinder
-'''
-
-EXAMPLES = '''
-# Create a new Kubernetes cluster template
-- openstack.cloud.coe_cluster_template:
- name: k8s
- coe: kubernetes
- keypair_id: mykey
- image_id: 2a8c9888-9054-4b06-a1ca-2bb61f9adb72
- public: no
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class CoeClusterTemplateModule(OpenStackModule):
- argument_spec = dict(
- coe=dict(required=True, choices=['kubernetes', 'swarm', 'mesos']),
- dns_nameserver=dict(default='8.8.8.8'),
- docker_storage_driver=dict(choices=['devicemapper', 'overlay', 'overlay2']),
- docker_volume_size=dict(type='int'),
- external_network_id=dict(default=None),
- fixed_network=dict(default=None),
- fixed_subnet=dict(default=None),
- flavor_id=dict(default=None),
- floating_ip_enabled=dict(type='bool', default=True),
- keypair_id=dict(default=None),
- image_id=dict(required=True),
- labels=dict(default=None, type='raw'),
- http_proxy=dict(default=None),
- https_proxy=dict(default=None),
- master_lb_enabled=dict(type='bool', default=False),
- master_flavor_id=dict(default=None),
- name=dict(required=True),
- network_driver=dict(choices=['flannel', 'calico', 'docker']),
- no_proxy=dict(default=None),
- public=dict(type='bool', default=False),
- registry_enabled=dict(type='bool', default=False),
- server_type=dict(default="vm", choices=['vm', 'bm']),
- state=dict(default='present', choices=['absent', 'present']),
- tls_disabled=dict(type='bool', default=False),
- volume_driver=dict(choices=['cinder', 'rexray']),
- )
- module_kwargs = dict()
-
- def _parse_labels(self, labels):
- if isinstance(labels, str):
- labels_dict = {}
- for kv_str in labels.split(","):
- k, v = kv_str.split("=")
- labels_dict[k] = v
- return labels_dict
- if not labels:
- return {}
- return labels
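- # Illustration (not part of the original module): given the string
- # "kube_tag=v1.21.1,availability_zone=nova", _parse_labels returns
- # {'kube_tag': 'v1.21.1', 'availability_zone': 'nova'}; a dict passes
- # through unchanged and None becomes {}.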
-
- def run(self):
- params = self.params.copy()
-
- state = self.params['state']
- name = self.params['name']
- coe = self.params['coe']
- image_id = self.params['image_id']
-
- kwargs = dict(
- dns_nameserver=self.params['dns_nameserver'],
- docker_storage_driver=self.params['docker_storage_driver'],
- docker_volume_size=self.params['docker_volume_size'],
- external_network_id=self.params['external_network_id'],
- fixed_network=self.params['fixed_network'],
- fixed_subnet=self.params['fixed_subnet'],
- flavor_id=self.params['flavor_id'],
- floating_ip_enabled=self.params['floating_ip_enabled'],
- keypair_id=self.params['keypair_id'],
- labels=self._parse_labels(params['labels']),
- http_proxy=self.params['http_proxy'],
- https_proxy=self.params['https_proxy'],
- master_lb_enabled=self.params['master_lb_enabled'],
- master_flavor_id=self.params['master_flavor_id'],
- network_driver=self.params['network_driver'],
- no_proxy=self.params['no_proxy'],
- public=self.params['public'],
- registry_enabled=self.params['registry_enabled'],
- server_type=self.params['server_type'],
- tls_disabled=self.params['tls_disabled'],
- volume_driver=self.params['volume_driver'],
- )
-
- changed = False
- template = self.conn.get_coe_cluster_template(
- name_or_id=name, filters={'coe': coe, 'image_id': image_id})
-
- if state == 'present':
- if not template:
- template = self.conn.create_coe_cluster_template(
- name, coe=coe, image_id=image_id, **kwargs)
- changed = True
- else:
- changed = False
-
- self.exit_json(
- changed=changed, cluster_template=template, id=template['uuid'])
- elif state == 'absent':
- if not template:
- self.exit_json(changed=False)
- else:
- self.conn.delete_coe_cluster_template(name)
- self.exit_json(changed=True)
-
-
-def main():
- module = CoeClusterTemplateModule()
- module()
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_flavor_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_flavor_info.py
deleted file mode 100644
index 61ee7a5b7..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_flavor_info.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 IBM
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: compute_flavor_info
-short_description: Retrieve information about one or more flavors
-author: OpenStack Ansible SIG
-description:
- - Retrieve information about available OpenStack instance flavors. By default,
- information about ALL flavors are retrieved. Filters can be applied to get
- information for only matching flavors. For example, you can filter on the
- amount of RAM available to the flavor, or the number of virtual CPUs
- available to the flavor, or both. When specifying multiple filters,
- *ALL* filters must match on a flavor before that flavor is returned as
- a fact.
- - This module was called C(openstack.cloud.compute_flavor_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.compute_flavor_info) module no longer returns C(ansible_facts)!
-notes:
- - The result contains a list of unsorted flavors.
-options:
- name:
- description:
- - A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral).
- type: str
- ram:
- description:
- - "A string used for filtering flavors based on the amount of RAM
- (in MB) desired. This string accepts the following special values:
- 'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
- (return flavors with the maximum amount of RAM)."
-
- - "A specific amount of RAM may also be specified. Any flavors with this
- exact amount of RAM will be returned."
-
- - "A range of acceptable RAM may be given using a special syntax. Simply
- prefix the amount of RAM with one of these acceptable range values:
- '<', '>', '<=', '>='. These values represent less than, greater than,
- less than or equal to, and greater than or equal to, respectively."
- type: str
- vcpus:
- description:
- - A string used for filtering flavors based on the number of virtual
- CPUs desired. Format is the same as the I(ram) parameter.
- type: str
- limit:
- description:
- - Limits the number of flavors returned. All matching flavors are
- returned by default.
- type: int
- ephemeral:
- description:
- - A string used for filtering flavors based on the amount of ephemeral
- storage. Format is the same as the I(ram) parameter
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Gather information about all available flavors
-- openstack.cloud.compute_flavor_info:
- cloud: mycloud
- register: result
-
-- debug:
- msg: "{{ result.openstack_flavors }}"
-
-# Gather information for the flavor named "xlarge-flavor"
-- openstack.cloud.compute_flavor_info:
- cloud: mycloud
- name: "xlarge-flavor"
-
-# Get all flavors that have exactly 512 MB of RAM.
-- openstack.cloud.compute_flavor_info:
- cloud: mycloud
- ram: "512"
-
-# Get all flavors that have 1024 MB or more of RAM.
-- openstack.cloud.compute_flavor_info:
- cloud: mycloud
- ram: ">=1024"
-
-# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
-# option will guarantee only a single flavor is returned.
-- openstack.cloud.compute_flavor_info:
- cloud: mycloud
- ram: "MIN"
- limit: 1
-
-# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
-- openstack.cloud.compute_flavor_info:
- cloud: mycloud
- ram: ">=1024"
- vcpus: "2"
-
-# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and
-# less than 30gb of ephemeral storage.
-- openstack.cloud.compute_flavor_info:
- cloud: mycloud
- ram: ">=1024"
- vcpus: "2"
- ephemeral: "<30"
-'''
-
-
-RETURN = '''
-openstack_flavors:
- description: Dictionary describing the flavors.
- returned: On success.
- type: complex
- contains:
- id:
- description: Flavor ID.
- returned: success
- type: str
- sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
- name:
- description: Flavor name.
- returned: success
- type: str
- sample: "tiny"
- description:
- description: Description of the flavor
- returned: success
- type: str
- sample: "Small flavor"
- is_disabled:
- description: Whether the flavor is disabled or not
- returned: success
- type: bool
- sample: False
- rxtx_factor:
- description: Factor to be multiplied by the rxtx_base property of
- the network it is attached to in order to have a
- different bandwidth cap.
- returned: success
- type: float
- sample: 1.0
- extra_specs:
- description: Optional parameters to configure different flavor
- options.
- returned: success
- type: dict
- sample: "{'hw_rng:allowed': True}"
- disk:
- description: Size of local disk, in GB.
- returned: success
- type: int
- sample: 10
- ephemeral:
- description: Ephemeral space size, in GB.
- returned: success
- type: int
- sample: 10
- ram:
- description: Amount of memory, in MB.
- returned: success
- type: int
- sample: 1024
- swap:
- description: Swap space size, in MB.
- returned: success
- type: int
- sample: 100
- vcpus:
- description: Number of virtual CPUs.
- returned: success
- type: int
- sample: 2
- is_public:
- description: Whether the flavor is accessible to the public.
- returned: success
- type: bool
- sample: true
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ComputeFlavorInfoModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=False, default=None),
- ram=dict(required=False, default=None),
- vcpus=dict(required=False, default=None),
- limit=dict(required=False, default=None, type='int'),
- ephemeral=dict(required=False, default=None),
- )
- module_kwargs = dict(
- mutually_exclusive=[
- ['name', 'ram'],
- ['name', 'vcpus'],
- ['name', 'ephemeral']
- ],
- supports_check_mode=True
- )
-
- deprecated_names = ('openstack.cloud.compute_flavor_facts',)
-
- def run(self):
- name = self.params['name']
- vcpus = self.params['vcpus']
- ram = self.params['ram']
- ephemeral = self.params['ephemeral']
- limit = self.params['limit']
-
- filters = {}
- if vcpus:
- filters['vcpus'] = vcpus
- if ram:
- filters['ram'] = ram
- if ephemeral:
- filters['ephemeral'] = ephemeral
-
- if name:
- # extra_specs are exposed in the flavor representation since Rocky, so we do not
- # need get_extra_specs=True which is not available in OpenStack SDK 0.36 (Train)
- # Ref.: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html
- flavor = self.conn.compute.find_flavor(name)
- flavors = [flavor] if flavor else []
-
- else:
- flavors = list(self.conn.compute.flavors())
- if filters:
- flavors = self.conn.range_search(flavors, filters)
-
- if limit is not None:
- flavors = flavors[:limit]
-
- # Transform entries to dict
- flavors = [flavor.to_dict(computed=True) for flavor in flavors]
- self.exit_json(changed=False, openstack_flavors=flavors)
-
-
-def main():
- module = ComputeFlavorInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
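-
-# A minimal sketch (not the openstacksdk implementation) of the range filter
-# syntax documented above for I(ram), applied to plain dicts with
-# illustrative values:
-flavors = [{'name': 'tiny', 'ram': 512}, {'name': 'small', 'ram': 2048}]
-matches = [f for f in flavors if f['ram'] >= 1024]      # equivalent of ram=">=1024"
-smallest = [min(flavors, key=lambda f: f['ram'])]       # equivalent of ram="MIN"
-exact = [f for f in flavors if f['ram'] == 512]         # equivalent of ram="512"
-# matches == [{'name': 'small', 'ram': 2048}]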
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_floating_ip.py b/ansible_collections/openstack/cloud/plugins/modules/os_floating_ip.py
deleted file mode 100644
index 6b5fb0d66..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_floating_ip.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: floating_ip
-author: OpenStack Ansible SIG
-short_description: Add/Remove floating IP from an instance
-description:
- - Add or remove a floating IP to or from an instance.
- - Returns the floating IP when attaching only if I(wait=true).
- - When detaching a floating IP, there might be a delay until the instance no longer lists the floating IP.
-options:
- server:
- description:
- - The name or ID of the instance to which the IP address
- should be assigned.
- required: true
- type: str
- network:
- description:
- - The name or ID of a neutron external network or a nova pool name.
- type: str
- floating_ip_address:
- description:
- - A floating IP address to attach or to detach. When I(state) is present,
- it can be used to specify an IP address to attach. I(floating_ip_address)
- requires I(network) to be set.
- type: str
- reuse:
- description:
- - When I(state) is present, and I(floating_ip_address) is not present,
- this parameter can be used to specify whether we should try to reuse
- a floating IP address already allocated to the project.
- type: bool
- default: 'no'
- fixed_address:
- description:
- - To which fixed IP of server the floating IP address should be
- attached to.
- type: str
- nat_destination:
- description:
- - The name or ID of the neutron private network that hosts the fixed IP
- to which the floating IP will be attached.
- aliases: ["fixed_network", "internal_network"]
- type: str
- wait:
- description:
- - When attaching a floating IP address, specify whether to wait for it to appear as attached.
- - Must be set to C(yes) for the module to return the value of the floating IP when attaching.
- type: bool
- default: 'no'
- timeout:
- description:
- - Time to wait for an IP address to appear as attached. See wait.
- required: false
- default: 60
- type: int
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- purge:
- description:
- - When I(state) is absent, indicates whether or not to delete the floating
- IP completely, or only detach it from the server. Default is to detach only.
- type: bool
- default: 'no'
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Assign a floating IP to the first interface of `cattle001` from an existing
-# external network or nova pool. A new floating IP from the first available
-# external network is allocated to the project.
-- openstack.cloud.floating_ip:
- cloud: dguerri
- server: cattle001
-
-# Assign a new floating IP to the instance fixed ip `192.0.2.3` of
-# `cattle001`. If a free floating IP is already allocated to the project, it is
-# reused; if not, a new one is created.
-- openstack.cloud.floating_ip:
- cloud: dguerri
- state: present
- reuse: yes
- server: cattle001
- network: ext_net
- fixed_address: 192.0.2.3
- wait: true
- timeout: 180
-
-# Assign a new floating IP from the network `ext_net` to the instance fixed
-# ip in network `private_net` of `cattle001`.
-- openstack.cloud.floating_ip:
- cloud: dguerri
- state: present
- server: cattle001
- network: ext_net
- nat_destination: private_net
- wait: true
- timeout: 180
-
-# Detach a floating IP address from a server
-- openstack.cloud.floating_ip:
- cloud: dguerri
- state: absent
- floating_ip_address: 203.0.113.2
- server: cattle001
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-import itertools
-
-
-class NetworkingFloatingIPModule(OpenStackModule):
- argument_spec = dict(
- server=dict(required=True),
- state=dict(default='present', choices=['absent', 'present']),
- network=dict(required=False, default=None),
- floating_ip_address=dict(required=False, default=None),
- reuse=dict(required=False, type='bool', default=False),
- fixed_address=dict(required=False, default=None),
- nat_destination=dict(required=False, default=None,
- aliases=['fixed_network', 'internal_network']),
- wait=dict(required=False, type='bool', default=False),
- timeout=dict(required=False, type='int', default=60),
- purge=dict(required=False, type='bool', default=False),
- )
-
- module_kwargs = dict(
- required_if=[
- ['state', 'absent', ['floating_ip_address']]
- ],
- required_by=dict(
- floating_ip_address=('network',)
- )
- )
-
- def _get_floating_ip(self, floating_ip_address):
- f_ips = self.conn.search_floating_ips(
- filters={'floating_ip_address': floating_ip_address})
-
- if not f_ips:
- return None
-
- return f_ips[0]
-
- def _list_floating_ips(self, server):
- return itertools.chain.from_iterable([
- (addr['addr'] for addr in server.addresses[net] if addr['OS-EXT-IPS:type'] == 'floating')
- for net in server.addresses
- ])
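- # Illustrative shape of server.addresses assumed here (Nova API format):
- # {'private_net': [{'addr': '192.0.2.3', 'OS-EXT-IPS:type': 'fixed'},
- # {'addr': '203.0.113.2', 'OS-EXT-IPS:type': 'floating'}]}
- # With that input, the generator above yields '203.0.113.2' only.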
-
- def _match_floating_ip(self, server,
- floating_ip_address,
- network_id,
- fixed_address,
- nat_destination):
-
- if floating_ip_address:
- return self._get_floating_ip(floating_ip_address)
- elif not fixed_address and nat_destination:
- nat_destination_name = self.conn.get_network(nat_destination)['name']
- return next(
- (self._get_floating_ip(addr['addr'])
- for addr in server.addresses.get(nat_destination_name, [])
- if addr['OS-EXT-IPS:type'] == 'floating'),
- None)
- else:
- # not floating_ip_address and (fixed_address or not nat_destination)
-
- # get any of the floating ips that matches fixed_address and/or network
- f_ip_addrs = self._list_floating_ips(server)
- f_ips = [f_ip for f_ip in self.conn.list_floating_ips() if f_ip['floating_ip_address'] in f_ip_addrs]
- return next(
- (f_ip for f_ip in f_ips
- if ((fixed_address and f_ip.fixed_ip_address == fixed_address) or not fixed_address)
- and ((network_id and f_ip.network == network_id) or not network_id)),
- None)
-
- def run(self):
- server_name_or_id = self.params['server']
- state = self.params['state']
- network = self.params['network']
- floating_ip_address = self.params['floating_ip_address']
- reuse = self.params['reuse']
- fixed_address = self.params['fixed_address']
- nat_destination = self.params['nat_destination']
- wait = self.params['wait']
- timeout = self.params['timeout']
- purge = self.params['purge']
-
- server = self.conn.get_server(server_name_or_id)
- if not server:
- self.fail_json(
- msg="server {0} not found".format(server_name_or_id))
-
- # Extract floating ips from server
- f_ip_addrs = self._list_floating_ips(server)
-
- # Get details about requested floating ip
- f_ip = self._get_floating_ip(floating_ip_address) if floating_ip_address else None
-
- if network:
- network_id = self.conn.get_network(name_or_id=network)["id"]
- else:
- network_id = None
-
- if state == 'present':
- if floating_ip_address and f_ip and floating_ip_address in f_ip_addrs:
- # Floating ip address has been assigned to server
- self.exit_json(changed=False, floating_ip=f_ip)
-
- if f_ip and f_ip['attached'] and floating_ip_address not in f_ip_addrs:
- # Requested floating ip has been attached to different server
- self.fail_json(msg="floating-ip {floating_ip_address} has already been attached to a different server"
- .format(floating_ip_address=floating_ip_address))
-
- if not floating_ip_address:
- # No specific floating ip requested, i.e. if any floating ip is already assigned to server,
- # check that it matches requirements.
-
- if not fixed_address and nat_destination:
- # Check if we have any floating ip on the given nat_destination network
- nat_destination_name = self.conn.get_network(nat_destination)['name']
- for addr in server.addresses.get(nat_destination_name, []):
- if addr['OS-EXT-IPS:type'] == 'floating':
- # A floating ip address has been assigned to the requested nat_destination
- f_ip = self._get_floating_ip(addr['addr'])
- self.exit_json(changed=False, floating_ip=f_ip)
- # else fixed_address or not nat_destination, hence an
- # analysis of all floating ips of server is required
- f_ips = [f_ip for f_ip in self.conn.list_floating_ips() if f_ip['floating_ip_address'] in f_ip_addrs]
- for f_ip in f_ips:
- if network_id and f_ip.network != network_id:
- # requested network does not match network of floating ip
- continue
-
- if not fixed_address and not nat_destination:
- # any floating ip will fulfill these requirements
- self.exit_json(changed=False, floating_ip=f_ip)
-
- if fixed_address and f_ip.fixed_ip_address == fixed_address:
- # a floating ip address has been assigned that points to the requested fixed_address
- self.exit_json(changed=False, floating_ip=f_ip)
-
- if floating_ip_address and not f_ip:
- # openstacksdk's create_ip requires floating_ip_address and floating_network_id to be set
- self.conn.network.create_ip(floating_ip_address=floating_ip_address, floating_network_id=network_id)
- # Else floating ip either does not exist or has not been attached yet
-
- # Both floating_ip_address and network are mutually exclusive in add_ips_to_server, i.e.
- # add_ips_to_server will ignore floating_ip_address if network is set
- # Ref.: https://github.com/openstack/openstacksdk/blob/a6b0ece2821ea79330c4067100295f6bdcbe456e/openstack/cloud/_floating_ip.py#L987
- server = self.conn.add_ips_to_server(
- server=server,
- ips=floating_ip_address,
- ip_pool=network if not floating_ip_address else None,
- reuse=reuse,
- fixed_address=fixed_address,
- wait=wait,
- timeout=timeout, nat_destination=nat_destination)
-
- # Update the floating ip status
- f_ip = self._match_floating_ip(server, floating_ip_address, network_id, fixed_address, nat_destination)
- self.exit_json(changed=True, floating_ip=f_ip)
-
- elif state == 'absent':
- f_ip = self._match_floating_ip(server, floating_ip_address, network_id, fixed_address, nat_destination)
- if not f_ip:
- # Nothing to detach
- self.exit_json(changed=False)
- changed = False
-
- if f_ip["fixed_ip_address"]:
- self.conn.detach_ip_from_server(server_id=server['id'], floating_ip_id=f_ip['id'])
- # OpenStackSDK sets {"port_id": None} to detach a floating ip from an instance,
- # but there might be a delay until a server does not list it in addresses any more.
-
- # Update the floating IP status
- f_ip = self.conn.get_floating_ip(id=f_ip['id'])
- changed = True
-
- if purge:
- self.conn.delete_floating_ip(f_ip['id'])
- self.exit_json(changed=True)
- self.exit_json(changed=changed, floating_ip=f_ip)
-
-
-def main():
- module = NetworkingFloatingIPModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_group.py b/ansible_collections/openstack/cloud/plugins/modules/os_group.py
deleted file mode 100644
index 5b45efa4b..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_group.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 IBM
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: identity_group
-short_description: Manage OpenStack Identity Groups
-author: OpenStack Ansible SIG
-description:
- - Manage OpenStack Identity Groups. Groups can be created, deleted or
- updated. Only the I(description) value can be updated.
-options:
- name:
- description:
- - Group name
- required: true
- type: str
- description:
- description:
- - Group description
- type: str
- domain_id:
- description:
- - Domain id to create the group in if the cloud supports domains.
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a group named "demo"
-- openstack.cloud.identity_group:
- cloud: mycloud
- state: present
- name: demo
- description: "Demo Group"
- domain_id: demoid
-
-# Update the description on existing "demo" group
-- openstack.cloud.identity_group:
- cloud: mycloud
- state: present
- name: demo
- description: "Something else"
- domain_id: demoid
-
-# Delete group named "demo"
-- openstack.cloud.identity_group:
- cloud: mycloud
- state: absent
- name: demo
-'''
-
-RETURN = '''
-group:
- description: Dictionary describing the group.
- returned: On success when I(state) is 'present'.
- type: complex
- contains:
- id:
- description: Unique group ID
- type: str
- sample: "ee6156ff04c645f481a6738311aea0b0"
- name:
- description: Group name
- type: str
- sample: "demo"
- description:
- description: Group description
- type: str
- sample: "Demo Group"
- domain_id:
- description: Domain for the group
- type: str
- sample: "default"
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityGroupModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- description=dict(required=False, default=None),
- domain_id=dict(required=False, default=None),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _system_state_change(self, state, description, group):
- if state == 'present' and not group:
- return True
- if state == 'present' and description is not None and group.description != description:
- return True
- if state == 'absent' and group:
- return True
- return False
-
- def run(self):
- name = self.params.get('name')
- description = self.params.get('description')
- state = self.params.get('state')
-
- domain_id = self.params.pop('domain_id')
-
- if domain_id:
- group = self.conn.get_group(name, filters={'domain_id': domain_id})
- else:
- group = self.conn.get_group(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, description, group))
-
- if state == 'present':
- if group is None:
- group = self.conn.create_group(
- name=name, description=description, domain=domain_id)
- changed = True
- else:
- if description is not None and group.description != description:
- group = self.conn.update_group(
- group.id, description=description)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed, group=group)
-
- elif state == 'absent':
- if group is None:
- changed = False
- else:
- self.conn.delete_group(group.id)
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityGroupModule()
- module()
-
-
-if __name__ == '__main__':
- main()
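-
-# A minimal sketch of the same present-state logic using openstacksdk directly,
-# mirroring the module above. The cloud name 'mycloud' and the group values are
-# assumptions for illustration.
-import openstack
-conn = openstack.connect(cloud='mycloud')   # assumes a clouds.yaml entry named 'mycloud'
-group = conn.get_group('demo')
-group = group or conn.create_group(name='demo', description='Demo Group')
-# Only the description is updated, as in the module above:
-group = conn.update_group(group.id, description='Demo Group') if group.description != 'Demo Group' else group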
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_group_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_group_info.py
deleted file mode 100644
index 68f00d73a..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_group_info.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2019, Phillipe Smith <phillipelnx@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: identity_group_info
-short_description: Retrieve info about one or more OpenStack groups
-author: OpenStack Ansible SIG
-description:
- - Retrieve info about one or more OpenStack groups.
-options:
- name:
- description:
- - Name or ID of the group.
- type: str
- domain:
- description:
- - Name or ID of the domain containing the group if the cloud supports domains
- type: str
- filters:
- description:
- - A dictionary of metadata to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Gather info about previously created groups
-- name: gather info
- hosts: localhost
- tasks:
- - name: Gather info about previously created groups
- openstack.cloud.identity_group_info:
- cloud: awesomecloud
- register: openstack_groups
- - debug:
- var: openstack_groups
-
-# Gather info about a previously created group by name
-- name: gather info
- hosts: localhost
- tasks:
- - name: Gather info about a previously created group by name
- openstack.cloud.identity_group_info:
- cloud: awesomecloud
- name: demogroup
- register: openstack_groups
- - debug:
- var: openstack_groups
-
-# Gather info about a previously created group in a specific domain
-- name: gather info
- hosts: localhost
- tasks:
- - name: Gather info about a previously created group in a specific domain
- openstack.cloud.identity_group_info:
- cloud: awesomecloud
- name: demogroup
- domain: admindomain
- register: openstack_groups
- - debug:
- var: openstack_groups
-
-# Gather info about a previously created group in a specific domain with filter
-- name: gather info
- hosts: localhost
- tasks:
- - name: Gather info about a previously created group in a specific domain with filter
- openstack.cloud.identity_group_info:
- cloud: awesomecloud
- name: demogroup
- domain: admindomain
- filters:
- enabled: False
- register: openstack_groups
- - debug:
- var: openstack_groups
-'''
-
-
-RETURN = '''
-openstack_groups:
- description: Dictionary describing all the matching groups.
- returned: always, but can be an empty list
- type: complex
- contains:
- name:
- description: Name given to the group.
- returned: success
- type: str
- description:
- description: Description of the group.
- returned: success
- type: str
- id:
- description: Unique UUID.
- returned: success
- type: str
- domain_id:
- description: Domain ID containing the group (keystone v3 clouds only)
- returned: success
- type: str
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityGroupInfoModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=False, default=None),
- domain=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def run(self):
- name = self.params['name']
- domain = self.params['domain']
- filters = self.params['filters'] or {}
-
- args = {}
- if domain:
- dom = self.conn.identity.find_domain(domain)
- if dom:
- args['domain_id'] = dom['id']
- else:
- self.fail_json(msg='Domain name or ID does not exist')
-
- groups = self.conn.search_groups(name, filters, **args)
- # groups is for backward (and forward) compatibility
- self.exit_json(changed=False, groups=groups, openstack_groups=groups)
-
-
-def main():
- module = IdentityGroupInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
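-
-# Equivalent direct openstacksdk calls for the domain-scoped lookup above.
-# The cloud name, group name and domain name are illustrative assumptions.
-import openstack
-conn = openstack.connect(cloud='mycloud')
-dom = conn.identity.find_domain('admindomain')
-args = {'domain_id': dom['id']} if dom else {}
-groups = conn.search_groups('demogroup', {'enabled': False}, **args)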
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_image.py b/ansible_collections/openstack/cloud/plugins/modules/os_image.py
deleted file mode 100644
index fae13a2e5..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_image.py
+++ /dev/null
@@ -1,270 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2013, Benno Joy <benno@ansible.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-# TODO(mordred): we need to support "location"(v1) and "locations"(v2)
-
-DOCUMENTATION = '''
----
-module: image
-short_description: Add/Delete images from OpenStack Cloud
-author: OpenStack Ansible SIG
-description:
- - Add or Remove images from the OpenStack Image Repository
-options:
- name:
- description:
- - The name of the image when uploading - or the name/ID of the image if deleting
- required: true
- type: str
- id:
- description:
- - The ID of the image when uploading an image
- type: str
- checksum:
- description:
- - The checksum of the image
- type: str
- disk_format:
- description:
- - The format of the disk that is getting uploaded
- default: qcow2
- choices: ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']
- type: str
- container_format:
- description:
- - The format of the container
- default: bare
- choices: ['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']
- type: str
- project:
- description:
- - The name or ID of the project owning the image
- type: str
- aliases: ['owner']
- project_domain:
- description:
- - The domain the project owning the image belongs to
- - May be used to identify a unique project when providing a name to the project argument and multiple projects with such name exist
- type: str
- min_disk:
- description:
- - The minimum disk space (in GB) required to boot this image
- type: int
- min_ram:
- description:
- - The minimum ram (in MB) required to boot this image
- type: int
- is_public:
- description:
- - Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
- type: bool
- default: false
- protected:
- description:
- - Prevent image from being deleted
- type: bool
- default: false
- filename:
- description:
- - The path to the file which has to be uploaded
- type: str
- ramdisk:
- description:
- - The name of an existing ramdisk image that will be associated with this image
- type: str
- kernel:
- description:
- - The name of an existing kernel image that will be associated with this image
- type: str
- properties:
- description:
- - Additional properties to be associated with this image
- default: {}
- type: dict
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- tags:
- description:
- - List of tags to be applied to the image
- default: []
- type: list
- elements: str
- volume:
- description:
- - ID of a volume to create an image from.
- - The volume must be in AVAILABLE state.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
-- openstack.cloud.image:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- user_domain_name: Default
- project_domain_name: Default
- name: cirros
- container_format: bare
- disk_format: qcow2
- state: present
- filename: cirros-0.3.0-x86_64-disk.img
- kernel: cirros-vmlinuz
- ramdisk: cirros-initrd
- tags:
- - custom
- properties:
- cpu_arch: x86_64
- distro: ubuntu
-
-# Create image from volume attached to an instance
-- name: create volume snapshot
- openstack.cloud.volume_snapshot:
- auth:
- "{{ auth }}"
- display_name: myvol_snapshot
- volume: myvol
- force: yes
- register: myvol_snapshot
-
-- name: create volume from snapshot
- openstack.cloud.volume:
- auth:
- "{{ auth }}"
- size: "{{ myvol_snapshot.snapshot.size }}"
- snapshot_id: "{{ myvol_snapshot.snapshot.id }}"
- display_name: myvol_snapshot_volume
- wait: yes
- register: myvol_snapshot_volume
-
-- name: create image from volume snapshot
- openstack.cloud.image:
- auth:
- "{{ auth }}"
- volume: "{{ myvol_snapshot_volume.volume.id }}"
- name: myvol_image
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ImageModule(OpenStackModule):
-
- deprecated_names = ('os_image', 'openstack.cloud.os_image')
-
- argument_spec = dict(
- name=dict(required=True, type='str'),
- id=dict(type='str'),
- checksum=dict(type='str'),
- disk_format=dict(default='qcow2',
- choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
- container_format=dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
- project=dict(type='str', aliases=['owner']),
- project_domain=dict(type='str'),
- min_disk=dict(type='int', default=0),
- min_ram=dict(type='int', default=0),
- is_public=dict(type='bool', default=False),
- protected=dict(type='bool', default=False),
- filename=dict(type='str'),
- ramdisk=dict(type='str'),
- kernel=dict(type='str'),
- properties=dict(type='dict', default={}),
- volume=dict(type='str'),
- tags=dict(type='list', default=[], elements='str'),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- mutually_exclusive=[['filename', 'volume']],
- )
-
- def run(self):
-
- changed = False
- if self.params['id']:
- image = self.conn.get_image(name_or_id=self.params['id'])
- elif self.params['checksum']:
- image = self.conn.get_image(name_or_id=self.params['name'], filters={'checksum': self.params['checksum']})
- else:
- image = self.conn.get_image(name_or_id=self.params['name'])
-
- if self.params['state'] == 'present':
- if not image:
- kwargs = {}
- if self.params['id'] is not None:
- kwargs['id'] = self.params['id']
- if self.params['project']:
- project_domain = {'id': None}
- if self.params['project_domain']:
- project_domain = self.conn.get_domain(name_or_id=self.params['project_domain'])
- if not project_domain or project_domain['id'] is None:
- self.fail(msg='Project domain %s could not be found' % self.params['project_domain'])
- project = self.conn.get_project(name_or_id=self.params['project'], domain_id=project_domain['id'])
- if not project:
- self.fail(msg='Project %s could not be found' % self.params['project'])
- kwargs['owner'] = project['id']
- image = self.conn.create_image(
- name=self.params['name'],
- filename=self.params['filename'],
- disk_format=self.params['disk_format'],
- container_format=self.params['container_format'],
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- is_public=self.params['is_public'],
- protected=self.params['protected'],
- min_disk=self.params['min_disk'],
- min_ram=self.params['min_ram'],
- volume=self.params['volume'],
- tags=self.params['tags'],
- **kwargs
- )
- changed = True
- if not self.params['wait']:
- self.exit(changed=changed, image=image, id=image.id)
-
- self.conn.update_image_properties(
- image=image,
- kernel=self.params['kernel'],
- ramdisk=self.params['ramdisk'],
- protected=self.params['protected'],
- **self.params['properties'])
- if self.params['tags']:
- self.conn.image.update_image(image.id, tags=self.params['tags'])
- image = self.conn.get_image(name_or_id=image.id)
- self.exit(changed=changed, image=image, id=image.id)
-
- elif self.params['state'] == 'absent':
- if not image:
- changed = False
- else:
- self.conn.delete_image(
- name_or_id=self.params['name'],
- wait=self.params['wait'],
- timeout=self.params['timeout'])
- changed = True
- self.exit(changed=changed)
-
-
-def main():
- module = ImageModule()
- module()
-
-
-if __name__ == '__main__':
- main()
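-
-# A minimal sketch of the underlying openstacksdk call used above to upload an
-# image; the cloud name, file name and timeout are illustrative assumptions.
-import openstack
-conn = openstack.connect(cloud='mycloud')
-image = conn.create_image(name='cirros', filename='cirros-0.3.0-x86_64-disk.img',
-                          disk_format='qcow2', container_format='bare',
-                          is_public=False, wait=True, timeout=600)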
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_image_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_image_info.py
deleted file mode 100644
index f02079c00..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_image_info.py
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: image_info
-short_description: Retrieve information about an image within OpenStack.
-author: OpenStack Ansible SIG
-description:
- - Retrieve information about an image from OpenStack.
- - This module was called C(openstack.cloud.image_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.image_info) module no longer returns C(ansible_facts)!
-options:
- image:
- description:
- - Name or ID of the image
- required: false
- type: str
- filters:
- description:
- - Dict of properties of the images used for query
- type: dict
- required: false
- aliases: ['properties']
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Gather information about a previously created image named image1
- openstack.cloud.image_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- image: image1
- register: result
-
-- name: Show openstack information
- debug:
- msg: "{{ result.image }}"
-
-# Show all available OpenStack images
-- name: Retrieve all available OpenStack images
- openstack.cloud.image_info:
- register: result
-
-- name: Show images
- debug:
- msg: "{{ result.image }}"
-
-# Show images matching requested properties
-- name: Retrieve images having properties with desired values
- openstack.cloud.image_info:
- filters:
- some_property: some_value
- OtherProp: OtherVal
- register: result
-
-- name: Show images
- debug:
- msg: "{{ result.image }}"
-'''
-
-RETURN = '''
-openstack_images:
- description: has all the openstack information about the image
- returned: always, but can be null
- type: complex
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the image.
- returned: success
- type: str
- status:
- description: Image status.
- returned: success
- type: str
- created_at:
- description: Image created at timestamp.
- returned: success
- type: str
- container_format:
- description: Container format of the image.
- returned: success
- type: str
- direct_url:
- description: URL to access the image file kept in external store.
- returned: success
- type: str
- min_ram:
- description: Min amount of RAM required for this image.
- returned: success
- type: int
- disk_format:
- description: Disk format of the image.
- returned: success
- type: str
- file:
- description: The URL for the virtual machine image file.
- returned: success
- type: str
- os_hidden:
- description: Controls whether an image is displayed in the default image-list response
- returned: success
- type: bool
- locations:
- description: A list of URLs to access the image file in external store.
- returned: success
- type: list
- metadata:
- description: The location metadata.
- returned: success
- type: str
- schema:
- description: URL for the schema describing a virtual machine image.
- returned: success
- type: str
- updated_at:
- description: Image updated at timestamp.
- returned: success
- type: str
- virtual_size:
- description: The virtual size of the image.
- returned: success
- type: str
- min_disk:
- description: Min amount of disk space required for this image.
- returned: success
- type: int
- is_protected:
- description: Image protected flag.
- returned: success
- type: bool
- checksum:
- description: Checksum for the image.
- returned: success
- type: str
- owner:
- description: Owner for the image.
- returned: success
- type: str
- visibility:
- description: Indicates who has access to the image.
- returned: success
- type: str
- size:
- description: Size of the image.
- returned: success
- type: int
- tags:
- description: List of tags assigned to the image
- returned: success
- type: list
-'''
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ImageInfoModule(OpenStackModule):
-
- deprecated_names = ('openstack.cloud.os_image_facts', 'openstack.cloud.os_image_info')
-
- argument_spec = dict(
- image=dict(type='str', required=False),
- filters=dict(type='dict', required=False, aliases=['properties']),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def run(self):
- args = {
- 'name_or_id': self.params['image'],
- 'filters': self.params['filters'],
- }
- args = {k: v for k, v in args.items() if v is not None}
- images = self.conn.search_images(**args)
-
- # for backward compatibility
- if 'name_or_id' in args:
- image = images[0] if images else None
- else:
- image = images
-
- self.exit(changed=False, openstack_images=images, image=image)
-
-
-def main():
- module = ImageInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
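-
-# The same lookup with openstacksdk directly (illustrative; 'mycloud' and the
-# filter values are assumptions):
-import openstack
-conn = openstack.connect(cloud='mycloud')
-images = conn.search_images(name_or_id='cirros', filters={'status': 'active'})
-image = images[0] if images else None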
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_ironic.py b/ansible_collections/openstack/cloud/plugins/modules/os_ironic.py
deleted file mode 100644
index 1adb560db..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_ironic.py
+++ /dev/null
@@ -1,441 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# (c) 2014, Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: baremetal_node
-short_description: Create/Delete Bare Metal Resources from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Create or Remove Ironic nodes from OpenStack.
-options:
- state:
- description:
- - Indicates desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- uuid:
- description:
- - globally unique identifier (UUID) to be given to the resource. Will
- be auto-generated if not specified and a name is specified.
- - Definition of a UUID will always take precedence over a name value.
- type: str
- name:
- description:
- - unique name identifier to be given to the resource.
- type: str
- driver:
- description:
- - The name of the Ironic Driver to use with this node.
- - Required when I(state=present)
- type: str
- chassis_uuid:
- description:
- - Associate the node with a pre-defined chassis.
- type: str
- ironic_url:
- description:
- - If noauth mode is utilized, this is required to be set to the
- endpoint URL for the Ironic API. Use with "auth" and "auth_type"
- settings set to None.
- type: str
- resource_class:
- description:
- - The specific resource type to which this node belongs.
- type: str
- bios_interface:
- description:
- - The bios interface for this node, e.g. "no-bios".
- type: str
- boot_interface:
- description:
- - The boot interface for this node, e.g. "pxe".
- type: str
- console_interface:
- description:
- - The console interface for this node, e.g. "no-console".
- type: str
- deploy_interface:
- description:
- - The deploy interface for this node, e.g. "iscsi".
- type: str
- inspect_interface:
- description:
- - The interface used for node inspection, e.g. "no-inspect".
- type: str
- management_interface:
- description:
- - The interface for out-of-band management of this node, e.g.
- "ipmitool".
- type: str
- network_interface:
- description:
- - The network interface provider to use when describing
- connections for this node.
- type: str
- power_interface:
- description:
- - The interface used to manage power actions on this node, e.g.
- "ipmitool".
- type: str
- raid_interface:
- description:
- - Interface used for configuring raid on this node.
- type: str
- rescue_interface:
- description:
- - Interface used for node rescue, e.g. "no-rescue".
- type: str
- storage_interface:
- description:
- - Interface used for attaching and detaching volumes on this node, e.g.
- "cinder".
- type: str
- vendor_interface:
- description:
- - Interface for all vendor-specific actions on this node, e.g.
- "no-vendor".
- type: str
- driver_info:
- description:
- - Information for this server's driver. Will vary based on which
- driver is in use. Any sub-field which is populated will be validated
- during creation. For compatibility reasons sub-fields `power`,
- `deploy`, `management` and `console` are flattened.
- required: true
- type: dict
- nics:
- description:
- - 'A list of network interface cards, e.g., " - mac: aa:bb:cc:aa:bb:cc"'
- required: true
- type: list
- elements: dict
- suboptions:
- mac:
- description: The MAC address of the network interface card.
- type: str
- required: true
- properties:
- description:
- - Definition of the physical characteristics of this server, used for scheduling purposes
- type: dict
- suboptions:
- cpu_arch:
- description:
- - CPU architecture (x86_64, i686, ...)
- default: x86_64
- cpus:
- description:
- - Number of CPU cores this machine has
- default: 1
- ram:
- description:
- - amount of RAM this machine has, in MB
- default: 1
- disk_size:
- description:
- - size of first storage device in this machine (typically /dev/sda), in GB
- default: 1
- capabilities:
- description:
- - special capabilities for the node, such as boot_option, node_role etc
- (see U(https://docs.openstack.org/ironic/latest/install/advanced.html)
- for more information)
- default: ""
- root_device:
- description:
- - Root disk device hints for deployment.
- - See U(https://docs.openstack.org/ironic/latest/install/advanced.html#specifying-the-disk-for-deployment-root-device-hints)
- for allowed hints.
- default: ""
- skip_update_of_masked_password:
- description:
- - Allows the code that would assert changes to nodes to skip the
- update if the change is a single line consisting of the password
- field.
- - As of Kilo, by default, passwords are always masked to API
- requests, which means the logic as a result always attempts to
- re-assert the password field.
- - C(skip_update_of_driver_password) is deprecated alias and will be removed in openstack.cloud 2.0.0.
- type: bool
- aliases:
- - skip_update_of_driver_password
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
- - "jsonpatch"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Enroll a node with some basic properties and driver info
-- openstack.cloud.baremetal_node:
- cloud: "devstack"
- driver: "pxe_ipmitool"
- uuid: "00000000-0000-0000-0000-000000000002"
- properties:
- cpus: 2
- cpu_arch: "x86_64"
- ram: 8192
- disk_size: 64
- capabilities: "boot_option:local"
- root_device:
- wwn: "0x4000cca77fc4dba1"
- nics:
- - mac: "aa:bb:cc:aa:bb:cc"
- - mac: "dd:ee:ff:dd:ee:ff"
- driver_info:
- ipmi_address: "1.2.3.4"
- ipmi_username: "admin"
- ipmi_password: "adminpass"
- chassis_uuid: "00000000-0000-0000-0000-000000000001"
-
-'''
-
-try:
- import jsonpatch
- HAS_JSONPATCH = True
-except ImportError:
- HAS_JSONPATCH = False
-
-
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
-)
-
-
-_PROPERTIES = {
- 'cpu_arch': 'cpu_arch',
- 'cpus': 'cpus',
- 'ram': 'memory_mb',
- 'disk_size': 'local_gb',
- 'capabilities': 'capabilities',
- 'root_device': 'root_device',
-}
-
-
-def _parse_properties(module):
- """Convert ansible properties into native ironic values.
-
- Also filter out any properties that are not set.
- """
- p = module.params['properties']
- return {to_key: p[from_key] for (from_key, to_key) in _PROPERTIES.items()
- if p.get(from_key) is not None}
-
-
-def _parse_driver_info(sdk, module):
- info = module.params['driver_info'].copy()
- for deprecated in ('power', 'console', 'management', 'deploy'):
- if deprecated in info:
- info.update(info.pop(deprecated))
- module.deprecate("Suboption %s of the driver_info parameter of "
- "'openstack.cloud.baremetal_node' is deprecated"
- % deprecated, version='2.0.0',
- collection_name='openstack.cloud')
- return info
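- # Illustration (not from the original module): a driver_info of
- # {'power': {'ipmi_address': '1.2.3.4'}, 'ipmi_username': 'admin'} is
- # flattened to {'ipmi_address': '1.2.3.4', 'ipmi_username': 'admin'},
- # and a deprecation warning is emitted for the 'power' suboption.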
-
-
-def _choose_id_value(module):
- if module.params['uuid']:
- return module.params['uuid']
- if module.params['name']:
- return module.params['name']
- return None
-
-
-def _choose_if_password_only(module, patch):
- if len(patch) == 1:
- if 'password' in patch[0]['path'] and module.params['skip_update_of_masked_password']:
- # Return false to abort update as the password appears
- # to be the only element in the patch.
- return False
- return True
-
-
-def _exit_node_not_updated(module, server):
- module.exit_json(
- changed=False,
- result="Node not updated",
- uuid=server['uuid'],
- provision_state=server['provision_state']
- )
-
-
-def main():
- argument_spec = ironic_argument_spec(
- uuid=dict(required=False),
- name=dict(required=False),
- driver=dict(required=False),
- resource_class=dict(required=False),
- bios_interface=dict(required=False),
- boot_interface=dict(required=False),
- console_interface=dict(required=False),
- deploy_interface=dict(required=False),
- inspect_interface=dict(required=False),
- management_interface=dict(required=False),
- network_interface=dict(required=False),
- power_interface=dict(required=False),
- raid_interface=dict(required=False),
- rescue_interface=dict(required=False),
- storage_interface=dict(required=False),
- vendor_interface=dict(required=False),
- driver_info=dict(type='dict', required=True),
- nics=dict(type='list', required=True, elements="dict"),
- properties=dict(type='dict', default={}),
- chassis_uuid=dict(required=False),
- skip_update_of_masked_password=dict(
- required=False,
- type='bool',
- aliases=['skip_update_of_driver_password'],
- deprecated_aliases=[dict(
- name='skip_update_of_driver_password',
- version='2.0.0',
- collection_name='openstack.cloud')]
- ),
- state=dict(required=False, default='present', choices=['present', 'absent'])
- )
- module_kwargs = openstack_module_kwargs()
- module = IronicModule(argument_spec, **module_kwargs)
-
- if not HAS_JSONPATCH:
- module.fail_json(msg='jsonpatch is required for this module')
-
- node_id = _choose_id_value(module)
-
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- server = cloud.get_machine(node_id)
- if module.params['state'] == 'present':
- if module.params['driver'] is None:
- module.fail_json(msg="A driver must be defined in order "
- "to set a node to present.")
-
- properties = _parse_properties(module)
- driver_info = _parse_driver_info(sdk, module)
- kwargs = dict(
- driver=module.params['driver'],
- properties=properties,
- driver_info=driver_info,
- name=module.params['name'],
- )
- optional_field_names = ('resource_class',
- 'bios_interface',
- 'boot_interface',
- 'console_interface',
- 'deploy_interface',
- 'inspect_interface',
- 'management_interface',
- 'network_interface',
- 'power_interface',
- 'raid_interface',
- 'rescue_interface',
- 'storage_interface',
- 'vendor_interface')
- for i in optional_field_names:
- if module.params[i]:
- kwargs[i] = module.params[i]
-
- if module.params['chassis_uuid']:
- kwargs['chassis_uuid'] = module.params['chassis_uuid']
-
- if server is None:
- # Note(TheJulia): Add a specific UUID to the request if
- # present in order to be able to re-use kwargs for if
- # the node already exists logic, since uuid cannot be
- # updated.
- if module.params['uuid']:
- kwargs['uuid'] = module.params['uuid']
-
- server = cloud.register_machine(module.params['nics'],
- **kwargs)
- module.exit_json(changed=True, uuid=server['uuid'],
- provision_state=server['provision_state'])
- else:
- # TODO(TheJulia): Presently this does not support updating
- # nics. Support needs to be added.
- #
- # Note(TheJulia): This message should never get logged
- # however we cannot realistically proceed if neither a
- # name or uuid was supplied to begin with.
- if not node_id:
- module.fail_json(msg="A uuid or name value "
- "must be defined")
-
- # Note(TheJulia): Constructing the configuration to compare
- # against. The items listed in the server_config block can
- # be updated via the API.
-
- server_config = dict(
- driver=server['driver'],
- properties=server['properties'],
- driver_info=server['driver_info'],
- name=server['name'],
- )
-
- # Add the pre-existing chassis_uuid only if
- # it is present in the server configuration.
- if hasattr(server, 'chassis_uuid'):
- server_config['chassis_uuid'] = server['chassis_uuid']
-
- # Note(TheJulia): If a password is defined and concealed, a
- # patch will always be generated and re-asserted.
- patch = jsonpatch.JsonPatch.from_diff(server_config, kwargs)
-
- if not patch:
- _exit_node_not_updated(module, server)
- elif _choose_if_password_only(module, list(patch)):
- # Note(TheJulia): Normally we would allow the general
- # exception catch below, however this allows a specific
- # message.
- try:
- server = cloud.patch_machine(
- server['uuid'],
- list(patch))
- except Exception as e:
- module.fail_json(msg="Failed to update node, "
- "Error: %s" % e.message)
-
- # Enumerate out a list of changed paths.
- change_list = []
- for change in list(patch):
- change_list.append(change['path'])
- module.exit_json(changed=True,
- result="Node Updated",
- changes=change_list,
- uuid=server['uuid'],
- provision_state=server['provision_state'])
-
- # Return not updated by default as the conditions were not met
- # to update.
- _exit_node_not_updated(module, server)
-
- if module.params['state'] == 'absent':
- if not node_id:
- module.fail_json(msg="A uuid or name value must be defined "
- "in order to remove a node.")
-
- if server is not None:
- cloud.unregister_machine(module.params['nics'],
- server['uuid'])
- module.exit_json(changed=True, result="deleted")
- else:
- module.exit_json(changed=False, result="Server not found")
-
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == "__main__":
- main()
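-
-# A self-contained sketch of the jsonpatch diffing used above, with
-# illustrative values:
-import jsonpatch
-current = {'driver': 'ipmi', 'properties': {'cpus': 1}}
-desired = {'driver': 'ipmi', 'properties': {'cpus': 2}}
-patch = jsonpatch.JsonPatch.from_diff(current, desired)
-# list(patch) == [{'op': 'replace', 'path': '/properties/cpus', 'value': 2}]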
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_ironic_inspect.py b/ansible_collections/openstack/cloud/plugins/modules/os_ironic_inspect.py
deleted file mode 100644
index f7d90d1c5..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_ironic_inspect.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# (c) 2015-2016, Hewlett Packard Enterprise Development Company LP
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: baremetal_inspect
-short_description: Explicitly triggers baremetal node introspection in ironic.
-author: OpenStack Ansible SIG
-description:
- - Requests Ironic to set a node into inspect state in order to collect metadata regarding the node.
- This command may be out of band or in-band depending on the ironic driver configuration.
- This is only possible on nodes in 'manageable' and 'available' state.
-options:
- mac:
- description:
- - unique MAC address that is used to attempt to identify the host.
- type: str
- uuid:
- description:
- - globally unique identifier (UUID) to identify the host.
- type: str
- name:
- description:
- - unique name identifier to identify the host in Ironic.
- type: str
- ironic_url:
- description:
- - If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API.
- Use with "auth" and "auth_type" settings set to None.
- type: str
- timeout:
- description:
- - A timeout in seconds to tell the module how long to wait for the node to complete introspection if I(wait) is set to True.
- default: 1200
- type: int
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-ansible_facts:
- description: Dictionary of new facts representing discovered properties of the node.
- returned: changed
- type: complex
- contains:
- memory_mb:
- description: Amount of node memory as updated in the node properties
- type: str
- sample: "1024"
- cpu_arch:
- description: Detected CPU architecture type
- type: str
- sample: "x86_64"
- local_gb:
- description: Total size of local disk storage as updated in node properties.
- type: str
- sample: "10"
- cpus:
- description: Count of cpu cores defined in the updated node properties.
- type: str
- sample: "1"
-'''
-
-EXAMPLES = '''
-# Invoke node inspection
-- openstack.cloud.baremetal_inspect:
- name: "testnode1"
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
-)
-
-
-def _choose_id_value(module):
- if module.params['uuid']:
- return module.params['uuid']
- if module.params['name']:
- return module.params['name']
- return None
-
-
-def main():
- argument_spec = ironic_argument_spec(
- uuid=dict(required=False),
- name=dict(required=False),
- mac=dict(required=False),
- timeout=dict(default=1200, type='int', required=False),
- )
- module_kwargs = openstack_module_kwargs()
- module = IronicModule(argument_spec, **module_kwargs)
-
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- if module.params['name'] or module.params['uuid']:
- server = cloud.get_machine(_choose_id_value(module))
- elif module.params['mac']:
- server = cloud.get_machine_by_mac(module.params['mac'])
- else:
- module.fail_json(msg="The worlds did not align, "
- "the host was not found as "
- "no name, uuid, or mac was "
- "defined.")
- if server:
- cloud.inspect_machine(server['uuid'], module.params['wait'])
- # TODO(TheJulia): diff properties, ?and ports? and determine
- # if a change occurred. In theory, the node is always changed
- # if introspection is able to update the record.
- module.exit_json(changed=True,
- ansible_facts=server['properties'])
-
- else:
- module.fail_json(msg="node not found.")
-
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == "__main__":
- main()
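The baremetal_inspect module deleted above wraps a node lookup plus a single inspect call. A minimal sketch of the same flow outside Ansible, assuming a clouds.yaml entry named 'mycloud'; the node name comes from the module's EXAMPLES and the MAC is a placeholder:

import openstack

conn = openstack.connect(cloud='mycloud')

# Find the node the same way the module did: by name/UUID or by MAC.
node = conn.get_machine('testnode1')  # or conn.get_machine_by_mac('52:54:00:00:00:01')
if node is None:
    raise SystemExit('node not found')

# Trigger introspection and wait for it, then read back the properties
# that the module returned as ansible_facts.
conn.inspect_machine(node['uuid'], wait=True)
node = conn.get_machine(node['uuid'])
print(node['properties'])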
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_ironic_node.py b/ansible_collections/openstack/cloud/plugins/modules/os_ironic_node.py
deleted file mode 100644
index 267e43088..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_ironic_node.py
+++ /dev/null
@@ -1,362 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# (c) 2015, Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: baremetal_node_action
-short_description: Activate/Deactivate Bare Metal Resources from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Deploy to nodes controlled by Ironic.
-options:
- name:
- description:
- - Name of the node to act on.
- type: str
- state:
- description:
- - Indicates desired state of the resource.
- - I(state) can be C('present'), C('absent'), C('maintenance') or C('off').
- default: present
- type: str
- deploy:
- description:
- - Indicates if the resource should be deployed. Allows for deployment
- logic to be disengaged and control of the node power or maintenance
- state to be changed.
- type: str
- default: 'yes'
- uuid:
- description:
- - globally unique identifier (UUID) to be given to the resource.
- type: str
- ironic_url:
- description:
- - If noauth mode is utilized, this is required to be set to the
- endpoint URL for the Ironic API. Use with "auth" and "auth_type"
- settings set to None.
- type: str
- config_drive:
- description:
- - A configdrive file or HTTP(S) URL that will be passed along to the
- node.
- type: raw
- instance_info:
- description:
- - Definition of the instance information which is used to deploy
- the node. This information is only required when an instance is
- set to present.
- type: dict
- suboptions:
- image_source:
- description:
- - An HTTP(S) URL where the image can be retrieved from.
- image_checksum:
- description:
- - The checksum of image_source.
- image_disk_format:
- description:
- - The type of image that has been requested to be deployed.
- power:
- description:
- - A setting to allow power state to be asserted allowing nodes
- that are not yet deployed to be powered on, and nodes that
- are deployed to be powered off.
- - I(power) can be C('present'), C('absent'), C('maintenance') or C('off').
- default: present
- type: str
- maintenance:
- description:
- - A setting to allow direct control of whether a node is in
- maintenance mode.
- - I(maintenance) can be C('yes'), C('no'), C('True'), or C('False').
- type: str
- maintenance_reason:
- description:
- - A string expression regarding the reason a node is in a
- maintenance mode.
- type: str
- wait:
- description:
- - A boolean value instructing the module to wait for node
- activation or deactivation to complete before returning.
- type: bool
- default: 'no'
- timeout:
- description:
- - An integer value representing the number of seconds to
- wait for the node activation or deactivation to complete.
- default: 1800
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Activate a node by booting an image with a configdrive attached
-- openstack.cloud.baremetal_node_action:
- cloud: "openstack"
- uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
- state: present
- power: present
- deploy: True
- maintenance: False
- config_drive: "http://192.168.1.1/host-configdrive.iso"
- instance_info:
- image_source: "http://192.168.1.1/deploy_image.img"
- image_checksum: "356a6b55ecc511a20c33c946c4e678af"
- image_disk_format: "qcow"
- delegate_to: localhost
-
-# Activate a node by booting an image with a configdrive json object
-- openstack.cloud.baremetal_node_action:
- uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
- auth_type: None
- ironic_url: "http://192.168.1.1:6385/"
- config_drive:
- meta_data:
- hostname: node1
- public_keys:
- default: ssh-rsa AAA...BBB==
- instance_info:
- image_source: "http://192.168.1.1/deploy_image.img"
- image_checksum: "356a6b55ecc511a20c33c946c4e678af"
- image_disk_format: "qcow"
- delegate_to: localhost
-'''
-
-
-from ansible_collections.openstack.cloud.plugins.module_utils.ironic import (
- IronicModule,
- ironic_argument_spec,
-)
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_module_kwargs,
- openstack_cloud_from_module
-)
-
-
-def _choose_id_value(module):
- if module.params['uuid']:
- return module.params['uuid']
- if module.params['name']:
- return module.params['name']
- return None
-
-
-def _is_true(value):
- true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']
- if value in true_values:
- return True
- return False
-
-
-def _is_false(value):
- false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off']
- if value in false_values:
- return True
- return False
-
-
-def _check_set_maintenance(module, cloud, node):
- if _is_true(module.params['maintenance']):
- if _is_false(node['maintenance']):
- cloud.set_machine_maintenance_state(
- node['uuid'],
- True,
- reason=module.params['maintenance_reason'])
- module.exit_json(changed=True, msg="Node has been set into "
- "maintenance mode")
- else:
- # User has requested maintenance state, node is already in the
- # desired state, checking to see if the reason has changed.
- if (str(node['maintenance_reason'])
- != str(module.params['maintenance_reason'])):
- cloud.set_machine_maintenance_state(
- node['uuid'],
- True,
- reason=module.params['maintenance_reason'])
- module.exit_json(changed=True, msg="Node maintenance reason "
- "updated, cannot take any "
- "additional action.")
- elif _is_false(module.params['maintenance']):
- if node['maintenance'] is True:
- cloud.remove_machine_from_maintenance(node['uuid'])
- return True
- else:
- module.fail_json(msg="maintenance parameter was set but a valid "
- "the value was not recognized.")
- return False
-
-
-def _check_set_power_state(module, cloud, node):
- if 'power on' in str(node['power_state']):
- if _is_false(module.params['power']):
- # User has requested the node be powered off.
- cloud.set_machine_power_off(node['uuid'])
- module.exit_json(changed=True, msg="Power requested off")
- if 'power off' in str(node['power_state']):
- if (
- _is_false(module.params['power'])
- and _is_false(module.params['state'])
- ):
- return False
- if (
- _is_false(module.params['power'])
- and _is_true(module.params['state'])
- ):
- module.exit_json(
- changed=False,
- msg="Power for node is %s, node must be reactivated "
- "OR set to state absent" % node['power_state']
- )
- # In the event the power has been toggled on and
- # deployment has been requested, we need to skip this
- # step.
- if (
- _is_true(module.params['power'])
- and _is_false(module.params['deploy'])
- ):
- # Node is powered down when it is not awaiting to be provisioned
- cloud.set_machine_power_on(node['uuid'])
- return True
- # Default False if no action has been taken.
- return False
-
-
-def main():
- argument_spec = ironic_argument_spec(
- uuid=dict(required=False),
- name=dict(required=False),
- instance_info=dict(type='dict', required=False),
- config_drive=dict(type='raw', required=False),
- state=dict(required=False, default='present'),
- maintenance=dict(required=False),
- maintenance_reason=dict(required=False),
- power=dict(required=False, default='present'),
- deploy=dict(required=False, default='yes'),
- wait=dict(type='bool', required=False, default=False),
- timeout=dict(required=False, type='int', default=1800),
- )
- module_kwargs = openstack_module_kwargs()
- module = IronicModule(argument_spec, **module_kwargs)
-
- if (
- module.params['config_drive']
- and not isinstance(module.params['config_drive'], (str, dict))
- ):
- config_drive_type = type(module.params['config_drive'])
- msg = ('argument config_drive is of type %s and we expected'
- ' str or dict') % config_drive_type
- module.fail_json(msg=msg)
-
- node_id = _choose_id_value(module)
-
- if not node_id:
- module.fail_json(msg="A uuid or name value must be defined "
- "to use this module.")
- sdk, cloud = openstack_cloud_from_module(module)
- try:
- node = cloud.get_machine(node_id)
-
- if node is None:
- module.fail_json(msg="node not found")
-
- uuid = node['uuid']
- instance_info = module.params['instance_info']
- changed = False
- wait = module.params['wait']
- timeout = module.params['timeout']
-
- # User has requested desired state to be in maintenance state.
- if module.params['state'] == 'maintenance':
- module.params['maintenance'] = True
-
- if node['provision_state'] in [
- 'cleaning',
- 'deleting',
- 'wait call-back']:
- module.fail_json(msg="Node is in %s state, cannot act upon the "
- "request as the node is in a transition "
- "state" % node['provision_state'])
- # TODO(TheJulia) This is in-development code that requires
- # code in the shade library that is still in development.
- if _check_set_maintenance(module, cloud, node):
- if node['provision_state'] in 'active':
- module.exit_json(changed=True,
- result="Maintenance state changed")
- changed = True
- node = cloud.get_machine(node_id)
-
- if _check_set_power_state(module, cloud, node):
- changed = True
- node = cloud.get_machine(node_id)
-
- if _is_true(module.params['state']):
- if _is_false(module.params['deploy']):
- module.exit_json(
- changed=changed,
- result="User request has explicitly disabled "
- "deployment logic"
- )
-
- if 'active' in node['provision_state']:
- module.exit_json(
- changed=changed,
- result="Node already in an active state."
- )
-
- if instance_info is None:
- module.fail_json(
- changed=changed,
- msg="When setting an instance to present, "
- "instance_info is a required variable.")
-
- # TODO(TheJulia): Update instance info, however info is
- # deployment specific. Perhaps consider adding rebuild
- # support, although there is a known desire to remove
- # rebuild support from Ironic at some point in the future.
- cloud.update_machine(uuid, instance_info=instance_info)
- cloud.validate_node(uuid)
- if not wait:
- cloud.activate_node(uuid, module.params['config_drive'])
- else:
- cloud.activate_node(
- uuid,
- configdrive=module.params['config_drive'],
- wait=wait,
- timeout=timeout)
- # TODO(TheJulia): Add more error checking..
- module.exit_json(changed=changed, result="node activated")
-
- elif _is_false(module.params['state']):
- if node['provision_state'] not in "deleted":
- cloud.update_machine(uuid, instance_info={})
- if not wait:
- cloud.deactivate_node(uuid)
- else:
- cloud.deactivate_node(
- uuid,
- wait=wait,
- timeout=timeout)
-
- module.exit_json(changed=True, result="deleted")
- else:
- module.exit_json(changed=False, result="node not found")
- else:
- module.fail_json(msg="State must be present, absent, "
- "maintenance, off")
-
- except sdk.exceptions.OpenStackCloudException as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == "__main__":
- main()
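For the deploy path removed above, the sequence is update, validate, then activate. A minimal sketch under the same assumptions used in the module's EXAMPLES; the cloud name, UUID, image URLs and checksum are all placeholders:

import openstack

conn = openstack.connect(cloud='openstack')

node = conn.get_machine('d44666e1-35b3-4f6b-acb0-88ab7052da69')
if node is None:
    raise SystemExit('node not found')

# instance_info is deployment specific, exactly as the deleted module
# required when state=present; every value here is a placeholder.
instance_info = {
    'image_source': 'http://192.168.1.1/deploy_image.img',
    'image_checksum': '356a6b55ecc511a20c33c946c4e678af',
    'image_disk_format': 'qcow',
}

conn.update_machine(node['uuid'], instance_info=instance_info)
conn.validate_node(node['uuid'])
conn.activate_node(node['uuid'],
                   configdrive='http://192.168.1.1/host-configdrive.iso',
                   wait=True, timeout=1800)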
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keypair.py b/ansible_collections/openstack/cloud/plugins/modules/os_keypair.py
deleted file mode 100644
index 232d4985e..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keypair.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2013, Benno Joy <benno@ansible.com>
-# Copyright (c) 2013, John Dewey <john@dewey.ws>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: keypair
-short_description: Add/Delete a keypair from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Add or Remove key pair from OpenStack
-options:
- name:
- description:
- - Name that has to be given to the key pair
- required: true
- type: str
- public_key:
- description:
- - The public key that would be uploaded to nova and injected into VMs
- upon creation.
- type: str
- public_key_file:
- description:
- - Path to local file containing ssh public key. Mutually exclusive
- with public_key.
- type: str
- state:
- description:
- - Should the resource be present or absent. If state is replace and
- the key exists but has different content, delete it and recreate it
- with the new content.
- choices: [present, absent, replace]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Creates a key pair with the running users public key
-- openstack.cloud.keypair:
- cloud: mordred
- state: present
- name: ansible_key
- public_key_file: /home/me/.ssh/id_rsa.pub
-
-# Creates a new key pair; the generated private key is returned after the run.
-- openstack.cloud.keypair:
- cloud: rax-dfw
- state: present
- name: ansible_key
-'''
-
-RETURN = '''
-id:
- description: Unique UUID.
- returned: success
- type: str
-name:
- description: Name given to the keypair.
- returned: success
- type: str
-public_key:
- description: The public key value for the keypair.
- returned: success
- type: str
-private_key:
- description: The private key value for the keypair.
- returned: Only when a keypair is generated for the user (e.g., when creating one
- and a public key is not specified).
- type: str
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- OpenStackModule)
-
-
-class KeyPairModule(OpenStackModule):
- deprecated_names = ('os_keypair', 'openstack.cloud.os_keypair')
-
- argument_spec = dict(
- name=dict(required=True),
- public_key=dict(default=None),
- public_key_file=dict(default=None),
- state=dict(default='present',
- choices=['absent', 'present', 'replace']),
- )
-
- module_kwargs = dict(
- mutually_exclusive=[['public_key', 'public_key_file']])
-
- def _system_state_change(self, keypair):
- state = self.params['state']
- if state == 'present' and not keypair:
- return True
- if state == 'absent' and keypair:
- return True
- return False
-
- def run(self):
-
- state = self.params['state']
- name = self.params['name']
- public_key = self.params['public_key']
-
- if self.params['public_key_file']:
- with open(self.params['public_key_file']) as public_key_fh:
- public_key = public_key_fh.read()
-
- keypair = self.conn.get_keypair(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(keypair))
-
- if state in ('present', 'replace'):
- if keypair and keypair['name'] == name:
- if public_key and (public_key != keypair['public_key']):
- if state == 'present':
- self.fail_json(
- msg="Key name %s present but key hash not the same"
- " as offered. Delete key first." % name
- )
- else:
- self.conn.delete_keypair(name)
- keypair = self.conn.create_keypair(name, public_key)
- changed = True
- else:
- changed = False
- else:
- keypair = self.conn.create_keypair(name, public_key)
- changed = True
-
- self.exit_json(changed=changed, key=keypair, id=keypair['id'])
-
- elif state == 'absent':
- if keypair:
- self.conn.delete_keypair(name)
- self.exit_json(changed=True)
- self.exit_json(changed=False)
-
-
-def main():
- module = KeyPairModule()
- module()
-
-
-if __name__ == '__main__':
- main()
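The only non-obvious part of the deleted keypair module is the state=replace branch. A minimal sketch of that branch with the same SDK calls; the cloud name, key name and key path are placeholders taken from the EXAMPLES:

import openstack

conn = openstack.connect(cloud='mordred')

name = 'ansible_key'
with open('/home/me/.ssh/id_rsa.pub') as fh:
    public_key = fh.read()

keypair = conn.get_keypair(name)
if keypair and keypair['public_key'] != public_key:
    # state=replace behaviour: drop the stale key and re-upload it.
    conn.delete_keypair(name)
    keypair = None
if keypair is None:
    keypair = conn.create_keypair(name, public_key)
print(keypair['id'])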
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_domain.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_domain.py
deleted file mode 100644
index 660748c49..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_domain.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: identity_domain
-short_description: Manage OpenStack Identity Domains
-author: OpenStack Ansible SIG
-description:
- - Create, update, or delete OpenStack Identity domains. If a domain
- with the supplied name already exists, it will be updated with the
- new description and enabled attributes.
-options:
- name:
- description:
- - Name that has to be given to the domain
- required: true
- type: str
- description:
- description:
- - Description of the domain
- type: str
- enabled:
- description:
- - Is the domain enabled
- type: bool
- default: 'yes'
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a domain
-- openstack.cloud.identity_domain:
- cloud: mycloud
- state: present
- name: demo
- description: Demo Domain
-
-# Delete a domain
-- openstack.cloud.identity_domain:
- cloud: mycloud
- state: absent
- name: demo
-'''
-
-RETURN = '''
-domain:
- description: Dictionary describing the domain.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Domain ID.
- type: str
- sample: "474acfe5-be34-494c-b339-50f06aa143e4"
- name:
- description: Domain name.
- type: str
- sample: "demo"
- description:
- description: Domain description.
- type: str
- sample: "Demo Domain"
- enabled:
- description: Flag to indicate if the domain is enabled.
- type: bool
- sample: True
-
-id:
- description: The domain ID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "474acfe5-be34-494c-b339-50f06aa143e4"
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityDomainModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- description=dict(default=None),
- enabled=dict(default=True, type='bool'),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _needs_update(self, domain):
- if self.params['description'] is not None and \
- domain.description != self.params['description']:
- return True
- if domain.get(
- "is_enabled", domain.get("enabled")) != self.params['enabled']:
- return True
- return False
-
- def _system_state_change(self, domain):
- state = self.params['state']
- if state == 'absent' and domain:
- return True
-
- if state == 'present':
- if domain is None:
- return True
- return self._needs_update(domain)
-
- return False
-
- def run(self):
- name = self.params['name']
- description = self.params['description']
- enabled = self.params['enabled']
- state = self.params['state']
-
- domains = list(self.conn.identity.domains(name=name))
-
- if len(domains) > 1:
- self.fail_json(msg='Domain name %s is not unique' % name)
- elif len(domains) == 1:
- domain = domains[0]
- else:
- domain = None
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(domain))
-
- if state == 'present':
- if domain is None:
- domain = self.conn.create_domain(
- name=name, description=description, enabled=enabled)
- changed = True
- else:
- if self._needs_update(domain):
- domain = self.conn.update_domain(
- domain.id, name=name, description=description,
- enabled=enabled)
- changed = True
- else:
- changed = False
- if hasattr(domain, "to_dict"):
- domain = domain.to_dict()
- domain.pop("location")
- self.exit_json(changed=changed, domain=domain, id=domain['id'])
-
- elif state == 'absent':
- if domain is None:
- changed = False
- else:
- self.conn.delete_domain(domain.id)
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityDomainModule()
- module()
-
-
-if __name__ == '__main__':
- main()
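The create-or-update flow of the deleted identity_domain module, written directly against the SDK for comparison. For brevity this sketch always calls update when the domain exists; 'mycloud' and the attribute values are placeholders:

import openstack

conn = openstack.connect(cloud='mycloud')

name, description, enabled = 'demo', 'Demo Domain', True

domains = list(conn.identity.domains(name=name))
if len(domains) > 1:
    raise SystemExit('Domain name %s is not unique' % name)
elif domains:
    domain = conn.update_domain(domains[0].id, name=name,
                                description=description, enabled=enabled)
else:
    domain = conn.create_domain(name=name, description=description,
                                enabled=enabled)
print(domain['id'])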
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_domain_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_domain_info.py
deleted file mode 100644
index e0e33cde5..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_domain_info.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: identity_domain_info
-short_description: Retrieve information about one or more OpenStack domains
-author: OpenStack Ansible SIG
-description:
- - Retrieve information about one or more OpenStack domains
- - This module was called C(openstack.cloud.identity_domain_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.identity_domain_info) module no longer returns C(ansible_facts)!
-options:
- name:
- description:
- - Name or ID of the domain
- type: str
- filters:
- description:
- - A dictionary of meta data to use for filtering. Elements of
- this dictionary may be additional dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Gather information about previously created domains
-- openstack.cloud.identity_domain_info:
- cloud: awesomecloud
- register: result
-- debug:
- msg: "{{ result.openstack_domains }}"
-
-# Gather information about a previously created domain by name
-- openstack.cloud.identity_domain_info:
- cloud: awesomecloud
- name: demodomain
- register: result
-- debug:
- msg: "{{ result.openstack_domains }}"
-
-# Gather information about a previously created domain with filter
-- openstack.cloud.identity_domain_info:
- cloud: awesomecloud
- name: demodomain
- filters:
- enabled: false
- register: result
-- debug:
- msg: "{{ result.openstack_domains }}"
-'''
-
-
-RETURN = '''
-openstack_domains:
- description: has all the OpenStack information about domains
- returned: always, but can be null
- type: list
- elements: dict
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the domain.
- returned: success
- type: str
- description:
- description: Description of the domain.
- returned: success
- type: str
- enabled:
- description: Flag to indicate if the domain is enabled.
- returned: success
- type: bool
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityDomainInfoModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- deprecated_names = ('openstack.cloud.identity_domain_facts',)
-
- def run(self):
- name = self.params['name']
- filters = self.params['filters'] or {}
-
- args = {}
- if name:
- args['name_or_id'] = name
- args['filters'] = filters
-
- domains = self.conn.search_domains(**args)
- self.exit_json(changed=False, openstack_domains=domains)
-
-
-def main():
- module = IdentityDomainInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
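The deleted info module is essentially a single search call. A minimal sketch; the cloud name, domain name and filter values are placeholders:

import openstack

conn = openstack.connect(cloud='awesomecloud')

# Same call the module made: an optional name_or_id plus a filters dict.
domains = conn.search_domains(name_or_id='demodomain',
                              filters={'enabled': True})
for domain in domains:
    print(domain['id'], domain['name'])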
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_endpoint.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_endpoint.py
deleted file mode 100644
index e7864ecf1..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_endpoint.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2017, VEXXHOST, Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: endpoint
-short_description: Manage OpenStack Identity service endpoints
-author: OpenStack Ansible SIG
-description:
- - Create, update, or delete OpenStack Identity service endpoints. If an
- endpoint with the same combination of I(service), I(interface) and I(region)
- exists, its I(url) and I(enabled) attributes will be updated.
-options:
- service:
- description:
- - Name or id of the service.
- required: true
- type: str
- endpoint_interface:
- description:
- - Interface of the service.
- choices: [admin, public, internal]
- required: true
- type: str
- url:
- description:
- - URL of the service.
- required: true
- type: str
- region:
- description:
- - Region that the service belongs to. Note that I(region_name) is used for authentication.
- type: str
- enabled:
- description:
- - Is the service enabled.
- default: True
- type: bool
- state:
- description:
- - Should the resource be C(present) or C(absent).
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.13.0"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Create a service for glance
- openstack.cloud.endpoint:
- cloud: mycloud
- service: glance
- endpoint_interface: public
- url: http://controller:9292
- region: RegionOne
- state: present
-
-- name: Delete a service for nova
- openstack.cloud.endpoint:
- cloud: mycloud
- service: nova
- endpoint_interface: public
- region: RegionOne
- state: absent
-'''
-
-RETURN = '''
-endpoint:
- description: Dictionary describing the endpoint.
- returned: On success when I(state) is C(present)
- type: complex
- contains:
- id:
- description: Endpoint ID.
- type: str
- sample: 3292f020780b4d5baf27ff7e1d224c44
- interface:
- description: Endpoint Interface.
- type: str
- sample: public
- enabled:
- description: Service status.
- type: bool
- sample: True
- links:
- description: Links for the endpoint
- type: str
- sample: http://controller/identity/v3/endpoints/123
- region:
- description: Same as C(region_id). Deprecated.
- type: str
- sample: RegionOne
- region_id:
- description: Region ID.
- type: str
- sample: RegionOne
- service_id:
- description: Service ID.
- type: str
- sample: b91f1318f735494a825a55388ee118f3
- url:
- description: Service URL.
- type: str
- sample: http://controller:9292
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityEndpointModule(OpenStackModule):
- argument_spec = dict(
- service=dict(type='str', required=True),
- endpoint_interface=dict(type='str', required=True, choices=['admin', 'public', 'internal']),
- url=dict(type='str', required=True),
- region=dict(type='str'),
- enabled=dict(type='bool', default=True),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _needs_update(self, endpoint):
- if endpoint.enabled != self.params['enabled']:
- return True
- if endpoint.url != self.params['url']:
- return True
- return False
-
- def _system_state_change(self, endpoint):
- state = self.params['state']
- if state == 'absent' and endpoint:
- return True
-
- if state == 'present':
- if endpoint is None:
- return True
- return self._needs_update(endpoint)
-
- return False
-
- def run(self):
- service_name_or_id = self.params['service']
- interface = self.params['endpoint_interface']
- url = self.params['url']
- region = self.params['region']
- enabled = self.params['enabled']
- state = self.params['state']
-
- service = self.conn.get_service(service_name_or_id)
-
- if service is None and state == 'absent':
- self.exit_json(changed=False)
-
- if service is None and state == 'present':
- self.fail_json(msg='Service %s does not exist' % service_name_or_id)
-
- filters = dict(service_id=service.id, interface=interface)
- if region is not None:
- filters['region'] = region
- endpoints = self.conn.search_endpoints(filters=filters)
-
- endpoint = None
- if len(endpoints) > 1:
- self.fail_json(msg='Service %s, interface %s and region %s are '
- 'not unique' %
- (service_name_or_id, interface, region))
- elif len(endpoints) == 1:
- endpoint = endpoints[0]
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(endpoint))
-
- if state == 'present':
- if endpoint is None:
- args = {'url': url, 'interface': interface,
- 'service_name_or_id': service.id, 'enabled': enabled,
- 'region': region}
- endpoints = self.conn.create_endpoint(**args)
- # safe because endpoints contains a single item when url is
- # given to self.conn.create_endpoint()
- endpoint = endpoints[0]
-
- changed = True
- else:
- if self._needs_update(endpoint):
- endpoint = self.conn.update_endpoint(
- endpoint.id, url=url, enabled=enabled)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed,
- endpoint=endpoint)
-
- elif state == 'absent':
- if endpoint is None:
- changed = False
- else:
- self.conn.delete_endpoint(endpoint.id)
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityEndpointModule()
- module()
-
-
-if __name__ == '__main__':
- main()
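A condensed sketch of the present branch of the deleted endpoint module, using the same SDK calls; the cloud name, service, URL and region are placeholders from the EXAMPLES:

import openstack

conn = openstack.connect(cloud='mycloud')

service = conn.get_service('glance')
if service is None:
    raise SystemExit('Service glance does not exist')

endpoints = conn.search_endpoints(filters={'service_id': service.id,
                                           'interface': 'public',
                                           'region': 'RegionOne'})
if not endpoints:
    # create_endpoint() returns a list; with url= it contains one endpoint.
    endpoint = conn.create_endpoint(service_name_or_id=service.id,
                                    url='http://controller:9292',
                                    interface='public',
                                    region='RegionOne',
                                    enabled=True)[0]
else:
    endpoint = conn.update_endpoint(endpoints[0].id,
                                    url='http://controller:9292',
                                    enabled=True)
print(endpoint.id)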
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_federation_protocol.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_federation_protocol.py
deleted file mode 100644
index 5a33d8a32..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_federation_protocol.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: keystone_federation_protocol
-short_description: manage a federation Protocol
-author: OpenStack Ansible SIG
-description:
- - Manage a federation Protocol.
-options:
- name:
- description:
- - The name of the Protocol.
- type: str
- required: true
- aliases: ['id']
- state:
- description:
- - Whether the protocol should be C(present) or C(absent).
- choices: ['present', 'absent']
- default: present
- type: str
- idp_id:
- description:
- - The name of the Identity Provider this Protocol is associated with.
- aliases: ['idp_name']
- required: true
- type: str
- mapping_id:
- description:
- - The name of the Mapping to use for this Protocol.
- - Required when creating a new Protocol.
- type: str
- aliases: ['mapping_name']
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
-extends_documentation_fragment:
- - openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Create a protocol
- openstack.cloud.keystone_federation_protocol:
- cloud: example_cloud
- name: example_protocol
- idp_id: example_idp
- mapping_id: example_mapping
-
-- name: Delete a protocol
- openstack.cloud.keystone_federation_protocol:
- cloud: example_cloud
- name: example_protocol
- idp_id: example_idp
- state: absent
-'''
-
-RETURN = '''
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityFederationProtocolModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True, aliases=['id']),
- state=dict(default='present', choices=['absent', 'present']),
- idp_id=dict(required=True, aliases=['idp_name']),
- mapping_id=dict(aliases=['mapping_name']),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def normalize_protocol(self, protocol):
- """
- Normalizes the protocol definitions so that the outputs are consistent with the
- parameters
-
- - "name" (parameter) == "id" (SDK)
- """
- if protocol is None:
- return None
-
- _protocol = protocol.to_dict()
- _protocol['name'] = protocol['id']
- # As of 0.44 SDK doesn't copy the URI parameters over, so let's add them
- _protocol['idp_id'] = protocol['idp_id']
- return _protocol
-
- def delete_protocol(self, protocol):
- """
- Delete an existing Protocol
-
- returns: the "Changed" state
- """
- if protocol is None:
- return False
-
- if self.ansible.check_mode:
- return True
-
- self.conn.identity.delete_federation_protocol(None, protocol)
- return True
-
- def create_protocol(self, name):
- """
- Create a new Protocol
-
- returns: the "Changed" state and the new protocol
- """
- if self.ansible.check_mode:
- return True, None
-
- idp_name = self.params.get('idp_id')
- mapping_id = self.params.get('mapping_id')
-
- attributes = {
- 'idp_id': idp_name,
- 'mapping_id': mapping_id,
- }
-
- protocol = self.conn.identity.create_federation_protocol(id=name, **attributes)
- return (True, protocol)
-
- def update_protocol(self, protocol):
- """
- Update an existing Protocol
-
- returns: the "Changed" state and the new protocol
- """
- mapping_id = self.params.get('mapping_id')
-
- attributes = {}
-
- if (mapping_id is not None) and (mapping_id != protocol.mapping_id):
- attributes['mapping_id'] = mapping_id
-
- if not attributes:
- return False, protocol
-
- if self.ansible.check_mode:
- return True, None
-
- new_protocol = self.conn.identity.update_federation_protocol(None, protocol, **attributes)
- return (True, new_protocol)
-
- def run(self):
- """ Module entry point """
- name = self.params.get('name')
- state = self.params.get('state')
- idp = self.params.get('idp_id')
- changed = False
-
- protocol = self.conn.identity.find_federation_protocol(idp, name)
-
- if state == 'absent':
- if protocol is not None:
- changed = self.delete_protocol(protocol)
- self.exit_json(changed=changed)
-
- # state == 'present'
- else:
- if protocol is None:
- if self.params.get('mapping_id') is None:
- self.fail_json(
- msg='A mapping_id must be passed when creating'
- ' a protocol')
- (changed, protocol) = self.create_protocol(name)
- protocol = self.normalize_protocol(protocol)
- self.exit_json(changed=changed, protocol=protocol)
-
- else:
- (changed, new_protocol) = self.update_protocol(protocol)
- new_protocol = self.normalize_protocol(new_protocol)
- self.exit_json(changed=changed, protocol=new_protocol)
-
-
-def main():
- module = IdentityFederationProtocolModule()
- module()
-
-
-if __name__ == '__main__':
- main()
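The deleted keystone_federation_protocol module maps onto three identity-proxy calls. A minimal create-or-update sketch; the cloud, IDP, protocol and mapping names are placeholders taken from the EXAMPLES block:

import openstack

conn = openstack.connect(cloud='example_cloud')

idp_id, name, mapping_id = 'example_idp', 'example_protocol', 'example_mapping'

protocol = conn.identity.find_federation_protocol(idp_id, name)
if protocol is None:
    protocol = conn.identity.create_federation_protocol(
        id=name, idp_id=idp_id, mapping_id=mapping_id)
elif protocol.mapping_id != mapping_id:
    # Same update call the module used; the first argument (the IDP)
    # may be None when a full protocol object is passed.
    protocol = conn.identity.update_federation_protocol(
        None, protocol, mapping_id=mapping_id)
print(protocol['id'])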
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_federation_protocol_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_federation_protocol_info.py
deleted file mode 100644
index b281b13e3..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_federation_protocol_info.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: keystone_federation_protocol_info
-short_description: get information about federation Protocols
-author: OpenStack Ansible SIG
-description:
- - Get information about federation Protocols.
-options:
- name:
- description:
- - The name of the Protocol.
- type: str
- aliases: ['id']
- idp_id:
- description:
- - The name of the Identity Provider this Protocol is associated with.
- aliases: ['idp_name']
- required: true
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
-extends_documentation_fragment:
- - openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Describe a protocol
- openstack.cloud.keystone_federation_protocol_info:
- cloud: example_cloud
- name: example_protocol
- idp_id: example_idp
-
-- name: Describe all protocols attached to an IDP
- openstack.cloud.keystone_federation_protocol_info:
- cloud: example_cloud
- idp_id: example_idp
-'''
-
-RETURN = '''
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityFederationProtocolInfoModule(OpenStackModule):
- argument_spec = dict(
- name=dict(aliases=['id']),
- idp_id=dict(required=True, aliases=['idp_name']),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def normalize_protocol(self, protocol):
- """
- Normalizes the protocol definitions so that the outputs are consistent with the
- parameters
-
- - "name" (parameter) == "id" (SDK)
- """
- if protocol is None:
- return None
-
- _protocol = protocol.to_dict()
- _protocol['name'] = protocol['id']
- # As of 0.44 SDK doesn't copy the URI parameters over, so let's add them
- _protocol['idp_id'] = protocol['idp_id']
- return _protocol
-
- def run(self):
- """ Module entry point """
-
- name = self.params.get('name')
- idp = self.params.get('idp_id')
-
- if name:
- protocol = self.conn.identity.get_federation_protocol(idp, name)
- protocol = self.normalize_protocol(protocol)
- self.exit_json(changed=False, protocols=[protocol])
-
- else:
- protocols = list(map(self.normalize_protocol, self.conn.identity.federation_protocols(idp)))
- self.exit_json(changed=False, protocols=protocols)
-
-
-def main():
- module = IdentityFederationProtocolInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_identity_provider.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_identity_provider.py
deleted file mode 100644
index 35606cca7..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_identity_provider.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: federation_idp
-short_description: manage a federation Identity Provider
-author: OpenStack Ansible SIG
-description:
- - Manage a federation Identity Provider.
-options:
- name:
- description:
- - The name of the Identity Provider.
- type: str
- required: true
- aliases: ['id']
- state:
- description:
- - Whether the Identity Provider should be C(present) or C(absent).
- choices: ['present', 'absent']
- default: present
- type: str
- description:
- description:
- - The description of the Identity Provider.
- type: str
- domain_id:
- description:
- - The ID of a domain that is associated with the Identity Provider.
- Federated users that authenticate with the Identity Provider will be
- created under the domain specified.
- - Required when creating a new Identity Provider.
- type: str
- enabled:
- description:
- - Whether the Identity Provider is enabled or not.
- - Will default to C(true) when creating a new Identity Provider.
- type: bool
- aliases: ['is_enabled']
- remote_ids:
- description:
- - "List of the unique Identity Provider's remote IDs."
- - Will default to an empty list when creating a new Identity Provider.
- type: list
- elements: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
-extends_documentation_fragment:
- - openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Create an identity provider
- openstack.cloud.federation_idp:
- cloud: example_cloud
- name: example_provider
- domain_id: 0123456789abcdef0123456789abcdef
- description: 'My example IDP'
- remote_ids:
- - 'https://auth.example.com/auth/realms/ExampleRealm'
-
-- name: Delete an identity provider
- openstack.cloud.federation_idp:
- cloud: example_cloud
- name: example_provider
- state: absent
-'''
-
-RETURN = '''
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityFederationIdpModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True, aliases=['id']),
- state=dict(default='present', choices=['absent', 'present']),
- description=dict(),
- domain_id=dict(),
- enabled=dict(type='bool', aliases=['is_enabled']),
- remote_ids=dict(type='list', elements='str'),
- )
- module_kwargs = dict(
- supports_check_mode=True,
- )
-
- def normalize_idp(self, idp):
- """
- Normalizes the IDP definitions so that the outputs are consistent with the
- parameters
-
- - "enabled" (parameter) == "is_enabled" (SDK)
- - "name" (parameter) == "id" (SDK)
- """
- if idp is None:
- return None
-
- _idp = idp.to_dict()
- _idp['enabled'] = idp['is_enabled']
- _idp['name'] = idp['id']
- return _idp
-
- def delete_identity_provider(self, idp):
- """
- Delete an existing Identity Provider
-
- returns: the "Changed" state
- """
- if idp is None:
- return False
-
- if self.ansible.check_mode:
- return True
-
- self.conn.identity.delete_identity_provider(idp)
- return True
-
- def create_identity_provider(self, name):
- """
- Create a new Identity Provider
-
- returns: the "Changed" state and the new identity provider
- """
-
- if self.ansible.check_mode:
- return True, None
-
- description = self.params.get('description')
- enabled = self.params.get('enabled')
- domain_id = self.params.get('domain_id')
- remote_ids = self.params.get('remote_ids')
-
- if enabled is None:
- enabled = True
- if remote_ids is None:
- remote_ids = []
-
- attributes = {
- 'domain_id': domain_id,
- 'enabled': enabled,
- 'remote_ids': remote_ids,
- }
- if description is not None:
- attributes['description'] = description
-
- idp = self.conn.identity.create_identity_provider(id=name, **attributes)
- return (True, idp)
-
- def update_identity_provider(self, idp):
- """
- Update an existing Identity Provider
-
- returns: the "Changed" state and the new identity provider
- """
-
- description = self.params.get('description')
- enabled = self.params.get('enabled')
- domain_id = self.params.get('domain_id')
- remote_ids = self.params.get('remote_ids')
-
- attributes = {}
-
- if (description is not None) and (description != idp.description):
- attributes['description'] = description
- if (enabled is not None) and (enabled != idp.is_enabled):
- attributes['enabled'] = enabled
- if (domain_id is not None) and (domain_id != idp.domain_id):
- attributes['domain_id'] = domain_id
- if (remote_ids is not None) and (remote_ids != idp.remote_ids):
- attributes['remote_ids'] = remote_ids
-
- if not attributes:
- return False, idp
-
- if self.ansible.check_mode:
- return True, None
-
- new_idp = self.conn.identity.update_identity_provider(idp, **attributes)
- return (True, new_idp)
-
- def run(self):
- """ Module entry point """
-
- name = self.params.get('name')
- state = self.params.get('state')
- changed = False
-
- idp = self.conn.identity.find_identity_provider(name)
-
- if state == 'absent':
- if idp is not None:
- changed = self.delete_identity_provider(idp)
- self.exit_json(changed=changed)
-
- # state == 'present'
- else:
- if idp is None:
- if self.params.get('domain_id') is None:
- self.fail_json(msg='A domain_id must be passed when creating'
- ' an identity provider')
- (changed, idp) = self.create_identity_provider(name)
- idp = self.normalize_idp(idp)
- self.exit_json(changed=changed, identity_provider=idp)
-
- (changed, new_idp) = self.update_identity_provider(idp)
- new_idp = self.normalize_idp(new_idp)
- self.exit_json(changed=changed, identity_provider=new_idp)
-
-
-def main():
- module = IdentityFederationIdpModule()
- module()
-
-
-if __name__ == '__main__':
- main()
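Likewise, the deleted federation_idp module is a thin layer over the identity proxy. A sketch of its create path; the cloud name, provider name, domain ID and remote ID come from the EXAMPLES block and are placeholders:

import openstack

conn = openstack.connect(cloud='example_cloud')

name = 'example_provider'
idp = conn.identity.find_identity_provider(name)
if idp is None:
    idp = conn.identity.create_identity_provider(
        id=name,
        domain_id='0123456789abcdef0123456789abcdef',
        description='My example IDP',
        enabled=True,
        remote_ids=['https://auth.example.com/auth/realms/ExampleRealm'])
else:
    # Only pass the attributes that actually changed, as the module did.
    idp = conn.identity.update_identity_provider(idp, enabled=True)
print(idp['id'], idp['is_enabled'])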
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_identity_provider_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_identity_provider_info.py
deleted file mode 100644
index 4fe719494..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_identity_provider_info.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: federation_idp_info
-short_description: Get the information about the available federation identity
- providers
-author: OpenStack Ansible SIG
-description:
- - Fetch a federation identity provider.
-options:
- name:
- description:
- - The name of the identity provider to fetch.
- - If I(name) is specified, the module will return failed if the identity
- provider doesn't exist.
- type: str
- aliases: ['id']
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
-extends_documentation_fragment:
- - openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Fetch a specific identity provider
- openstack.cloud.federation_idp_info:
- cloud: example_cloud
- name: example_provider
-
-- name: Fetch all providers
- openstack.cloud.federation_idp_info:
- cloud: example_cloud
-'''
-
-RETURN = '''
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityFederationIdpInfoModule(OpenStackModule):
- argument_spec = dict(
- name=dict(aliases=['id']),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def normalize_idp(self, idp):
- """
- Normalizes the IDP definitions so that the outputs are consistent with the
- parameters
-
- - "enabled" (parameter) == "is_enabled" (SDK)
- - "name" (parameter) == "id" (SDK)
- """
- if idp is None:
- return
-
- _idp = idp.to_dict()
- _idp['enabled'] = idp['is_enabled']
- _idp['name'] = idp['id']
- return _idp
-
- def run(self):
- """ Module entry point """
-
- name = self.params.get('name')
-
- if name:
- idp = self.normalize_idp(self.conn.identity.get_identity_provider(name))
- self.exit_json(changed=False, identity_providers=[idp])
-
- else:
- providers = list(map(self.normalize_idp, self.conn.identity.identity_providers()))
- self.exit_json(changed=False, identity_providers=providers)
-
-
-def main():
- module = IdentityFederationIdpInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_mapping.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_mapping.py
deleted file mode 100644
index 6c07a41da..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_mapping.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: federation_mapping
-short_description: Manage a federation mapping
-author: OpenStack Ansible SIG
-description:
- - Manage a federation mapping.
-options:
- name:
- description:
- - The name of the mapping to manage.
- required: true
- type: str
- aliases: ['id']
- state:
- description:
- - Whether the mapping should be C(present) or C(absent).
- choices: ['present', 'absent']
- default: present
- type: str
- rules:
- description:
- - The rules that comprise the mapping. These are pairs of I(local) and
- I(remote) definitions. For more details on how these work please see
- the OpenStack documentation
- U(https://docs.openstack.org/keystone/latest/admin/federation/mapping_combinations.html).
- - Required if I(state=present)
- type: list
- elements: dict
- suboptions:
- local:
- description:
- - Information on what local attributes will be mapped.
- required: true
- type: list
- elements: dict
- remote:
- description:
- - Information on what remote attributes will be mapped.
- required: true
- type: list
- elements: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
-extends_documentation_fragment:
- - openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Create a new mapping
- openstack.cloud.federation_mapping:
- cloud: example_cloud
- name: example_mapping
- rules:
- - local:
- - user:
- name: '{0}'
- - group:
- id: '0cd5e9'
- remote:
- - type: UserName
- - type: orgPersonType
- any_one_of:
- - Contractor
- - SubContractor
-
-- name: Delete a mapping
- openstack.cloud.federation_mapping:
- name: example_mapping
- state: absent
-'''
-
-RETURN = '''
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityFederationMappingModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True, aliases=['id']),
- state=dict(default='present', choices=['absent', 'present']),
- rules=dict(type='list', elements='dict', options=dict(
- local=dict(required=True, type='list', elements='dict'),
- remote=dict(required=True, type='list', elements='dict')
- )),
- )
- module_kwargs = dict(
- required_if=[('state', 'present', ['rules'])],
- supports_check_mode=True
- )
-
- def normalize_mapping(self, mapping):
- """
- Normalizes the mapping definitions so that the outputs are consistent with
- the parameters
-
- - "name" (parameter) == "id" (SDK)
- """
- if mapping is None:
- return None
-
- _mapping = mapping.to_dict()
- _mapping['name'] = mapping['id']
- return _mapping
-
- def create_mapping(self, name):
- """
- Attempt to create a Mapping
-
- returns: A tuple containing the "Changed" state and the created mapping
- """
-
- if self.ansible.check_mode:
- return (True, None)
-
- rules = self.params.get('rules')
-
- mapping = self.conn.identity.create_mapping(id=name, rules=rules)
- return (True, mapping)
-
- def delete_mapping(self, mapping):
- """
- Attempt to delete a Mapping
-
- returns: the "Changed" state
- """
- if mapping is None:
- return False
-
- if self.ansible.check_mode:
- return True
-
- self.conn.identity.delete_mapping(mapping)
- return True
-
- def update_mapping(self, mapping):
- """
- Attempt to update a Mapping
-
- returns: The "Changed" state and the new mapping
- """
-
- current_rules = mapping.rules
- new_rules = self.params.get('rules')
-
- # Nothing to do
- if current_rules == new_rules:
- return (False, mapping)
-
- if self.ansible.check_mode:
- return (True, None)
-
- new_mapping = self.conn.identity.update_mapping(mapping, rules=new_rules)
- return (True, new_mapping)
-
- def run(self):
- """ Module entry point """
-
- name = self.params.get('name')
- state = self.params.get('state')
- changed = False
-
- mapping = self.conn.identity.find_mapping(name)
-
- if state == 'absent':
- if mapping is not None:
- changed = self.delete_mapping(mapping)
- self.exit_json(changed=changed)
-
- # state == 'present'
- else:
- if len(self.params.get('rules')) < 1:
- self.fail_json(msg='At least one rule must be passed')
-
- if mapping is None:
- (changed, mapping) = self.create_mapping(name)
- mapping = self.normalize_mapping(mapping)
- self.exit_json(changed=changed, mapping=mapping)
- else:
- (changed, new_mapping) = self.update_mapping(mapping)
- new_mapping = self.normalize_mapping(new_mapping)
- self.exit_json(mapping=new_mapping, changed=changed)
-
-
-def main():
- module = IdentityFederationMappingModule()
- module()
-
-
-if __name__ == '__main__':
- main()
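The deleted federation_mapping module reduces to find, create or update on the identity proxy. A sketch using the same rule set shown in its EXAMPLES block; the cloud and mapping names are placeholders:

import openstack

conn = openstack.connect(cloud='example_cloud')

name = 'example_mapping'
rules = [{
    'local': [{'user': {'name': '{0}'}},
              {'group': {'id': '0cd5e9'}}],
    'remote': [{'type': 'UserName'},
               {'type': 'orgPersonType',
                'any_one_of': ['Contractor', 'SubContractor']}],
}]

mapping = conn.identity.find_mapping(name)
if mapping is None:
    mapping = conn.identity.create_mapping(id=name, rules=rules)
elif mapping.rules != rules:
    mapping = conn.identity.update_mapping(mapping, rules=rules)
print(mapping['id'])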
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_mapping_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_mapping_info.py
deleted file mode 100644
index 2ba317c98..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_mapping_info.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: federation_mapping_info
-short_description: Get the information about the available federation mappings
-author: OpenStack Ansible SIG
-description:
- - Fetch a federation mapping.
-options:
- name:
- description:
- - The name of the mapping to fetch.
- - If I(name) is specified, the module will return failed if the mapping
- doesn't exist.
- type: str
- aliases: ['id']
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.44"
-extends_documentation_fragment:
- - openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Fetch a specific mapping
- openstack.cloud.federation_mapping_info:
- cloud: example_cloud
- name: example_mapping
-
-- name: Fetch all mappings
- openstack.cloud.federation_mapping_info:
- cloud: example_cloud
-'''
-
-RETURN = '''
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityFederationMappingInfoModule(OpenStackModule):
- argument_spec = dict(
- name=dict(aliases=['id']),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- module_min_sdk_version = "0.44"
-
- def normalize_mapping(self, mapping):
- """
- Normalizes the mapping definitions so that the outputs are consistent with the
- parameters
-
- - "name" (parameter) == "id" (SDK)
- """
- if mapping is None:
- return None
-
- _mapping = mapping.to_dict()
- _mapping['name'] = mapping['id']
- return _mapping
-
- def run(self):
- """ Module entry point """
- name = self.params.get('name')
-
- if name:
- mapping = self.normalize_mapping(
- self.conn.identity.get_mapping(name))
- self.exit_json(changed=False, mappings=[mapping])
- else:
- mappings = list(map(
- self.normalize_mapping, self.conn.identity.mappings()))
- self.exit_json(changed=False, mappings=mappings)
-
-
-def main():
- module = IdentityFederationMappingInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_role.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_role.py
deleted file mode 100644
index 272d98216..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_role.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 IBM
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: identity_role
-short_description: Manage OpenStack Identity Roles
-author: OpenStack Ansible SIG
-description:
- - Manage OpenStack Identity Roles.
-options:
- name:
- description:
- - Role Name
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a role named "demo"
-- openstack.cloud.identity_role:
- cloud: mycloud
- state: present
- name: demo
-
-# Delete the role named "demo"
-- openstack.cloud.identity_role:
- cloud: mycloud
- state: absent
- name: demo
-'''
-
-RETURN = '''
-role:
- description: Dictionary describing the role.
- returned: On success when I(state) is 'present'.
- type: complex
- contains:
- domain_id:
- description: Domain to which the role belongs
- type: str
- sample: default
- id:
- description: Unique role ID.
- type: str
- sample: "677bfab34c844a01b88a217aa12ec4c2"
- name:
- description: Role name.
- type: str
- sample: "demo"
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityRoleModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _system_state_change(self, state, role):
- if state == 'present' and not role:
- return True
- if state == 'absent' and role:
- return True
- return False
-
- def run(self):
- name = self.params.get('name')
- state = self.params.get('state')
-
- role = self.conn.get_role(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, role))
-
- changed = False
- if state == 'present':
- if role is None:
- role = self.conn.create_role(name=name)
- changed = True
- self.exit_json(changed=changed, role=role)
- elif state == 'absent' and role is not None:
- self.conn.identity.delete_role(role['id'])
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityRoleModule()
- module()
-
-
-if __name__ == '__main__':
- main()
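Finally, the deleted identity_role module is a single lookup followed by create or delete. A minimal sketch with placeholder cloud and role names:

import openstack

conn = openstack.connect(cloud='mycloud')

name = 'demo'
role = conn.get_role(name)

# state=present: create the role if it is missing.
if role is None:
    role = conn.create_role(name=name)
print(role['id'], role['name'])

# state=absent would instead delete it:
#   if role is not None:
#       conn.identity.delete_role(role['id'])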
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_service.py b/ansible_collections/openstack/cloud/plugins/modules/os_keystone_service.py
deleted file mode 100644
index 6d1962f3e..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_keystone_service.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/python
-# Copyright 2016 Sam Yaple
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: catalog_service
-short_description: Manage OpenStack Identity services
-author: OpenStack Ansible SIG
-description:
- - Create, update, or delete an OpenStack Identity service. If a service
- with the supplied name already exists, it will be updated with the
- new description and enabled attributes.
-options:
- name:
- description:
- - Name of the service
- required: true
- type: str
- description:
- description:
- - Description of the service
- type: str
- enabled:
- description:
- - Is the service enabled
- type: bool
- default: 'yes'
- aliases: ['is_enabled']
- type:
- description:
- - The type of service
- required: true
- type: str
- aliases: ['service_type']
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a service for glance
-- openstack.cloud.catalog_service:
- cloud: mycloud
- state: present
- name: glance
- type: image
- description: OpenStack Image Service
-# Delete a service
-- openstack.cloud.catalog_service:
- cloud: mycloud
- state: absent
- name: glance
- type: image
-'''
-
-RETURN = '''
-service:
- description: Dictionary describing the service.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Service ID.
- type: str
- sample: "3292f020780b4d5baf27ff7e1d224c44"
- name:
- description: Service name.
- type: str
- sample: "glance"
- type:
- description: Service type.
- type: str
- sample: "image"
- service_type:
- description: Service type.
- type: str
- sample: "image"
- description:
- description: Service description.
- type: str
- sample: "OpenStack Image Service"
- enabled:
- description: Service status.
- type: bool
- sample: True
-id:
- description: The service ID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "3292f020780b4d5baf27ff7e1d224c44"
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityCatalogServiceModule(OpenStackModule):
- argument_spec = dict(
- description=dict(default=None),
- enabled=dict(default=True, aliases=['is_enabled'], type='bool'),
- name=dict(required=True),
- type=dict(required=True, aliases=['service_type']),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _needs_update(self, service):
- for parameter in ('enabled', 'description', 'type'):
- if service[parameter] != self.params[parameter]:
- return True
- return False
-
- def _system_state_change(self, service):
- state = self.params['state']
- if state == 'absent' and service:
- return True
-
- if state == 'present':
- if service is None:
- return True
- return self._needs_update(service)
-
- return False
-
- def run(self):
- description = self.params['description']
- enabled = self.params['enabled']
- name = self.params['name']
- state = self.params['state']
- type = self.params['type']
-
- services = self.conn.search_services(
- name_or_id=name, filters=(dict(type=type) if type else None))
-
- service = None
- if len(services) > 1:
- self.fail_json(
- msg='Service name %s and type %s are not unique'
- % (name, type))
- elif len(services) == 1:
- service = services[0]
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(service))
-
- args = {'name': name, 'enabled': enabled, 'type': type}
- if description:
- args['description'] = description
-
- if state == 'present':
- if service is None:
- service = self.conn.create_service(**args)
- changed = True
- else:
- if self._needs_update(service):
- service = self.conn.update_service(service,
- **args)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed, service=service, id=service.id)
-
- elif state == 'absent':
- if service is None:
- changed = False
- else:
- self.conn.identity.delete_service(service.id)
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityCatalogServiceModule()
- module()
-
-
-if __name__ == '__main__':
- main()
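
The update logic above hinges on _needs_update(), which only compares the attributes the module manages. A small self-contained sketch of that drift check, with plain dicts standing in for the SDK service resource:

    def needs_update(existing, desired, keys=('enabled', 'description', 'type')):
        """Return True if any managed attribute differs from the desired value."""
        return any(existing.get(key) != desired.get(key) for key in keys)

    existing_service = {'name': 'glance', 'type': 'image',
                        'description': 'OpenStack Image Service', 'enabled': True}
    desired_service = {'name': 'glance', 'type': 'image',
                       'description': 'OpenStack Image Service', 'enabled': False}
    print(needs_update(existing_service, desired_service))  # True: 'enabled' differs
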
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_listener.py b/ansible_collections/openstack/cloud/plugins/modules/os_listener.py
deleted file mode 100644
index f4cdad48a..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_listener.py
+++ /dev/null
@@ -1,287 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2018 Catalyst Cloud Ltd.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: lb_listener
-short_description: Add/Delete a listener for a load balancer from OpenStack Cloud
-author: OpenStack Ansible SIG
-description:
- - Add or remove a listener for a load balancer from the OpenStack load-balancer service.
-options:
- name:
- description:
- - Name that has to be given to the listener
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- loadbalancer:
- description:
- - The name or id of the load balancer that this listener belongs to.
- required: true
- type: str
- protocol:
- description:
- - The protocol for the listener.
- choices: [HTTP, HTTPS, TCP, TERMINATED_HTTPS, UDP, SCTP]
- default: HTTP
- type: str
- protocol_port:
- description:
- - The protocol port number for the listener.
- default: 80
- type: int
- timeout_client_data:
- description:
- - Client inactivity timeout in milliseconds.
- default: 50000
- type: int
- timeout_member_data:
- description:
- - Member inactivity timeout in milliseconds.
- default: 50000
- type: int
- wait:
- description:
- - If the module should wait for the load balancer to be ACTIVE.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the load balancer to get
- into ACTIVE state.
- default: 180
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-id:
- description: The listener UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-listener:
- description: Dictionary describing the listener.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Unique UUID.
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
- name:
- description: Name given to the listener.
- type: str
- sample: "test"
- description:
- description: The listener description.
- type: str
- sample: "description"
- load_balancer_id:
- description: The load balancer UUID this listener belongs to.
- type: str
- sample: "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"
- loadbalancers:
- description: A list of load balancer IDs.
- type: list
- sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
- provisioning_status:
- description: The provisioning status of the listener.
- type: str
- sample: "ACTIVE"
- operating_status:
- description: The operating status of the listener.
- type: str
- sample: "ONLINE"
- is_admin_state_up:
- description: The administrative state of the listener.
- type: bool
- sample: true
- protocol:
- description: The protocol for the listener.
- type: str
- sample: "HTTP"
- protocol_port:
- description: The protocol port number for the listener.
- type: int
- sample: 80
- timeout_client_data:
- description: Client inactivity timeout in milliseconds.
- type: int
- sample: 50000
- timeout_member_data:
- description: Member inactivity timeout in milliseconds.
- type: int
- sample: 50000
-'''
-
-EXAMPLES = '''
-# Create a listener, wait for the loadbalancer to be active.
-- openstack.cloud.lb_listener:
- cloud: mycloud
- endpoint_type: admin
- state: present
- name: test-listener
- loadbalancer: test-loadbalancer
- protocol: HTTP
- protocol_port: 8080
-
-# Create a listener, do not wait for the loadbalancer to be active.
-- openstack.cloud.lb_listener:
- cloud: mycloud
- endpoint_type: admin
- state: present
- name: test-listener
- loadbalancer: test-loadbalancer
- protocol: HTTP
- protocol_port: 8080
- wait: no
-
-# Delete a listener
-- openstack.cloud.lb_listener:
- cloud: mycloud
- endpoint_type: admin
- state: absent
- name: test-listener
- loadbalancer: test-loadbalancer
-
-# Create a listener, increase timeouts for connection persistence (for SSH for example).
-- openstack.cloud.lb_listener:
- cloud: mycloud
- endpoint_type: admin
- state: present
- name: test-listener
- loadbalancer: test-loadbalancer
- protocol: TCP
- protocol_port: 22
- timeout_client_data: 1800000
- timeout_member_data: 1800000
-'''
-
-import time
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class LoadbalancerListenerModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- state=dict(default='present', choices=['absent', 'present']),
- loadbalancer=dict(required=True),
- protocol=dict(default='HTTP',
- choices=['HTTP', 'HTTPS', 'TCP', 'TERMINATED_HTTPS', 'UDP', 'SCTP']),
- protocol_port=dict(default=80, type='int', required=False),
- timeout_client_data=dict(default=50000, type='int', required=False),
- timeout_member_data=dict(default=50000, type='int', required=False),
- )
- module_kwargs = dict()
-
- def _lb_wait_for_status(self, lb, status, failures, interval=5):
- """Wait for load balancer to be in a particular provisioning status."""
- timeout = self.params['timeout']
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- lb = self.conn.load_balancer.get_load_balancer(lb.id)
- if lb.provisioning_status == status:
- return None
- if lb.provisioning_status in failures:
- self.fail_json(
- msg="Load Balancer %s transitioned to failure state %s" %
- (lb.id, lb.provisioning_status)
- )
-
- time.sleep(interval)
- total_sleep += interval
-
- self.fail_json(
- msg="Timeout waiting for Load Balancer %s to transition to %s" %
- (lb.id, status)
- )
-
- def run(self):
- loadbalancer = self.params['loadbalancer']
- loadbalancer_id = None
-
- changed = False
- listener = self.conn.load_balancer.find_listener(
- name_or_id=self.params['name'])
-
- if self.params['state'] == 'present':
- if not listener:
- lb = self.conn.load_balancer.find_load_balancer(loadbalancer)
- if not lb:
- self.fail_json(
- msg='load balancer %s is not found' % loadbalancer
- )
- loadbalancer_id = lb.id
-
- listener = self.conn.load_balancer.create_listener(
- name=self.params['name'],
- loadbalancer_id=loadbalancer_id,
- protocol=self.params['protocol'],
- protocol_port=self.params['protocol_port'],
- timeout_client_data=self.params['timeout_client_data'],
- timeout_member_data=self.params['timeout_member_data'],
- )
- changed = True
-
- if not self.params['wait']:
- self.exit_json(
- changed=changed, listener=listener.to_dict(),
- id=listener.id)
-
- if self.params['wait']:
- # Check in case the listener already exists.
- lb = self.conn.load_balancer.find_load_balancer(loadbalancer)
- if not lb:
- self.fail_json(
- msg='load balancer %s is not found' % loadbalancer
- )
- self._lb_wait_for_status(lb, "ACTIVE", ["ERROR"])
-
- self.exit_json(
- changed=changed, listener=listener.to_dict(), id=listener.id)
- elif self.params['state'] == 'absent':
- if not listener:
- changed = False
- else:
- self.conn.load_balancer.delete_listener(listener)
- changed = True
-
- if self.params['wait']:
- # Wait for the load balancer to be active after deleting
- # the listener.
- lb = self.conn.load_balancer.find_load_balancer(loadbalancer)
- if not lb:
- self.fail_json(
- msg='load balancer %s is not found' % loadbalancer
- )
- self._lb_wait_for_status(lb, "ACTIVE", ["ERROR"])
-
- self.exit_json(changed=changed)
-
-
-def main():
- module = LoadbalancerListenerModule()
- module()
-
-
-if __name__ == "__main__":
- main()
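
_lb_wait_for_status() above is a plain polling loop over the load balancer's provisioning_status. A generic, self-contained sketch of the same pattern; "fetch" stands for any callable that re-reads the resource, for example a lambda around conn.load_balancer.get_load_balancer().

    import time

    def wait_for_provisioning_status(fetch, status, failures=(), timeout=180,
                                     interval=5):
        """Poll fetch() until provisioning_status reaches status or times out."""
        waited = 0
        while waited < timeout:
            resource = fetch()
            if resource.provisioning_status == status:
                return resource
            if resource.provisioning_status in failures:
                raise RuntimeError('resource entered failure state %s'
                                   % resource.provisioning_status)
            time.sleep(interval)
            waited += interval
        raise TimeoutError('resource did not reach %s within %d seconds'
                           % (status, timeout))

    class FakeLoadBalancer:  # stand-in so the sketch runs without a cloud
        provisioning_status = 'ACTIVE'

    print(wait_for_provisioning_status(lambda: FakeLoadBalancer(),
                                       'ACTIVE').provisioning_status)
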
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_loadbalancer.py b/ansible_collections/openstack/cloud/plugins/modules/os_loadbalancer.py
deleted file mode 100644
index 336da966c..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_loadbalancer.py
+++ /dev/null
@@ -1,691 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2018 Catalyst Cloud Ltd.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: loadbalancer
-short_description: Add/Delete load balancer from OpenStack Cloud
-author: OpenStack Ansible SIG
-description:
- - Add or remove a load balancer from the OpenStack load-balancer
- service (Octavia). Updating an existing load balancer is not yet supported.
-options:
- name:
- description:
- - The name of the load balancer.
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- flavor:
- description:
- - The flavor of the load balancer.
- type: str
- vip_network:
- description:
- - The name or id of the network for the virtual IP of the load balancer.
- One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
- for creation.
- type: str
- vip_subnet:
- description:
- - The name or id of the subnet for the virtual IP of the load balancer.
- One of I(vip_network), I(vip_subnet), or I(vip_port) must be specified
- for creation.
- type: str
- vip_port:
- description:
- - The name or id of the load balancer virtual IP port. One of
- I(vip_network), I(vip_subnet), or I(vip_port) must be specified for
- creation.
- type: str
- vip_address:
- description:
- - IP address of the load balancer virtual IP.
- type: str
- public_ip_address:
- description:
- - Public IP address associated with the VIP.
- type: str
- auto_public_ip:
- description:
- - Allocate a public IP address and associate with the VIP automatically.
- type: bool
- default: 'no'
- public_network:
- description:
- - The name or ID of a Neutron external network.
- type: str
- delete_public_ip:
- description:
- - When C(state=absent) and this option is true, any public IP address
- associated with the VIP will be deleted along with the load balancer.
- type: bool
- default: 'no'
- listeners:
- description:
- - A list of listeners attached to the load balancer.
- suboptions:
- name:
- description:
- - The listener name or ID.
- protocol:
- description:
- - The protocol for the listener.
- default: HTTP
- protocol_port:
- description:
- - The protocol port number for the listener.
- default: 80
- allowed_cidrs:
- description:
- - A list of IPv4 CIDRs, IPv6 CIDRs or a mix of both that are allowed to access the listener. By default all addresses are allowed.
- When a list of CIDRs is provided, all addresses not in the list are denied.
- Ignored on Octavia versions that do not support allowed_cidrs (older than 2.12).
- default: []
- pool:
- description:
- - The pool attached to the listener.
- suboptions:
- name:
- description:
- - The pool name or ID.
- protocol:
- description:
- - The protocol for the pool.
- default: HTTP
- lb_algorithm:
- description:
- - The load balancing algorithm for the pool.
- default: ROUND_ROBIN
- members:
- description:
- - A list of members added to the pool.
- suboptions:
- name:
- description:
- - The member name or ID.
- address:
- description:
- - The IP address of the member.
- protocol_port:
- description:
- - The protocol port number for the member.
- default: 80
- subnet:
- description:
- - The name or ID of the subnet the member service is
- accessible from.
- elements: dict
- type: list
- wait:
- description:
- - If the module should wait for the load balancer to be created or
- deleted.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait.
- default: 180
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-id:
- description: The load balancer UUID.
- returned: On success when C(state=present)
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-loadbalancer:
- description: Dictionary describing the load balancer.
- returned: On success when C(state=present)
- type: complex
- contains:
- id:
- description: Unique UUID.
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
- name:
- description: Name given to the load balancer.
- type: str
- sample: "lingxian_test"
- vip_network_id:
- description: Network ID the load balancer virtual IP port belongs in.
- type: str
- sample: "f171db43-56fd-41cf-82d7-4e91d741762e"
- vip_subnet_id:
- description: Subnet ID the load balancer virtual IP port belongs in.
- type: str
- sample: "c53e3c70-9d62-409a-9f71-db148e7aa853"
- vip_port_id:
- description: The load balancer virtual IP port ID.
- type: str
- sample: "2061395c-1c01-47ab-b925-c91b93df9c1d"
- vip_address:
- description: The load balancer virtual IP address.
- type: str
- sample: "192.168.2.88"
- public_vip_address:
- description: The load balancer public VIP address.
- type: str
- sample: "10.17.8.254"
- provisioning_status:
- description: The provisioning status of the load balancer.
- type: str
- sample: "ACTIVE"
- operating_status:
- description: The operating status of the load balancer.
- type: str
- sample: "ONLINE"
- is_admin_state_up:
- description: The administrative state of the load balancer.
- type: bool
- sample: true
- listeners:
- description: The associated listener IDs, if any.
- type: list
- sample: [{"id": "7aa1b380-beec-459c-a8a7-3a4fb6d30645"}, {"id": "692d06b8-c4f8-4bdb-b2a3-5a263cc23ba6"}]
- pools:
- description: The associated pool IDs, if any.
- type: list
- sample: [{"id": "27b78d92-cee1-4646-b831-e3b90a7fa714"}, {"id": "befc1fb5-1992-4697-bdb9-eee330989344"}]
-'''
-
-EXAMPLES = '''
-# Create a load balancer by specifying the VIP subnet.
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- state: present
- name: my_lb
- vip_subnet: my_subnet
- timeout: 150
-
-# Create a load balancer by specifying the VIP network and the IP address.
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- state: present
- name: my_lb
- vip_network: my_network
- vip_address: 192.168.0.11
-
-# Create a load balancer together with its sub-resources in the 'all in one'
-# way. A public IP address is also allocated to the load balancer VIP.
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- name: lingxian_test
- state: present
- vip_subnet: kong_subnet
- auto_public_ip: yes
- public_network: public
- listeners:
- - name: lingxian_80
- protocol: TCP
- protocol_port: 80
- pool:
- name: lingxian_80_pool
- protocol: TCP
- members:
- - name: mywebserver1
- address: 192.168.2.81
- protocol_port: 80
- subnet: webserver_subnet
- - name: lingxian_8080
- protocol: TCP
- protocol_port: 8080
- pool:
- name: lingxian_8080-pool
- protocol: TCP
- members:
- - name: mywebserver2
- address: 192.168.2.82
- protocol_port: 8080
- wait: yes
- timeout: 600
-
-# Delete a load balancer (and all its related resources)
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- state: absent
- name: my_lb
-
-# Delete a load balancer (and all its related resources) together with the
-# public IP address (if any) attached to it.
-- openstack.cloud.loadbalancer:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: passme
- project_name: admin
- state: absent
- name: my_lb
- delete_public_ip: yes
-'''
-
-import time
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class LoadBalancerModule(OpenStackModule):
-
- def _wait_for_pool(self, pool, provisioning_status, operating_status, failures, interval=5):
- """Wait for pool to be in a particular provisioning and operating status."""
- timeout = self.params['timeout'] # reuse loadbalancer timeout
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- pool = self.conn.load_balancer.find_pool(name_or_id=pool.id)
- if pool:
- if pool.provisioning_status == provisioning_status and pool.operating_status == operating_status:
- return None
- if pool.provisioning_status in failures:
- self.fail_json(
- msg="Pool %s transitioned to failure state %s" %
- (pool.id, pool.provisioning_status)
- )
- else:
- if provisioning_status == "DELETED":
- return None
- else:
- self.fail_json(
- msg="Pool %s transitioned to DELETED" % pool.id
- )
-
- time.sleep(interval)
- total_sleep += interval
-
- def _wait_for_lb(self, lb, status, failures, interval=5):
- """Wait for load balancer to be in a particular provisioning status."""
- timeout = self.params['timeout']
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- lb = self.conn.load_balancer.find_load_balancer(lb.id)
-
- if lb:
- if lb.provisioning_status == status:
- return None
- if lb.provisioning_status in failures:
- self.fail_json(
- msg="Load Balancer %s transitioned to failure state %s" %
- (lb.id, lb.provisioning_status)
- )
- else:
- if status == "DELETED":
- return None
- else:
- self.fail_json(
- msg="Load Balancer %s transitioned to DELETED" % lb.id
- )
-
- time.sleep(interval)
- total_sleep += interval
-
- self.fail_json(
- msg="Timeout waiting for Load Balancer %s to transition to %s" %
- (lb.id, status)
- )
-
- argument_spec = dict(
- name=dict(required=True),
- flavor=dict(required=False),
- state=dict(default='present', choices=['absent', 'present']),
- vip_network=dict(required=False),
- vip_subnet=dict(required=False),
- vip_port=dict(required=False),
- vip_address=dict(required=False),
- listeners=dict(type='list', default=[], elements='dict'),
- public_ip_address=dict(required=False, default=None),
- auto_public_ip=dict(required=False, default=False, type='bool'),
- public_network=dict(required=False),
- delete_public_ip=dict(required=False, default=False, type='bool'),
- )
- module_kwargs = dict(supports_check_mode=True)
-
- def run(self):
- flavor = self.params['flavor']
- vip_network = self.params['vip_network']
- vip_subnet = self.params['vip_subnet']
- vip_port = self.params['vip_port']
- listeners = self.params['listeners']
- public_vip_address = self.params['public_ip_address']
- allocate_fip = self.params['auto_public_ip']
- delete_fip = self.params['delete_public_ip']
- public_network = self.params['public_network']
-
- vip_network_id = None
- vip_subnet_id = None
- vip_port_id = None
- flavor_id = None
-
- try:
- max_microversion = 1
- max_majorversion = 2
- changed = False
- lb = self.conn.load_balancer.find_load_balancer(
- name_or_id=self.params['name'])
-
- if self.params['state'] == 'present':
- if lb and self.ansible.check_mode:
- self.exit_json(changed=False)
- if lb:
- self.exit_json(changed=False)
- ver_data = self.conn.load_balancer.get_all_version_data()
- region = list(ver_data.keys())[0]
- interface_type = list(ver_data[region].keys())[0]
- versions = ver_data[region][interface_type]['load-balancer']
- for ver in versions:
- if ver['status'] == 'CURRENT':
- curversion = ver['version'].split(".")
- max_majorversion = int(curversion[0])
- max_microversion = int(curversion[1])
-
- if not lb:
- if self.ansible.check_mode:
- self.exit_json(changed=True)
-
- if not (vip_network or vip_subnet or vip_port):
- self.fail_json(
- msg="One of vip_network, vip_subnet, or vip_port must "
- "be specified for load balancer creation"
- )
-
- if flavor:
- _flavor = self.conn.load_balancer.find_flavor(flavor)
- if not _flavor:
- self.fail_json(
- msg='flavor %s not found' % flavor
- )
- flavor_id = _flavor.id
-
- if vip_network:
- network = self.conn.get_network(vip_network)
- if not network:
- self.fail_json(
- msg='network %s is not found' % vip_network
- )
- vip_network_id = network.id
- if vip_subnet:
- subnet = self.conn.get_subnet(vip_subnet)
- if not subnet:
- self.fail_json(
- msg='subnet %s is not found' % vip_subnet
- )
- vip_subnet_id = subnet.id
- if vip_port:
- port = self.conn.get_port(vip_port)
-
- if not port:
- self.fail_json(
- msg='port %s is not found' % vip_port
- )
- vip_port_id = port.id
- lbargs = {"name": self.params['name'],
- "vip_network_id": vip_network_id,
- "vip_subnet_id": vip_subnet_id,
- "vip_port_id": vip_port_id,
- "vip_address": self.params['vip_address']
- }
- if flavor_id is not None:
- lbargs["flavor_id"] = flavor_id
-
- lb = self.conn.load_balancer.create_load_balancer(**lbargs)
-
- changed = True
-
- if not listeners and not self.params['wait']:
- self.exit_json(
- changed=changed,
- loadbalancer=lb.to_dict(),
- id=lb.id
- )
-
- self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
-
- for listener_def in listeners:
- listener_name = listener_def.get("name")
- pool_def = listener_def.get("pool")
-
- if not listener_name:
- self.fail_json(msg='listener name is required')
-
- listener = self.conn.load_balancer.find_listener(
- name_or_id=listener_name
- )
-
- if not listener:
- self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
-
- protocol = listener_def.get("protocol", "HTTP")
- protocol_port = listener_def.get("protocol_port", 80)
- allowed_cidrs = listener_def.get("allowed_cidrs", [])
- listenerargs = {"name": listener_name,
- "loadbalancer_id": lb.id,
- "protocol": protocol,
- "protocol_port": protocol_port
- }
- if max_microversion >= 12 and max_majorversion >= 2:
- listenerargs['allowed_cidrs'] = allowed_cidrs
- listener = self.conn.load_balancer.create_listener(**listenerargs)
- changed = True
-
- # Ensure pool in the listener.
- if pool_def:
- pool_name = pool_def.get("name")
- members = pool_def.get('members', [])
-
- if not pool_name:
- self.fail_json(msg='pool name is required')
-
- pool = self.conn.load_balancer.find_pool(name_or_id=pool_name)
-
- if not pool:
- self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
-
- protocol = pool_def.get("protocol", "HTTP")
- lb_algorithm = pool_def.get("lb_algorithm",
- "ROUND_ROBIN")
-
- pool = self.conn.load_balancer.create_pool(
- name=pool_name,
- listener_id=listener.id,
- protocol=protocol,
- lb_algorithm=lb_algorithm
- )
- self._wait_for_pool(pool, "ACTIVE", "ONLINE", ["ERROR"])
- changed = True
-
- # Ensure members in the pool
- for member_def in members:
- member_name = member_def.get("name")
- if not member_name:
- self.fail_json(msg='member name is required')
-
- member = self.conn.load_balancer.find_member(member_name,
- pool.id
- )
-
- if not member:
- self._wait_for_lb(lb, "ACTIVE", ["ERROR"])
-
- address = member_def.get("address")
- if not address:
- self.fail_json(
- msg='member address for member %s is '
- 'required' % member_name
- )
-
- subnet_id = member_def.get("subnet")
- if subnet_id:
- subnet = self.conn.get_subnet(subnet_id)
- if not subnet:
- self.fail_json(
- msg='subnet %s for member %s is not '
- 'found' % (subnet_id, member_name)
- )
- subnet_id = subnet.id
-
- protocol_port = member_def.get("protocol_port", 80)
-
- member = self.conn.load_balancer.create_member(
- pool,
- name=member_name,
- address=address,
- protocol_port=protocol_port,
- subnet_id=subnet_id
- )
- self._wait_for_pool(pool, "ACTIVE", "ONLINE", ["ERROR"])
- changed = True
-
- # Associate public ip to the load balancer VIP. If
- # public_vip_address is provided, use that IP, otherwise, either
- # find an available public ip or create a new one.
- fip = None
- orig_public_ip = None
- new_public_ip = None
- if public_vip_address or allocate_fip:
- ips = self.conn.network.ips(
- port_id=lb.vip_port_id,
- fixed_ip_address=lb.vip_address
- )
- ips = list(ips)
- if ips:
- orig_public_ip = ips[0]
- new_public_ip = orig_public_ip.floating_ip_address
-
- if public_vip_address and public_vip_address != orig_public_ip:
- fip = self.conn.network.find_ip(public_vip_address)
-
- if not fip:
- self.fail_json(
- msg='Public IP %s is unavailable' % public_vip_address
- )
-
- # Release origin public ip first
- self.conn.network.update_ip(
- orig_public_ip,
- fixed_ip_address=None,
- port_id=None
- )
-
- # Associate new public ip
- self.conn.network.update_ip(
- fip,
- fixed_ip_address=lb.vip_address,
- port_id=lb.vip_port_id
- )
-
- new_public_ip = public_vip_address
- changed = True
- elif allocate_fip and not orig_public_ip:
- fip = self.conn.network.find_available_ip()
- if not fip:
- if not public_network:
- self.fail_json(msg="Public network is not provided")
-
- pub_net = self.conn.network.find_network(public_network)
- if not pub_net:
- self.fail_json(
- msg='Public network %s not found' %
- public_network
- )
- fip = self.conn.network.create_ip(
- floating_network_id=pub_net.id
- )
-
- self.conn.network.update_ip(
- fip,
- fixed_ip_address=lb.vip_address,
- port_id=lb.vip_port_id
- )
-
- new_public_ip = fip.floating_ip_address
- changed = True
-
- # Include public_vip_address in the result.
- lb = self.conn.load_balancer.find_load_balancer(name_or_id=lb.id)
- lb_dict = lb.to_dict()
- lb_dict.update({"public_vip_address": new_public_ip})
-
- self.exit_json(
- changed=changed,
- loadbalancer=lb_dict,
- id=lb.id
- )
- elif self.params['state'] == 'absent':
- changed = False
- public_vip_address = None
-
- if lb:
- if self.ansible.check_mode:
- self.exit_json(changed=True)
- if delete_fip:
- ips = self.conn.network.ips(
- port_id=lb.vip_port_id,
- fixed_ip_address=lb.vip_address
- )
- ips = list(ips)
- if ips:
- public_vip_address = ips[0]
-
- # Deleting load balancer with `cascade=False` does not make
- # sense because the deletion will always fail if there are
- # sub-resources.
- self.conn.load_balancer.delete_load_balancer(lb, cascade=True)
- changed = True
-
- if self.params['wait']:
- self._wait_for_lb(lb, "DELETED", ["ERROR"])
-
- if delete_fip and public_vip_address:
- self.conn.network.delete_ip(public_vip_address)
- changed = True
- elif self.ansible.check_mode:
- self.exit_json(changed=False)
-
- self.exit_json(changed=changed)
- except Exception as e:
- self.fail_json(msg=str(e))
-
-
-def main():
- module = LoadBalancerModule()
- module()
-
-
-if __name__ == "__main__":
- main()
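
The version probing near the top of run() is what gates allowed_cidrs, which Octavia only accepts from load-balancer API 2.12 onwards. A self-contained sketch of that check; the sample dict mimics the shape the module indexes out of conn.load_balancer.get_all_version_data() (region -> interface -> service type -> version entries):

    sample_version_data = {
        'RegionOne': {
            'public': {
                'load-balancer': [
                    {'status': 'SUPPORTED', 'version': '2.0'},
                    {'status': 'CURRENT', 'version': '2.14'},
                ],
            },
        },
    }

    def current_api_version(version_data):
        """Return (major, minor) of the CURRENT load-balancer API version."""
        region = list(version_data.keys())[0]
        interface = list(version_data[region].keys())[0]
        versions = version_data[region][interface]['load-balancer']
        current = next(v['version'] for v in versions if v['status'] == 'CURRENT')
        major, minor = current.split('.')
        return int(major), int(minor)

    # Same gate as the module: only send allowed_cidrs on API 2.12 or newer.
    supports_allowed_cidrs = current_api_version(sample_version_data) >= (2, 12)
    print(supports_allowed_cidrs)
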
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_member.py b/ansible_collections/openstack/cloud/plugins/modules/os_member.py
deleted file mode 100644
index 264f2b8e6..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_member.py
+++ /dev/null
@@ -1,235 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2018 Catalyst Cloud Ltd.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: lb_member
-short_description: Add/Delete a member for a pool in load balancer from OpenStack Cloud
-author: OpenStack Ansible SIG
-description:
- - Add or remove a member for a pool from the OpenStack load-balancer service.
-options:
- name:
- description:
- - Name that has to be given to the member
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- pool:
- description:
- - The name or id of the pool that this member belongs to.
- required: true
- type: str
- protocol_port:
- description:
- - The protocol port number for the member.
- default: 80
- type: int
- address:
- description:
- - The IP address of the member.
- type: str
- subnet_id:
- description:
- - The subnet ID the member service is accessible from.
- type: str
- wait:
- description:
- - If the module should wait for the load balancer to be ACTIVE.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the load balancer to get
- into ACTIVE state.
- default: 180
- type: int
- monitor_address:
- description:
- - IP address used to monitor this member
- type: str
- monitor_port:
- description:
- - Port used to monitor this member
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-id:
- description: The member UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-member:
- description: Dictionary describing the member.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Unique UUID.
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
- name:
- description: Name given to the member.
- type: str
- sample: "test"
- description:
- description: The member description.
- type: str
- sample: "description"
- provisioning_status:
- description: The provisioning status of the member.
- type: str
- sample: "ACTIVE"
- operating_status:
- description: The operating status of the member.
- type: str
- sample: "ONLINE"
- is_admin_state_up:
- description: The administrative state of the member.
- type: bool
- sample: true
- protocol_port:
- description: The protocol port number for the member.
- type: int
- sample: 80
- subnet_id:
- description: The subnet ID the member service is accessible from.
- type: str
- sample: "489247fa-9c25-11e8-9679-00224d6b7bc1"
- address:
- description: The IP address of the backend member server.
- type: str
- sample: "192.168.2.10"
-'''
-
-EXAMPLES = '''
-# Create a member, wait for the member to be created.
-- openstack.cloud.lb_member:
- cloud: mycloud
- endpoint_type: admin
- state: present
- name: test-member
- pool: test-pool
- address: 192.168.10.3
- protocol_port: 8080
-
-# Delete a member
-- openstack.cloud.lb_member:
- cloud: mycloud
- endpoint_type: admin
- state: absent
- name: test-member
- pool: test-pool
-'''
-
-import time
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class LoadbalancerMemberModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- state=dict(default='present', choices=['absent', 'present']),
- pool=dict(required=True),
- address=dict(default=None),
- protocol_port=dict(default=80, type='int'),
- subnet_id=dict(default=None),
- monitor_address=dict(default=None),
- monitor_port=dict(default=None, type='int')
- )
- module_kwargs = dict()
-
- def _wait_for_member_status(self, pool_id, member_id, status,
- failures, interval=5):
- timeout = self.params['timeout']
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- member = self.conn.load_balancer.get_member(member_id, pool_id)
- provisioning_status = member.provisioning_status
- if provisioning_status == status:
- return member
- if provisioning_status in failures:
- self.fail_json(
- msg="Member %s transitioned to failure state %s" %
- (member_id, provisioning_status)
- )
-
- time.sleep(interval)
- total_sleep += interval
-
- self.fail_json(
- msg="Timeout waiting for member %s to transition to %s" %
- (member_id, status)
- )
-
- def run(self):
- name = self.params['name']
- pool = self.params['pool']
-
- changed = False
-
- pool_ret = self.conn.load_balancer.find_pool(name_or_id=pool)
- if not pool_ret:
- self.fail_json(msg='pool %s is not found' % pool)
-
- pool_id = pool_ret.id
- member = self.conn.load_balancer.find_member(name, pool_id)
-
- if self.params['state'] == 'present':
- if not member:
- member = self.conn.load_balancer.create_member(
- pool_ret,
- address=self.params['address'],
- name=name,
- protocol_port=self.params['protocol_port'],
- subnet_id=self.params['subnet_id'],
- monitor_address=self.params['monitor_address'],
- monitor_port=self.params['monitor_port']
- )
- changed = True
-
- if not self.params['wait']:
- self.exit_json(
- changed=changed, member=member.to_dict(), id=member.id)
-
- if self.params['wait']:
- member = self._wait_for_member_status(
- pool_id, member.id, "ACTIVE", ["ERROR"])
-
- self.exit_json(
- changed=changed, member=member.to_dict(), id=member.id)
-
- elif self.params['state'] == 'absent':
- if member:
- self.conn.load_balancer.delete_member(member, pool_ret)
- changed = True
-
- self.exit_json(changed=changed)
-
-
-def main():
- module = LoadbalancerMemberModule()
- module()
-
-
-if __name__ == "__main__":
- main()
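
For reference, the create path above condensed into a minimal openstacksdk script; the cloud, pool and member names and the address/port are placeholders taken from the examples.

    import openstack

    conn = openstack.connect(cloud='mycloud')

    # Resolve the pool first, then create the member only if it is missing,
    # mirroring the module's idempotent behaviour.
    pool = conn.load_balancer.find_pool(name_or_id='test-pool')
    if pool is None:
        raise SystemExit('pool test-pool not found')

    member = conn.load_balancer.find_member('test-member', pool.id)
    if member is None:
        member = conn.load_balancer.create_member(
            pool, name='test-member', address='192.168.10.3', protocol_port=8080)
    print(member.to_dict())
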
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_network.py b/ansible_collections/openstack/cloud/plugins/modules/os_network.py
deleted file mode 100644
index 780d49ba7..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_network.py
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2013, Benno Joy <benno@ansible.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: network
-short_description: Creates/removes networks from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Add or remove network from OpenStack.
-options:
- name:
- description:
- - Name to be assigned to the network.
- required: true
- type: str
- shared:
- description:
- - Whether this network is shared or not.
- type: bool
- default: 'no'
- admin_state_up:
- description:
- - Whether the state should be marked as up or down.
- type: bool
- default: 'yes'
- external:
- description:
- - Whether this network is externally accessible.
- type: bool
- default: 'no'
- state:
- description:
- - Indicate desired state of the resource.
- choices: ['present', 'absent']
- default: present
- type: str
- provider_physical_network:
- description:
- - The physical network where this network object is implemented.
- type: str
- provider_network_type:
- description:
- - The type of physical network that maps to this network resource.
- type: str
- provider_segmentation_id:
- description:
- - An isolated segment on the physical network. The I(network_type)
- attribute defines the segmentation model. For example, if the
- I(network_type) value is vlan, this ID is a vlan identifier. If
- the I(network_type) value is gre, this ID is a gre key.
- type: int
- project:
- description:
- - Project name or ID containing the network (name admin-only)
- type: str
- port_security_enabled:
- description:
- - Whether port security is enabled on the network or not.
- Network will use OpenStack defaults if this option is
- not utilised. Requires openstacksdk>=0.18.
- type: bool
- mtu_size:
- description:
- - The maximum transmission unit (MTU) value to address fragmentation.
- Network will use OpenStack defaults if this option is
- not provided. Requires openstacksdk>=0.18.
- type: int
- aliases: ['mtu']
- dns_domain:
- description:
- - The DNS domain value to set. Requires openstacksdk>=0.29.
- Network will use Openstack defaults if this option is
- not provided.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create an externally accessible network named 'ext_network'.
-- openstack.cloud.network:
- cloud: mycloud
- state: present
- name: ext_network
- external: true
-'''
-
-RETURN = '''
-network:
- description: Dictionary describing the network.
- returned: On success when I(state) is 'present'.
- type: complex
- contains:
- id:
- description: Network ID.
- type: str
- sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
- name:
- description: Network name.
- type: str
- sample: "ext_network"
- shared:
- description: Indicates whether this network is shared across all tenants.
- type: bool
- sample: false
- status:
- description: Network status.
- type: str
- sample: "ACTIVE"
- mtu:
- description: The MTU of a network resource.
- type: int
- sample: 0
- dns_domain:
- description: The DNS domain of a network resource.
- type: str
- sample: "sample.openstack.org."
- admin_state_up:
- description: The administrative state of the network.
- type: bool
- sample: true
- port_security_enabled:
- description: The port security status
- type: bool
- sample: true
- router:external:
- description: Indicates whether this network is externally accessible.
- type: bool
- sample: true
- tenant_id:
- description: The tenant ID.
- type: str
- sample: "06820f94b9f54b119636be2728d216fc"
- subnets:
- description: The associated subnets.
- type: list
- sample: []
- "provider:physical_network":
- description: The physical network where this network object is implemented.
- type: str
- sample: my_vlan_net
- "provider:network_type":
- description: The type of physical network that maps to this network resource.
- type: str
- sample: vlan
- "provider:segmentation_id":
- description: An isolated segment on the physical network.
- type: str
- sample: 101
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class NetworkModule(OpenStackModule):
-
- argument_spec = dict(
- name=dict(required=True),
- shared=dict(default=False, type='bool'),
- admin_state_up=dict(default=True, type='bool'),
- external=dict(default=False, type='bool'),
- provider_physical_network=dict(required=False),
- provider_network_type=dict(required=False),
- provider_segmentation_id=dict(required=False, type='int'),
- state=dict(default='present', choices=['absent', 'present']),
- project=dict(default=None),
- port_security_enabled=dict(type='bool', min_ver='0.18.0'),
- mtu_size=dict(required=False, type='int', min_ver='0.18.0', aliases=['mtu']),
- dns_domain=dict(required=False, min_ver='0.29.0')
- )
-
- def run(self):
-
- state = self.params['state']
- name = self.params['name']
- shared = self.params['shared']
- admin_state_up = self.params['admin_state_up']
- external = self.params['external']
- provider_physical_network = self.params['provider_physical_network']
- provider_network_type = self.params['provider_network_type']
- provider_segmentation_id = self.params['provider_segmentation_id']
- project = self.params['project']
-
- kwargs = self.check_versioned(
- mtu_size=self.params['mtu_size'], port_security_enabled=self.params['port_security_enabled'],
- dns_domain=self.params['dns_domain']
- )
-
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
- project_id = proj['id']
- filters = {'tenant_id': project_id}
- else:
- project_id = None
- filters = None
- net = self.conn.get_network(name, filters=filters)
-
- if state == 'present':
- if not net:
- provider = {}
- if provider_physical_network:
- provider['physical_network'] = provider_physical_network
- if provider_network_type:
- provider['network_type'] = provider_network_type
- if provider_segmentation_id:
- provider['segmentation_id'] = provider_segmentation_id
-
- if project_id is not None:
- net = self.conn.create_network(name, shared, admin_state_up,
- external, provider, project_id,
- **kwargs)
- else:
- net = self.conn.create_network(name, shared, admin_state_up,
- external, provider,
- **kwargs)
- changed = True
- else:
- changed = False
- self.exit(changed=changed, network=net, id=net['id'])
-
- elif state == 'absent':
- if not net:
- self.exit(changed=False)
- else:
- self.conn.delete_network(name)
- self.exit(changed=True)
-
-
-def main():
- module = NetworkModule()
- module()
-
-
-if __name__ == '__main__':
- main()
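
The provider_* options above are folded into a single dict before the create call. A short sketch of that assembly with placeholder values; only keys the user actually set are included, so OpenStack defaults apply to the rest.

    params = {
        'provider_physical_network': 'my_vlan_net',
        'provider_network_type': 'vlan',
        'provider_segmentation_id': 101,
    }

    provider = {}
    if params.get('provider_physical_network'):
        provider['physical_network'] = params['provider_physical_network']
    if params.get('provider_network_type'):
        provider['network_type'] = params['provider_network_type']
    if params.get('provider_segmentation_id'):
        provider['segmentation_id'] = params['provider_segmentation_id']

    # The module passes this dict as the provider argument of
    # conn.create_network(name, shared, admin_state_up, external, provider, ...).
    print(provider)
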
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_networks_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_networks_info.py
deleted file mode 100644
index 251af3e72..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_networks_info.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: networks_info
-short_description: Retrieve information about one or more OpenStack networks.
-author: OpenStack Ansible SIG
-description:
- - Retrieve information about one or more networks from OpenStack.
- - This module was called C(openstack.cloud.networks_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.networks_info) module no longer returns C(ansible_facts)!
-options:
- name:
- description:
- - Name or ID of the Network
- required: false
- type: str
- filters:
- description:
- - A dictionary of metadata to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- required: false
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Gather information about previously created networks
- openstack.cloud.networks_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- register: result
-
-- name: Show openstack networks
- debug:
- msg: "{{ result.openstack_networks }}"
-
-- name: Gather information about a previously created network by name
- openstack.cloud.networks_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- name: network1
- register: result
-
-- name: Show openstack networks
- debug:
- msg: "{{ result.openstack_networks }}"
-
-- name: Gather information about a previously created network with filter
- # Note: name and filters parameters are Not mutually exclusive
- openstack.cloud.networks_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- filters:
- tenant_id: 55e2ce24b2a245b09f181bf025724cbe
- subnets:
- - 057d4bdf-6d4d-4728-bb0f-5ac45a6f7400
- - 443d4dc0-91d4-4998-b21c-357d10433483
- register: result
-
-- name: Show openstack networks
- debug:
- msg: "{{ result.openstack_networks }}"
-'''
-
-RETURN = '''
-openstack_networks:
- description: has all the openstack information about the networks
- returned: always, but can be null
- type: complex
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the network.
- returned: success
- type: str
- status:
- description: Network status.
- returned: success
- type: str
- subnets:
- description: Subnet(s) included in this network.
- returned: success
- type: list
- elements: str
- tenant_id:
- description: Tenant id associated with this network.
- returned: success
- type: str
- shared:
- description: Network shared flag.
- returned: success
- type: bool
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class NetworkInfoModule(OpenStackModule):
-
- deprecated_names = ('networks_facts', 'openstack.cloud.networks_facts')
-
- argument_spec = dict(
- name=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None)
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def run(self):
-
- kwargs = self.check_versioned(
- filters=self.params['filters']
- )
- if self.params['name']:
- kwargs['name_or_id'] = self.params['name']
- networks = self.conn.search_networks(**kwargs)
-
- self.exit(changed=False, openstack_networks=networks)
-
-
-def main():
- module = NetworkInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
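
The whole module reduces to one search call. A minimal openstacksdk sketch of the same query; the cloud name, network name and tenant_id filter are placeholders matching the examples above.

    import openstack

    conn = openstack.connect(cloud='mycloud')

    networks = conn.search_networks(
        name_or_id='network1',
        filters={'tenant_id': '55e2ce24b2a245b09f181bf025724cbe'},
    )
    for net in networks:
        print(net['id'], net['name'], net['status'])
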
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_nova_flavor.py b/ansible_collections/openstack/cloud/plugins/modules/os_nova_flavor.py
deleted file mode 100644
index 8a993ca51..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_nova_flavor.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: compute_flavor
-short_description: Manage OpenStack compute flavors
-author: OpenStack Ansible SIG
-description:
- - Add or remove flavors from OpenStack.
-options:
- state:
- description:
- - Indicate desired state of the resource. When I(state) is 'present',
- then I(ram), I(vcpus), and I(disk) are all required. There are no
- default values for those parameters.
- choices: ['present', 'absent']
- default: present
- type: str
- name:
- description:
- - Flavor name.
- required: true
- type: str
- ram:
- description:
- - Amount of memory, in MB.
- type: int
- vcpus:
- description:
- - Number of virtual CPUs.
- type: int
- disk:
- description:
- - Size of local disk, in GB.
- default: 0
- type: int
- ephemeral:
- description:
- - Ephemeral space size, in GB.
- default: 0
- type: int
- swap:
- description:
- - Swap space size, in MB.
- default: 0
- type: int
- rxtx_factor:
- description:
- - RX/TX factor.
- default: 1.0
- type: float
- is_public:
- description:
- - Make flavor accessible to the public.
- type: bool
- default: 'yes'
- flavorid:
- description:
- - ID for the flavor. This is optional as a unique UUID will be
- assigned if a value is not specified.
- default: "auto"
- type: str
- extra_specs:
- description:
- - Metadata dictionary
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: "Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of local disk, and 10GB of ephemeral."
- openstack.cloud.compute_flavor:
- cloud: mycloud
- state: present
- name: tiny
- ram: 1024
- vcpus: 1
- disk: 10
- ephemeral: 10
-
-- name: "Delete 'tiny' flavor"
- openstack.cloud.compute_flavor:
- cloud: mycloud
- state: absent
- name: tiny
-
-- name: Create flavor with metadata
- openstack.cloud.compute_flavor:
- cloud: mycloud
- state: present
- name: tiny
- ram: 1024
- vcpus: 1
- disk: 10
- extra_specs:
- "quota:disk_read_iops_sec": 5000
- "aggregate_instance_extra_specs:pinned": false
-'''
-
-RETURN = '''
-flavor:
- description: Dictionary describing the flavor.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Flavor ID.
- returned: success
- type: str
- sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
- name:
- description: Flavor name.
- returned: success
- type: str
- sample: "tiny"
- disk:
- description: Size of local disk, in GB.
- returned: success
- type: int
- sample: 10
- ephemeral:
- description: Ephemeral space size, in GB.
- returned: success
- type: int
- sample: 10
- ram:
- description: Amount of memory, in MB.
- returned: success
- type: int
- sample: 1024
- swap:
- description: Swap space size, in MB.
- returned: success
- type: int
- sample: 100
- vcpus:
- description: Number of virtual CPUs.
- returned: success
- type: int
- sample: 2
- is_public:
- description: Make flavor accessible to the public.
- returned: success
- type: bool
- sample: true
- extra_specs:
- description: Flavor metadata
- returned: success
- type: dict
- sample:
- "quota:disk_read_iops_sec": 5000
- "aggregate_instance_extra_specs:pinned": false
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ComputeFlavorModule(OpenStackModule):
- argument_spec = dict(
- state=dict(required=False, default='present',
- choices=['absent', 'present']),
- name=dict(required=True),
-
- # required when state is 'present'
- ram=dict(required=False, type='int'),
- vcpus=dict(required=False, type='int'),
-
- disk=dict(required=False, default=0, type='int'),
- ephemeral=dict(required=False, default=0, type='int'),
- swap=dict(required=False, default=0, type='int'),
- rxtx_factor=dict(required=False, default=1.0, type='float'),
- is_public=dict(required=False, default=True, type='bool'),
- flavorid=dict(required=False, default="auto"),
- extra_specs=dict(required=False, default=None, type='dict'),
- )
-
- module_kwargs = dict(
- required_if=[
- ('state', 'present', ['ram', 'vcpus', 'disk'])
- ],
- supports_check_mode=True
- )
-
- def _system_state_change(self, flavor):
- state = self.params['state']
- if state == 'present' and not flavor:
- return True
- if state == 'absent' and flavor:
- return True
- return False
-
- def run(self):
- state = self.params['state']
- name = self.params['name']
- extra_specs = self.params['extra_specs'] or {}
-
- flavor = self.conn.get_flavor(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(flavor))
-
- if state == 'present':
- old_extra_specs = {}
- require_update = False
-
- if flavor:
- old_extra_specs = flavor['extra_specs']
- if flavor['swap'] == "":
- flavor['swap'] = 0
- for param_key in ['ram', 'vcpus', 'disk', 'ephemeral',
- 'swap', 'rxtx_factor', 'is_public']:
- if self.params[param_key] != flavor[param_key]:
- require_update = True
- break
- flavorid = self.params['flavorid']
- if flavor and require_update:
- self.conn.delete_flavor(name)
- old_extra_specs = {}
- if flavorid == 'auto':
- flavorid = flavor['id']
- flavor = None
-
- if not flavor:
- flavor = self.conn.create_flavor(
- name=name,
- ram=self.params['ram'],
- vcpus=self.params['vcpus'],
- disk=self.params['disk'],
- flavorid=flavorid,
- ephemeral=self.params['ephemeral'],
- swap=self.params['swap'],
- rxtx_factor=self.params['rxtx_factor'],
- is_public=self.params['is_public']
- )
- changed = True
- else:
- changed = False
-
- new_extra_specs = dict([(k, str(v)) for k, v in extra_specs.items()])
- unset_keys = set(old_extra_specs.keys()) - set(extra_specs.keys())
-
- if unset_keys and not require_update:
- self.conn.unset_flavor_specs(flavor['id'], unset_keys)
-
- if old_extra_specs != new_extra_specs:
- self.conn.set_flavor_specs(flavor['id'], extra_specs)
-
- changed = (changed or old_extra_specs != new_extra_specs)
-
- self.exit_json(
- changed=changed, flavor=flavor, id=flavor['id'])
-
- elif state == 'absent':
- if flavor:
- self.conn.delete_flavor(name)
- self.exit_json(changed=True)
- self.exit_json(changed=False)
-
-
-def main():
- module = ComputeFlavorModule()
- module()
-
-
-if __name__ == '__main__':
- main()
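
The extra_specs handling above is the least obvious part of the module: desired values are stringified (Nova stores extra spec values as strings) and keys missing from the desired dict are collected for unsetting. A small self-contained sketch of that reconciliation:

    old_extra_specs = {'quota:disk_read_iops_sec': '5000', 'hw:cpu_policy': 'shared'}
    desired_extra_specs = {'quota:disk_read_iops_sec': 5000}

    # Compare like with like: Nova returns extra spec values as strings.
    new_extra_specs = {k: str(v) for k, v in desired_extra_specs.items()}
    unset_keys = set(old_extra_specs) - set(desired_extra_specs)

    print(unset_keys)                          # {'hw:cpu_policy'}
    print(old_extra_specs != new_extra_specs)  # True, so the module reports changed
    # The module applies the result with conn.unset_flavor_specs(flavor_id,
    # unset_keys) and conn.set_flavor_specs(flavor_id, desired_extra_specs).
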
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_nova_host_aggregate.py b/ansible_collections/openstack/cloud/plugins/modules/os_nova_host_aggregate.py
deleted file mode 100644
index 4c95fd299..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_nova_host_aggregate.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/python
-# Copyright 2016 Jakub Jursa <jakub.jursa1@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: host_aggregate
-short_description: Manage OpenStack host aggregates
-author: OpenStack Ansible SIG
-description:
- - Create, update, or delete OpenStack host aggregates. If an aggregate
- with the supplied name already exists, it will be updated with the
- new name, new availability zone, new metadata and new list of hosts.
-options:
- name:
- description: Name of the aggregate.
- required: true
- type: str
- metadata:
- description: Metadata dict.
- type: dict
- availability_zone:
- description: Availability zone to create aggregate into.
- type: str
- hosts:
- description: List of hosts to set for an aggregate.
- type: list
- elements: str
- purge_hosts:
- description: Whether hosts not in I(hosts) should be removed from the aggregate
- type: bool
- default: true
- state:
- description: Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a host aggregate
-- openstack.cloud.host_aggregate:
- cloud: mycloud
- state: present
- name: db_aggregate
- hosts:
- - host1
- - host2
- metadata:
- type: dbcluster
-
-# Add an additional host to the aggregate
-- openstack.cloud.host_aggregate:
- cloud: mycloud
- state: present
- name: db_aggregate
- hosts:
- - host3
- purge_hosts: false
- metadata:
- type: dbcluster
-
-# Delete an aggregate
-- openstack.cloud.host_aggregate:
- cloud: mycloud
- state: absent
- name: db_aggregate
-'''
-
-RETURN = r'''
-aggregate:
- description: A host aggregate resource.
- type: complex
- returned: On success, when I(state) is present
- contains:
- availability_zone:
- description: Availability zone of the aggregate
- type: str
- returned: always
- deleted:
- description: Whether or not the resource is deleted
- type: bool
- returned: always
- hosts:
- description: Hosts belonging to the aggregate
- type: list
- returned: always
- id:
- description: The UUID of the aggregate.
- type: str
- returned: always
- metadata:
- description: Metadata attached to the aggregate
- type: dict
- returned: always
- name:
- description: Name of the aggregate
- type: str
- returned: always
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ComputeHostAggregateModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- metadata=dict(required=False, default=None, type='dict'),
- availability_zone=dict(required=False, default=None),
- hosts=dict(required=False, default=None, type='list', elements='str'),
- purge_hosts=dict(default=True, type='bool'),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _find_aggregate(self, name_or_id):
- aggregates = self.conn.search_aggregates(name_or_id=name_or_id)
- if len(aggregates) == 1:
- return aggregates[0]
- elif len(aggregates) == 0:
- return None
- raise Exception("Aggregate is not unique, this should be impossible")
-
- def _needs_update(self, aggregate):
- new_metadata = self.params['metadata'] or {}
-
- if self.params['availability_zone'] is not None:
- new_metadata['availability_zone'] = self.params['availability_zone']
-
- if self.params['hosts'] is not None:
- if self.params['purge_hosts']:
- if set(self.params['hosts']) != set(aggregate.hosts):
- return True
- else:
- intersection = set(self.params['hosts']).intersection(set(aggregate.hosts))
- if set(self.params['hosts']) != intersection:
- return True
-
- for param in ('availability_zone', 'metadata'):
- if self.params[param] is not None and \
- self.params[param] != aggregate[param]:
- return True
-
- return False
-
- def _system_state_change(self, aggregate):
- state = self.params['state']
- if state == 'absent' and aggregate:
- return True
-
- if state == 'present':
- if aggregate is None:
- return True
- return self._needs_update(aggregate)
-
- return False
-
- def _update_hosts(self, aggregate, hosts, purge_hosts):
- if hosts is None:
- return
-
- hosts_to_add = set(hosts) - set(aggregate['hosts'] or [])
- for host in hosts_to_add:
- self.conn.add_host_to_aggregate(aggregate.id, host)
-
- if not purge_hosts:
- return
-
- hosts_to_remove = set(aggregate["hosts"] or []) - set(hosts)
- for host in hosts_to_remove:
- self.conn.remove_host_from_aggregate(aggregate.id, host)
-
- def run(self):
- name = self.params['name']
- metadata = self.params['metadata']
- availability_zone = self.params['availability_zone']
- hosts = self.params['hosts']
- purge_hosts = self.params['purge_hosts']
- state = self.params['state']
-
- if metadata is not None:
- metadata.pop('availability_zone', None)
-
- aggregate = self._find_aggregate(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(aggregate))
-
- changed = False
- if state == 'present':
- if aggregate is None:
- aggregate = self.conn.create_aggregate(
- name=name, availability_zone=availability_zone)
- self._update_hosts(aggregate, hosts, False)
- if metadata:
- self.conn.set_aggregate_metadata(aggregate.id, metadata)
- changed = True
- elif self._needs_update(aggregate):
- if availability_zone is not None:
- aggregate = self.conn.update_aggregate(
- aggregate.id, name=name,
- availability_zone=availability_zone)
- if metadata is not None:
- metas = metadata
- for i in set(aggregate.metadata.keys() - set(metadata.keys())):
- if i != 'availability_zone':
- metas[i] = None
- self.conn.set_aggregate_metadata(aggregate.id, metas)
- self._update_hosts(aggregate, hosts, purge_hosts)
- changed = True
- aggregate = self._find_aggregate(name)
- self.exit_json(changed=changed, aggregate=aggregate)
-
- elif state == 'absent' and aggregate is not None:
- self._update_hosts(aggregate, [], True)
- self.conn.delete_aggregate(aggregate.id)
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = ComputeHostAggregateModule()
- module()
-
-
-if __name__ == '__main__':
- main()
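
# A minimal openstacksdk sketch of the cloud-layer calls wrapped by the
# host_aggregate module above, assuming a "mycloud" entry in clouds.yaml and
# the db_aggregate example from the EXAMPLES section.
import openstack

conn = openstack.connect(cloud='mycloud')
aggregate = conn.create_aggregate(name='db_aggregate')
for host in ('host1', 'host2'):
    conn.add_host_to_aggregate(aggregate.id, host)
conn.set_aggregate_metadata(aggregate.id, {'type': 'dbcluster'})
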
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_object.py b/ansible_collections/openstack/cloud/plugins/modules/os_object.py
deleted file mode 100644
index 4a22604ed..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_object.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2013, Benno Joy <benno@ansible.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: object
-short_description: Create or Delete objects and containers from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Create or Delete objects and containers from OpenStack
-options:
- container:
- description:
- - The name of the container in which to create the object
- required: true
- type: str
- name:
- description:
-      - Name to be given to the object. If omitted, operations will be on
- the entire container
- required: false
- type: str
- filename:
- description:
- - Path to local file to be uploaded.
- required: false
- type: str
- container_access:
- description:
-      - Desired container access level.
- required: false
- choices: ['private', 'public']
- default: private
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: "Create a object named 'fstab' in the 'config' container"
- openstack.cloud.object:
- cloud: mordred
- state: present
- name: fstab
- container: config
- filename: /etc/fstab
-
-- name: Delete a container called config and all of its contents
- openstack.cloud.object:
- cloud: rax-iad
- state: absent
- container: config
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class SwiftObjectModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=False, default=None),
- container=dict(required=True),
- filename=dict(required=False, default=None),
- container_access=dict(default='private', choices=['private', 'public']),
- state=dict(default='present', choices=['absent', 'present']),
- )
- module_kwargs = dict()
-
- def process_object(
- self, container, name, filename, container_access, **kwargs
- ):
- changed = False
- container_obj = self.conn.get_container(container)
- if kwargs['state'] == 'present':
- if not container_obj:
- container_obj = self.conn.create_container(container)
- changed = True
- if self.conn.get_container_access(container) != container_access:
- self.conn.set_container_access(container, container_access)
- changed = True
- if name:
- if self.conn.is_object_stale(container, name, filename):
- self.conn.create_object(container, name, filename)
- changed = True
- else:
- if container_obj:
- if name:
- if self.conn.get_object_metadata(container, name):
- self.conn.delete_object(container, name)
- changed = True
- else:
- self.conn.delete_container(container)
- changed = True
- return changed
-
- def run(self):
- changed = self.process_object(**self.params)
-
- self.exit_json(changed=changed)
-
-
-def main():
- module = SwiftObjectModule()
- module()
-
-
-if __name__ == "__main__":
- main()
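
# A minimal openstacksdk sketch of the Swift calls wrapped by the object
# module above, assuming a "mordred" cloud entry and the fstab example from
# the EXAMPLES section.
import openstack

conn = openstack.connect(cloud='mordred')
if not conn.get_container('config'):
    conn.create_container('config')
if conn.get_container_access('config') != 'private':
    conn.set_container_access('config', 'private')
# Upload only when the local file differs from the stored object.
if conn.is_object_stale('config', 'fstab', '/etc/fstab'):
    conn.create_object('config', 'fstab', '/etc/fstab')
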
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_pool.py b/ansible_collections/openstack/cloud/plugins/modules/os_pool.py
deleted file mode 100644
index 6f73ea1ce..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_pool.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2018 Catalyst Cloud Ltd.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: lb_pool
-short_description: Add/Delete a pool in the load balancing service from OpenStack Cloud
-author: OpenStack Ansible SIG
-description:
- - Add or Remove a pool from the OpenStack load-balancer service.
-options:
- name:
- description:
- - Name that has to be given to the pool
- required: true
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- loadbalancer:
- description:
- - The name or id of the load balancer that this pool belongs to.
- Either loadbalancer or listener must be specified for pool creation.
- type: str
- listener:
- description:
- - The name or id of the listener that this pool belongs to.
- Either loadbalancer or listener must be specified for pool creation.
- type: str
- protocol:
- description:
- - The protocol for the pool.
- choices: [HTTP, HTTPS, PROXY, TCP, UDP]
- default: HTTP
- type: str
- lb_algorithm:
- description:
- - The load balancing algorithm for the pool.
- choices: [LEAST_CONNECTIONS, ROUND_ROBIN, SOURCE_IP]
- default: ROUND_ROBIN
- type: str
- wait:
- description:
- - If the module should wait for the pool to be ACTIVE.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the pool to get
- into ACTIVE state.
- default: 180
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-RETURN = '''
-id:
- description: The pool UUID.
- returned: On success when I(state) is 'present'
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
-pool:
- description: Dictionary describing the pool.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Unique UUID.
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
- name:
- description: Name given to the pool.
- type: str
- sample: "test"
- description:
- description: The pool description.
- type: str
- sample: "description"
- loadbalancers:
- description: A list of load balancer IDs.
- type: list
- sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
- listeners:
- description: A list of listener IDs.
- type: list
- sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
- members:
- description: A list of member IDs.
- type: list
- sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
- loadbalancer_id:
- description: The load balancer ID the pool belongs to. This field is set when the pool doesn't belong to any listener in the load balancer.
- type: str
- sample: "7c4be3f8-9c2f-11e8-83b3-44a8422643a4"
- listener_id:
- description: The listener ID the pool belongs to.
- type: str
- sample: "956aa716-9c2f-11e8-83b3-44a8422643a4"
- provisioning_status:
- description: The provisioning status of the pool.
- type: str
- sample: "ACTIVE"
- operating_status:
- description: The operating status of the pool.
- type: str
- sample: "ONLINE"
- is_admin_state_up:
- description: The administrative state of the pool.
- type: bool
- sample: true
- protocol:
- description: The protocol for the pool.
- type: str
- sample: "HTTP"
- lb_algorithm:
- description: The load balancing algorithm for the pool.
- type: str
- sample: "ROUND_ROBIN"
-'''
-
-EXAMPLES = '''
-# Create a pool, wait for the pool to be active.
-- openstack.cloud.lb_pool:
- cloud: mycloud
- endpoint_type: admin
- state: present
- name: test-pool
- loadbalancer: test-loadbalancer
- protocol: HTTP
- lb_algorithm: ROUND_ROBIN
-
-# Delete a pool
-- openstack.cloud.lb_pool:
- cloud: mycloud
- endpoint_type: admin
- state: absent
- name: test-pool
-'''
-
-import time
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class LoadbalancerPoolModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- state=dict(default='present', choices=['absent', 'present']),
- loadbalancer=dict(default=None),
- listener=dict(default=None),
- protocol=dict(default='HTTP',
- choices=['HTTP', 'HTTPS', 'TCP', 'UDP', 'PROXY']),
- lb_algorithm=dict(
- default='ROUND_ROBIN',
- choices=['ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP']
- )
- )
- module_kwargs = dict(
- mutually_exclusive=[['loadbalancer', 'listener']]
- )
-
- def _wait_for_pool_status(self, pool_id, status, failures,
- interval=5):
- timeout = self.params['timeout']
-
- total_sleep = 0
- if failures is None:
- failures = []
-
- while total_sleep < timeout:
- pool = self.conn.load_balancer.get_pool(pool_id)
- provisioning_status = pool.provisioning_status
- if provisioning_status == status:
- return pool
- if provisioning_status in failures:
- self.fail_json(
- msg="pool %s transitioned to failure state %s" %
- (pool_id, provisioning_status)
- )
-
- time.sleep(interval)
- total_sleep += interval
-
- self.fail_json(
- msg="timeout waiting for pool %s to transition to %s" %
- (pool_id, status)
- )
-
- def run(self):
- loadbalancer = self.params['loadbalancer']
- listener = self.params['listener']
-
- changed = False
- pool = self.conn.load_balancer.find_pool(name_or_id=self.params['name'])
-
- if self.params['state'] == 'present':
- if not pool:
- loadbalancer_id = None
- if not (loadbalancer or listener):
- self.fail_json(
- msg="either loadbalancer or listener must be provided"
- )
-
- if loadbalancer:
- lb = self.conn.load_balancer.find_load_balancer(loadbalancer)
- if not lb:
- self.fail_json(
- msg='load balancer %s is not found' % loadbalancer)
- loadbalancer_id = lb.id
-
- listener_id = None
- if listener:
- listener_ret = self.conn.load_balancer.find_listener(listener)
- if not listener_ret:
- self.fail_json(
- msg='listener %s is not found' % listener)
- listener_id = listener_ret.id
-
- pool = self.conn.load_balancer.create_pool(
- name=self.params['name'],
- loadbalancer_id=loadbalancer_id,
- listener_id=listener_id,
- protocol=self.params['protocol'],
- lb_algorithm=self.params['lb_algorithm']
- )
- changed = True
-
- if not self.params['wait']:
- self.exit_json(
- changed=changed, pool=pool.to_dict(), id=pool.id)
-
- if self.params['wait']:
- pool = self._wait_for_pool_status(
- pool.id, "ACTIVE", ["ERROR"])
-
- self.exit_json(
- changed=changed, pool=pool.to_dict(), id=pool.id)
-
- elif self.params['state'] == 'absent':
- if pool:
- self.conn.load_balancer.delete_pool(pool)
- changed = True
-
- self.exit_json(changed=changed)
-
-
-def main():
- module = LoadbalancerPoolModule()
- module()
-
-
-if __name__ == "__main__":
- main()
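
# A minimal openstacksdk sketch of the Octavia calls wrapped by the lb_pool
# module above, assuming a "mycloud" cloud entry and the test-loadbalancer /
# test-pool names from the EXAMPLES section.
import time

import openstack

conn = openstack.connect(cloud='mycloud')
lb = conn.load_balancer.find_load_balancer('test-loadbalancer')
pool = conn.load_balancer.find_pool(name_or_id='test-pool')
if not pool:
    pool = conn.load_balancer.create_pool(
        name='test-pool', loadbalancer_id=lb.id,
        protocol='HTTP', lb_algorithm='ROUND_ROBIN')
# Poll, as _wait_for_pool_status did, until the pool reports ACTIVE.
while conn.load_balancer.get_pool(pool.id).provisioning_status != 'ACTIVE':
    time.sleep(5)
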
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_port.py b/ansible_collections/openstack/cloud/plugins/modules/os_port.py
deleted file mode 100644
index accef4fcc..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_port.py
+++ /dev/null
@@ -1,530 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: port
-short_description: Add/Update/Delete ports from an OpenStack cloud.
-author: OpenStack Ansible SIG
-description:
- - Add, Update or Remove ports from an OpenStack cloud. A I(state) of
- 'present' will ensure the port is created or updated if required.
-options:
- network:
- description:
- - Network ID or name this port belongs to.
- - Required when creating a new port.
- type: str
- name:
- description:
- - Name that has to be given to the port.
- type: str
- fixed_ips:
- description:
- - Desired IP and/or subnet for this port. Subnet is referenced by
- subnet_id and IP is referenced by ip_address.
- type: list
- elements: dict
- suboptions:
- ip_address:
- description: The fixed IP address to attempt to allocate.
- required: true
- type: str
- subnet_id:
- description: The subnet to attach the IP address to.
- type: str
- admin_state_up:
- description:
- - Sets admin state.
- type: bool
- mac_address:
- description:
- - MAC address of this port.
- type: str
- security_groups:
- description:
- - Security group(s) ID(s) or name(s) associated with the port (comma
- separated string or YAML list)
- type: list
- elements: str
- no_security_groups:
- description:
- - Do not associate a security group with this port.
- type: bool
- default: 'no'
- allowed_address_pairs:
- description:
- - "Allowed address pairs list. Allowed address pairs are supported with
- dictionary structure.
- e.g. allowed_address_pairs:
- - ip_address: 10.1.0.12
- mac_address: ab:cd:ef:12:34:56
- - ip_address: ..."
- type: list
- elements: dict
- suboptions:
- ip_address:
- description: The IP address.
- type: str
- mac_address:
- description: The MAC address.
- type: str
- extra_dhcp_opts:
- description:
- - "Extra dhcp options to be assigned to this port. Extra options are
-        supported with dictionary structure. Note that options cannot be
-        removed, only updated.
- e.g. extra_dhcp_opts:
- - opt_name: opt name1
- opt_value: value1
- ip_version: 4
- - opt_name: ..."
- type: list
- elements: dict
- suboptions:
- opt_name:
- description: The name of the DHCP option to set.
- type: str
- required: true
- opt_value:
- description: The value of the DHCP option to set.
- type: str
- required: true
- ip_version:
- description: The IP version this DHCP option is for.
- type: int
- required: true
- device_owner:
- description:
-      - The entity type that uses this port.
- type: str
- device_id:
- description:
- - Device ID of device using this port.
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- vnic_type:
- description:
- - The type of the port that should be created
- choices: [normal, direct, direct-physical, macvtap, baremetal, virtio-forwarder]
- type: str
- port_security_enabled:
- description:
- - Whether to enable or disable the port security on the network.
- type: bool
- binding_profile:
- description:
- - Binding profile dict that the port should be created with.
- type: dict
- dns_name:
- description:
-      - The DNS name of the port (only with dns-integration enabled).
- type: str
- dns_domain:
- description:
-      - The DNS domain of the port (only with dns-integration enabled).
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a port
-- openstack.cloud.port:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: port1
- network: foo
-
-# Create a port with a static IP
-- openstack.cloud.port:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: port1
- network: foo
- fixed_ips:
- - ip_address: 10.1.0.21
-
-# Create a port with No security groups
-- openstack.cloud.port:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: port1
- network: foo
- no_security_groups: True
-
-# Update the existing 'port1' port with multiple security groups (version 1)
-- openstack.cloud.port:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: port1
- security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...
-
-# Update the existing 'port1' port with multiple security groups (version 2)
-- openstack.cloud.port:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: port1
- security_groups:
- - 1496e8c7-4918-482a-9172-f4f00fc4a3a5
- - 057d4bdf-6d4d-472...
-
-# Create port of type 'direct'
-- openstack.cloud.port:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: port1
- network: foo
- vnic_type: direct
-
-# Create a port with binding profile
-- openstack.cloud.port:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: port1
- network: foo
- binding_profile:
- "pci_slot": "0000:03:11.1"
- "physical_network": "provider"
-'''
-
-RETURN = '''
-id:
- description: Unique UUID.
- returned: success
- type: str
-name:
- description: Name given to the port.
- returned: success
- type: str
-network_id:
- description: Network ID this port belongs in.
- returned: success
- type: str
-security_groups:
- description: Security group(s) associated with this port.
- returned: success
- type: list
-status:
- description: Port's status.
- returned: success
- type: str
-fixed_ips:
- description: Fixed ip(s) associated with this port.
- returned: success
- type: list
-tenant_id:
- description: Tenant id associated with this port.
- returned: success
- type: str
-allowed_address_pairs:
- description: Allowed address pairs with this port.
- returned: success
- type: list
-admin_state_up:
- description: Admin state up flag for this port.
- returned: success
- type: bool
-vnic_type:
- description: Type of the created port
- returned: success
- type: str
-port_security_enabled:
- description: Port security state on the network.
- returned: success
- type: bool
-binding:profile:
-  description: Port binding profile
- returned: success
- type: dict
-'''
-
-from ansible.module_utils.basic import missing_required_lib
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-try:
- from collections import OrderedDict
- HAS_ORDEREDDICT = True
-except ImportError:
- try:
- from ordereddict import OrderedDict
- HAS_ORDEREDDICT = True
- except ImportError:
- HAS_ORDEREDDICT = False
-
-
-class NetworkPortModule(OpenStackModule):
- argument_spec = dict(
- network=dict(required=False),
- name=dict(required=False),
- fixed_ips=dict(type='list', default=None, elements='dict'),
- admin_state_up=dict(type='bool', default=None),
- mac_address=dict(default=None),
- security_groups=dict(default=None, type='list', elements='str'),
- no_security_groups=dict(default=False, type='bool'),
- allowed_address_pairs=dict(type='list', default=None, elements='dict'),
- extra_dhcp_opts=dict(type='list', default=None, elements='dict'),
- device_owner=dict(default=None),
- device_id=dict(default=None),
- state=dict(default='present', choices=['absent', 'present']),
- vnic_type=dict(default=None,
- choices=['normal', 'direct', 'direct-physical',
- 'macvtap', 'baremetal', 'virtio-forwarder']),
- port_security_enabled=dict(default=None, type='bool'),
- binding_profile=dict(default=None, type='dict'),
- dns_name=dict(type='str', default=None),
- dns_domain=dict(type='str', default=None)
- )
-
- module_kwargs = dict(
- mutually_exclusive=[
- ['no_security_groups', 'security_groups'],
- ],
- supports_check_mode=True
- )
-
- def _is_dns_integration_enabled(self):
- """ Check if dns-integraton is enabled """
- for ext in self.conn.network.extensions():
- if ext.alias == 'dns-integration':
- return True
- return False
-
- def _needs_update(self, port):
- """Check for differences in the updatable values.
-
- NOTE: We don't currently allow name updates.
- """
- compare_simple = ['admin_state_up',
- 'mac_address',
- 'device_owner',
- 'device_id',
- 'binding:vnic_type',
- 'port_security_enabled',
- 'binding:profile']
- compare_dns = ['dns_name', 'dns_domain']
- compare_list_dict = ['allowed_address_pairs',
- 'extra_dhcp_opts']
- compare_list = ['security_groups']
-
- if self.conn.has_service('dns') and \
- self._is_dns_integration_enabled():
- for key in compare_dns:
- if self.params[key] is not None and \
- self.params[key] != port[key]:
- return True
-
- for key in compare_simple:
- if self.params[key] is not None and self.params[key] != port[key]:
- return True
- for key in compare_list:
- if (
- self.params[key] is not None
- and set(self.params[key]) != set(port[key])
- ):
- return True
-
- for key in compare_list_dict:
- if not self.params[key]:
- if port.get(key):
- return True
-
- if self.params[key]:
- if not port.get(key):
- return True
-
- # sort dicts in list
- port_ordered = [OrderedDict(sorted(d.items())) for d in port[key]]
- param_ordered = [OrderedDict(sorted(d.items())) for d in self.params[key]]
-
- for d in param_ordered:
- if d not in port_ordered:
- return True
-
- for d in port_ordered:
- if d not in param_ordered:
- return True
-
-        # NOTE: if the port was created or updated with 'no_security_groups=True',
-        # a subsequent update without the 'no_security_groups' flag (or with
-        # 'no_security_groups=False') and with no 'security_groups' specified
-        # will not update the port back to having the default security group
-        # applied.
- if self.params['no_security_groups'] and port['security_groups'] != []:
- return True
-
- if self.params['fixed_ips'] is not None:
- for item in self.params['fixed_ips']:
- if 'ip_address' in item:
- # if ip_address in request does not match any in existing port,
- # update is required.
- if not any(match['ip_address'] == item['ip_address']
- for match in port['fixed_ips']):
- return True
- if 'subnet_id' in item:
- return True
- for item in port['fixed_ips']:
- # if ip_address in existing port does not match any in request,
- # update is required.
- if not any(match.get('ip_address') == item['ip_address']
- for match in self.params['fixed_ips']):
- return True
-
- return False
-
- def _system_state_change(self, port):
- state = self.params['state']
- if state == 'present':
- if not port:
- return True
- return self._needs_update(port)
- if state == 'absent' and port:
- return True
- return False
-
- def _compose_port_args(self):
- port_kwargs = {}
- optional_parameters = ['name',
- 'fixed_ips',
- 'admin_state_up',
- 'mac_address',
- 'security_groups',
- 'allowed_address_pairs',
- 'extra_dhcp_opts',
- 'device_owner',
- 'device_id',
- 'binding:vnic_type',
- 'port_security_enabled',
- 'binding:profile']
-
- if self.conn.has_service('dns') and \
- self._is_dns_integration_enabled():
- optional_parameters.extend(['dns_name', 'dns_domain'])
-
- for optional_param in optional_parameters:
- if self.params[optional_param] is not None:
- port_kwargs[optional_param] = self.params[optional_param]
-
- if self.params['no_security_groups']:
- port_kwargs['security_groups'] = []
-
- return port_kwargs
-
- def get_security_group_id(self, security_group_name_or_id):
- security_group = self.conn.get_security_group(security_group_name_or_id)
- if not security_group:
- self.fail_json(msg="Security group: %s, was not found"
- % security_group_name_or_id)
- return security_group['id']
-
- def run(self):
- if not HAS_ORDEREDDICT:
- self.fail_json(msg=missing_required_lib('ordereddict'))
-
- name = self.params['name']
- state = self.params['state']
-
- if self.params['security_groups']:
-            # translate security_groups to UUIDs if names were provided
- self.params['security_groups'] = [
- self.get_security_group_id(v)
- for v in self.params['security_groups']
- ]
-
-        # The Neutron API accepts 'binding:vnic_type' as an argument
- # for the port type.
- self.params['binding:vnic_type'] = self.params.pop('vnic_type')
-        # The Neutron API accepts 'binding:profile' as an argument
- # for the port binding profile type.
- self.params['binding:profile'] = self.params.pop('binding_profile')
-
- port = None
- network_id = None
- if name:
- port = self.conn.get_port(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(port))
-
- changed = False
- if state == 'present':
- if not port:
- network = self.params['network']
- if not network:
- self.fail_json(
- msg="Parameter 'network' is required in Port Create"
- )
- port_kwargs = self._compose_port_args()
- network_object = self.conn.get_network(network)
-
- if network_object:
- network_id = network_object['id']
- else:
- self.fail_json(
- msg="Specified network was not found."
- )
-
- port_kwargs['network_id'] = network_id
- port = self.conn.network.create_port(**port_kwargs)
- changed = True
- else:
- if self._needs_update(port):
- port_kwargs = self._compose_port_args()
- port = self.conn.network.update_port(port['id'],
- **port_kwargs)
- changed = True
- self.exit_json(changed=changed, id=port['id'], port=port)
-
- if state == 'absent':
- if port:
- self.conn.delete_port(port['id'])
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = NetworkPortModule()
- module()
-
-
-if __name__ == '__main__':
- main()
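
# A minimal openstacksdk sketch of the Neutron calls wrapped by the port
# module above, assuming a "mycloud" cloud entry and the port1/foo example
# from the EXAMPLES section; note the 'binding:' prefixed keys the module
# builds before calling create_port.
import openstack

conn = openstack.connect(cloud='mycloud')
network = conn.get_network('foo')
port = conn.get_port('port1')
if not port:
    port = conn.network.create_port(
        name='port1',
        network_id=network['id'],
        fixed_ips=[{'ip_address': '10.1.0.21'}],
        **{'binding:vnic_type': 'direct'})
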
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_port_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_port_info.py
deleted file mode 100644
index 0ed3f0599..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_port_info.py
+++ /dev/null
@@ -1,210 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2016 IBM
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
-module: port_info
-short_description: Retrieve information about ports within OpenStack.
-author: OpenStack Ansible SIG
-description:
- - Retrieve information about ports from OpenStack.
- - This module was called C(openstack.cloud.port_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.port_info) module no longer returns C(ansible_facts)!
-options:
- port:
- description:
- - Unique name or ID of a port.
- type: str
- filters:
- description:
-      - A dictionary of metadata to use for further filtering. Elements
- of this dictionary will be matched against the returned port
- dictionaries. Matching is currently limited to strings within
- the port dictionary, or strings within nested dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Gather information about all ports
-- openstack.cloud.port_info:
- cloud: mycloud
- register: result
-
-- debug:
- msg: "{{ result.openstack_ports }}"
-
-# Gather information about a single port
-- openstack.cloud.port_info:
- cloud: mycloud
- port: 6140317d-e676-31e1-8a4a-b1913814a471
-
-# Gather information about all ports that have device_id set to a specific value
-# and with a status of ACTIVE.
-- openstack.cloud.port_info:
- cloud: mycloud
- filters:
- device_id: 1038a010-3a37-4a9d-82ea-652f1da36597
- status: ACTIVE
-'''
-
-RETURN = '''
-openstack_ports:
- description: List of port dictionaries. A subset of the dictionary keys
- listed below may be returned, depending on your cloud provider.
- returned: always, but can be null
- type: complex
- contains:
- admin_state_up:
-            description: The administrative state of the port, which is
- up (true) or down (false).
- returned: success
- type: bool
- sample: true
- allowed_address_pairs:
- description: A set of zero or more allowed address pairs. An
- address pair consists of an IP address and MAC address.
- returned: success
- type: list
- sample: []
- "binding:host_id":
- description: The UUID of the host where the port is allocated.
- returned: success
- type: str
- sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
- "binding:profile":
-            description: A dictionary that enables the application running on
- the host to pass and receive VIF port-specific
- information to the plug-in.
- returned: success
- type: dict
- sample: {}
- "binding:vif_details":
- description: A dictionary that enables the application to pass
- information about functions that the Networking API
- provides.
- returned: success
- type: dict
- sample: {"port_filter": true}
- "binding:vif_type":
- description: The VIF type for the port.
- returned: success
-            type: str
- sample: "ovs"
- "binding:vnic_type":
- description: The virtual network interface card (vNIC) type that is
- bound to the neutron port.
- returned: success
- type: str
- sample: "normal"
- device_id:
- description: The UUID of the device that uses this port.
- returned: success
- type: str
- sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
- device_owner:
-            description: The entity type that uses this port.
- returned: success
- type: str
- sample: "network:router_interface"
- dns_assignment:
- description: DNS assignment information.
- returned: success
- type: list
- dns_name:
- description: DNS name
- returned: success
- type: str
- sample: ""
- extra_dhcp_opts:
- description: A set of zero or more extra DHCP option pairs.
- An option pair consists of an option value and name.
- returned: success
- type: list
- sample: []
- fixed_ips:
- description: The IP addresses for the port. Includes the IP address
- and UUID of the subnet.
- returned: success
- type: list
- id:
- description: The UUID of the port.
- returned: success
- type: str
- sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
- ip_address:
- description: The IP address.
- returned: success
- type: str
- sample: "127.0.0.1"
- mac_address:
- description: The MAC address.
- returned: success
- type: str
- sample: "00:00:5E:00:53:42"
- name:
- description: The port name.
- returned: success
- type: str
- sample: "port_name"
- network_id:
- description: The UUID of the attached network.
- returned: success
- type: str
- sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
- port_security_enabled:
- description: The port security status. The status is enabled (true) or disabled (false).
- returned: success
- type: bool
- sample: false
- security_groups:
- description: The UUIDs of any attached security groups.
- returned: success
- type: list
- status:
- description: The port status.
- returned: success
- type: str
- sample: "ACTIVE"
- tenant_id:
- description: The UUID of the tenant who owns the network.
- returned: success
- type: str
- sample: "51fce036d7984ba6af4f6c849f65ef00"
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class NetworkPortInfoModule(OpenStackModule):
- argument_spec = dict(
- port=dict(required=False),
- filters=dict(type='dict', required=False),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
-    deprecated_names = ('openstack.cloud.port_facts',)
-
- def run(self):
- port = self.params.get('port')
- filters = self.params.get('filters')
-
- ports = self.conn.search_ports(port, filters)
- self.exit_json(changed=False, openstack_ports=ports)
-
-
-def main():
- module = NetworkPortInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
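
# A minimal openstacksdk sketch of the lookup behind the port_info module
# above, assuming a "mycloud" cloud entry and the device_id/status filter
# from the EXAMPLES section.
import openstack

conn = openstack.connect(cloud='mycloud')
ports = conn.search_ports(
    filters={'device_id': '1038a010-3a37-4a9d-82ea-652f1da36597',
             'status': 'ACTIVE'})
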
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_project.py b/ansible_collections/openstack/cloud/plugins/modules/os_project.py
deleted file mode 100644
index 9719452dc..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_project.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2015 IBM Corporation
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: project
-short_description: Manage OpenStack Projects
-author: OpenStack Ansible SIG
-description:
- - Manage OpenStack Projects. Projects can be created,
- updated or deleted using this module. A project will be updated
- if I(name) matches an existing project and I(state) is present.
- The value for I(name) cannot be updated without deleting and
- re-creating the project.
-options:
- name:
- description:
- - Name for the project
- required: true
- type: str
- description:
- description:
- - Description for the project
- type: str
- domain_id:
- description:
- - Domain id to create the project in if the cloud supports domains.
- aliases: ['domain']
- type: str
- enabled:
- description:
- - Is the project enabled
- type: bool
- default: 'yes'
- properties:
- description:
- - Additional properties to be associated with this project. Requires
- openstacksdk>0.45.
- type: dict
- required: false
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a project
-- openstack.cloud.project:
- cloud: mycloud
- endpoint_type: admin
- state: present
- name: demoproject
- description: demodescription
- domain_id: demoid
- enabled: True
- properties:
- internal_alias: demo_project
-
-# Delete a project
-- openstack.cloud.project:
- cloud: mycloud
- endpoint_type: admin
- state: absent
- name: demoproject
-'''
-
-
-RETURN = '''
-project:
- description: Dictionary describing the project.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Project ID
- type: str
- sample: "f59382db809c43139982ca4189404650"
- name:
- description: Project name
- type: str
- sample: "demoproject"
- description:
- description: Project description
- type: str
- sample: "demodescription"
- enabled:
- description: Boolean to indicate if project is enabled
- type: bool
- sample: True
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityProjectModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- description=dict(required=False),
- domain_id=dict(required=False, aliases=['domain']),
- properties=dict(required=False, type='dict', min_ver='0.45.1'),
- enabled=dict(default=True, type='bool'),
- state=dict(default='present', choices=['absent', 'present'])
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _needs_update(self, project):
- keys = ('description', 'enabled')
- for key in keys:
- if self.params[key] is not None and self.params[key] != project.get(key):
- return True
-
- properties = self.params['properties']
- if properties:
- project_properties = project.get('properties')
- for k, v in properties.items():
- if v is not None and (k not in project_properties or v != project_properties[k]):
- return True
-
- return False
-
- def _system_state_change(self, project):
- state = self.params['state']
- if state == 'present':
- if project is None:
- changed = True
- else:
- if self._needs_update(project):
- changed = True
- else:
- changed = False
-
- elif state == 'absent':
- changed = project is not None
-
- return changed
-
- def run(self):
- name = self.params['name']
- description = self.params['description']
- domain = self.params['domain_id']
- enabled = self.params['enabled']
- properties = self.params['properties'] or {}
- state = self.params['state']
-
- if domain:
- try:
- # We assume admin is passing domain id
- dom = self.conn.get_domain(domain)['id']
- domain = dom
- except Exception:
- # If we fail, maybe admin is passing a domain name.
- # Note that domains have unique names, just like id.
- try:
- dom = self.conn.search_domains(filters={'name': domain})[0]['id']
- domain = dom
- except Exception:
- # Ok, let's hope the user is non-admin and passing a sane id
- pass
-
- if domain:
- project = self.conn.get_project(name, domain_id=domain)
- else:
- project = self.conn.get_project(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(project))
-
- if state == 'present':
- if project is None:
- project = self.conn.create_project(
- name=name, description=description,
- domain_id=domain,
- enabled=enabled)
- changed = True
-
- project = self.conn.update_project(
- project['id'],
- description=description,
- enabled=enabled,
- **properties)
- else:
- if self._needs_update(project):
- project = self.conn.update_project(
- project['id'],
- description=description,
- enabled=enabled,
- **properties)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed, project=project)
-
- elif state == 'absent':
- if project is None:
- changed = False
- else:
- self.conn.delete_project(project['id'])
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityProjectModule()
- module()
-
-
-if __name__ == '__main__':
- main()
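
# A minimal openstacksdk sketch of the Keystone calls wrapped by the project
# module above, assuming a "mycloud" cloud entry and the demoproject example;
# the domain is resolved by ID first and then by name, as in run().
import openstack

conn = openstack.connect(cloud='mycloud')
try:
    domain_id = conn.get_domain('demoid')['id']
except Exception:
    domain_id = conn.search_domains(filters={'name': 'demoid'})[0]['id']
project = conn.get_project('demoproject', domain_id=domain_id)
if not project:
    project = conn.create_project(
        name='demoproject', description='demodescription',
        domain_id=domain_id, enabled=True)
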
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_project_access.py b/ansible_collections/openstack/cloud/plugins/modules/os_project_access.py
deleted file mode 100644
index c49a84495..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_project_access.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/python
-
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: project_access
-short_description: Manage OpenStack compute flavors access
-author: OpenStack Ansible SIG
-description:
-    - Add or remove flavor, volume_type or other resource access
- from OpenStack.
-options:
- state:
- description:
- - Indicate desired state of the resource.
- choices: ['present', 'absent']
- required: false
- default: present
- type: str
- target_project_id:
- description:
- - Project id.
- required: true
- type: str
- resource_type:
- description:
- - The resource type (eg. nova_flavor, cinder_volume_type).
- required: true
- type: str
- resource_name:
- description:
- - The resource name (eg. tiny).
- required: true
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: "Enable access to tiny flavor to your tenant."
- openstack.cloud.project_access:
- cloud: mycloud
- state: present
- target_project_id: f0f1f2f3f4f5f67f8f9e0e1
- resource_name: tiny
- resource_type: nova_flavor
-
-
-- name: "Disable access to the given flavor to project"
- openstack.cloud.project_access:
- cloud: mycloud
- state: absent
- target_project_id: f0f1f2f3f4f5f67f8f9e0e1
- resource_name: tiny
- resource_type: nova_flavor
-'''
-
-RETURN = '''
-flavor:
- description: Dictionary describing the flavor.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Flavor ID.
- returned: success
- type: str
- sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
- name:
- description: Flavor name.
- returned: success
- type: str
- sample: "tiny"
-
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityProjectAccess(OpenStackModule):
- argument_spec = dict(
- state=dict(required=False, default='present',
- choices=['absent', 'present']),
- target_project_id=dict(required=True, type='str'),
- resource_type=dict(required=True, type='str'),
- resource_name=dict(required=True, type='str'),
- )
-
- module_kwargs = dict(
- supports_check_mode=True,
- required_if=[
- ('state', 'present', ['target_project_id'])
- ]
- )
-
- def run(self):
- state = self.params['state']
- resource_name = self.params['resource_name']
- resource_type = self.params['resource_type']
- target_project_id = self.params['target_project_id']
-
- if resource_type == 'nova_flavor':
- # returns Munch({'NAME_ATTR': 'name',
- # 'tenant_id': u'37e55da59ec842649d84230f3a24eed5',
- # 'HUMAN_ID': False,
- # 'flavor_id': u'6d4d37b9-0480-4a8c-b8c9-f77deaad73f9',
- # 'request_ids': [], 'human_id': None}),
- _get_resource = self.conn.get_flavor
- _list_resource_access = self.conn.list_flavor_access
- _add_resource_access = self.conn.add_flavor_access
- _remove_resource_access = self.conn.remove_flavor_access
- elif resource_type == 'cinder_volume_type':
- # returns [Munch({
- # 'project_id': u'178cdb9955b047eea7afbe582038dc94',
- # 'properties': {'request_ids': [], 'NAME_ATTR': 'name',
- # 'human_id': None,
- # 'HUMAN_ID': False},
- # 'id': u'd5573023-b290-42c8-b232-7c5ca493667f'}),
- _get_resource = self.conn.get_volume_type
- _list_resource_access = self.conn.get_volume_type_access
- _add_resource_access = self.conn.add_volume_type_access
- _remove_resource_access = self.conn.remove_volume_type_access
- else:
- self.exit_json(
- changed=False,
- resource_name=resource_name,
- resource_type=resource_type,
- error="Not implemented.")
-
- resource = _get_resource(resource_name)
- if not resource:
- self.exit_json(
- changed=False,
- resource_name=resource_name,
- resource_type=resource_type,
- error="Not found.")
- resource_id = getattr(resource, 'id', resource['id'])
- # _list_resource_access returns a list of dicts containing 'project_id'
- acls = _list_resource_access(resource_id)
-
- if not all(acl.get('project_id') for acl in acls):
- self.exit_json(
- changed=False,
- resource_name=resource_name,
- resource_type=resource_type,
- error="Missing project_id in resource output.")
- allowed_tenants = [acl['project_id'] for acl in acls]
-
- changed_access = any((
- state == 'present' and target_project_id not in allowed_tenants,
- state == 'absent' and target_project_id in allowed_tenants
- ))
- if self.ansible.check_mode or not changed_access:
- self.exit_json(
- changed=changed_access, resource=resource, id=resource_id)
-
- if state == 'present':
- _add_resource_access(
- resource_id, target_project_id
- )
- elif state == 'absent':
- _remove_resource_access(
- resource_id, target_project_id
- )
-
- self.exit_json(
- changed=True, resource=resource, id=resource_id)
-
-
-def main():
- module = IdentityProjectAccess()
- module()
-
-
-if __name__ == '__main__':
- main()
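
# A minimal openstacksdk sketch of the flavor-access calls wrapped by the
# project_access module above, assuming a "mycloud" cloud entry and the tiny
# flavor / target project ID from the EXAMPLES section.
import openstack

conn = openstack.connect(cloud='mycloud')
flavor = conn.get_flavor('tiny')
allowed = [acl['project_id'] for acl in conn.list_flavor_access(flavor['id'])]
if 'f0f1f2f3f4f5f67f8f9e0e1' not in allowed:
    conn.add_flavor_access(flavor['id'], 'f0f1f2f3f4f5f67f8f9e0e1')
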
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_project_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_project_info.py
deleted file mode 100644
index fb1e2767d..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_project_info.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: project_info
-short_description: Retrieve information about one or more OpenStack projects
-author: OpenStack Ansible SIG
-description:
-    - Retrieve information about one or more OpenStack projects
- - This module was called C(openstack.cloud.project_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.project_info) module no longer returns C(ansible_facts)!
-options:
- name:
- description:
- - Name or ID of the project
- type: str
- domain:
- description:
- - Name or ID of the domain containing the project if the cloud supports domains
- type: str
- filters:
- description:
-      - A dictionary of metadata to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Gather information about previously created projects
-- openstack.cloud.project_info:
- cloud: awesomecloud
- register: result
-- debug:
- msg: "{{ result.openstack_projects }}"
-
-# Gather information about a previously created project by name
-- openstack.cloud.project_info:
- cloud: awesomecloud
- name: demoproject
- register: result
-- debug:
- msg: "{{ result.openstack_projects }}"
-
-# Gather information about a previously created project in a specific domain
-- openstack.cloud.project_info:
- cloud: awesomecloud
- name: demoproject
- domain: admindomain
- register: result
-- debug:
- msg: "{{ result.openstack_projects }}"
-
-# Gather information about a previously created project in a specific domain with filter
-- openstack.cloud.project_info:
- cloud: awesomecloud
- name: demoproject
- domain: admindomain
- filters:
- enabled: False
- register: result
-- debug:
- msg: "{{ result.openstack_projects }}"
-'''
-
-
-RETURN = '''
-openstack_projects:
- description: has all the OpenStack information about projects
- returned: always, but can be null
- type: complex
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the project.
- returned: success
- type: str
- description:
- description: Description of the project
- returned: success
- type: str
- enabled:
- description: Flag to indicate if the project is enabled
- returned: success
- type: bool
- domain_id:
- description: Domain ID containing the project (keystone v3 clouds only)
- returned: success
-            type: str
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityProjectInfoModule(OpenStackModule):
- deprecated_names = ('project_facts', 'openstack.cloud.project_facts')
-
- argument_spec = dict(
- name=dict(required=False),
- domain=dict(required=False),
- filters=dict(required=False, type='dict'),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def run(self):
- name = self.params['name']
- domain = self.params['domain']
- filters = self.params['filters']
- is_old_facts = self.module_name == 'openstack.cloud.project_facts'
-
- if domain:
- try:
- # We assume admin is passing domain id
- dom = self.conn.get_domain(domain)['id']
- domain = dom
- except Exception:
- # If we fail, maybe admin is passing a domain name.
- # Note that domains have unique names, just like id.
- dom = self.conn.search_domains(filters={'name': domain})
- if dom:
- domain = dom[0]['id']
- else:
- self.fail_json(msg='Domain name or ID does not exist')
-
- if not filters:
- filters = {}
-
- filters['domain_id'] = domain
-
- projects = self.conn.search_projects(name, filters)
- if is_old_facts:
- self.exit_json(changed=False, ansible_facts=dict(
- openstack_projects=projects))
- else:
- self.exit_json(changed=False, openstack_projects=projects)
-
-
-def main():
- module = IdentityProjectInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
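
# A minimal openstacksdk sketch of the lookup behind the project_info module
# above, assuming an "awesomecloud" cloud entry and the demoproject /
# admindomain example with the enabled filter from the EXAMPLES section.
import openstack

conn = openstack.connect(cloud='awesomecloud')
domain = conn.search_domains(filters={'name': 'admindomain'})[0]
projects = conn.search_projects(
    'demoproject', filters={'domain_id': domain['id'], 'enabled': False})
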
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_quota.py b/ansible_collections/openstack/cloud/plugins/modules/os_quota.py
deleted file mode 100644
index 0d6a4f04c..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_quota.py
+++ /dev/null
@@ -1,466 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 Pason System Corporation
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: quota
-short_description: Manage OpenStack Quotas
-author: OpenStack Ansible SIG
-description:
- - Manage OpenStack Quotas. Quotas can be created,
- updated or deleted using this module. A quota will be updated
-      if it matches an existing project and I(state) is present.
-options:
- name:
- description:
- - Name of the OpenStack Project to manage.
- required: true
- type: str
- state:
- description:
- - A value of present sets the quota and a value of absent resets the quota to system defaults.
- default: present
- type: str
- choices: ['absent', 'present']
- backup_gigabytes:
-        description: Maximum size of backups in GB.
- type: int
- backups:
- description: Maximum number of backups allowed.
- type: int
- cores:
-        description: Maximum number of CPUs per project.
- type: int
- fixed_ips:
-        description: Number of fixed IPs to allow.
- type: int
- floating_ips:
-        description: Number of floating IPs to allow in Compute.
- aliases: ['compute_floating_ips']
- type: int
- floatingip:
-        description: Number of floating IPs to allow in Network.
- aliases: ['network_floating_ips']
- type: int
- gigabytes:
- description: Maximum volume storage allowed for project.
- type: int
- gigabytes_types:
- description:
- - Per driver volume storage quotas. Keys should be
- prefixed with C(gigabytes_) values should be ints.
- type: dict
- injected_file_size:
- description: Maximum file size in bytes.
- type: int
- injected_files:
- description: Number of injected files to allow.
- type: int
- injected_path_size:
- description: Maximum path size.
- type: int
- instances:
- description: Maximum number of instances allowed.
- type: int
- key_pairs:
- description: Number of key pairs to allow.
- type: int
- loadbalancer:
- description: Number of load balancers to allow.
- type: int
- metadata_items:
- description: Number of metadata items allowed per instance.
- type: int
- network:
- description: Number of networks to allow.
- type: int
- per_volume_gigabytes:
-        description: Maximum size in GB of individual volumes.
- type: int
- pool:
- description: Number of load balancer pools to allow.
- type: int
- port:
-        description: Number of network ports to allow. This needs to be greater than the instances limit.
- type: int
- properties:
- description: Number of properties to allow.
- type: int
- ram:
- description: Maximum amount of ram in MB to allow.
- type: int
- rbac_policy:
- description: Number of policies to allow.
- type: int
- router:
- description: Number of routers to allow.
- type: int
- security_group_rule:
- description: Number of rules per security group to allow.
- type: int
- security_group:
- description: Number of security groups to allow.
- type: int
- server_group_members:
- description: Number of server group members to allow.
- type: int
- server_groups:
- description: Number of server groups to allow.
- type: int
- snapshots:
- description: Number of snapshots to allow.
- type: int
- snapshots_types:
- description:
- - Per-driver volume snapshot quotas. Keys should be
- prefixed with C(snapshots_) values should be ints.
- type: dict
- subnet:
- description: Number of subnets to allow.
- type: int
- subnetpool:
- description: Number of subnet pools to allow.
- type: int
- volumes:
- description: Number of volumes to allow.
- type: int
- volumes_types:
- description:
- - Per-driver volume count quotas. Keys should be
- prefixed with C(volumes_) values should be ints.
- type: dict
- project:
-        description: Unused, kept for compatibility
- type: int
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.13.0"
- - "keystoneauth1 >= 3.4.0"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# List a Project Quota
-- openstack.cloud.quota:
- cloud: mycloud
- name: demoproject
-
-# Set a Project back to the defaults
-- openstack.cloud.quota:
- cloud: mycloud
- name: demoproject
- state: absent
-
-# Update a Project Quota for cores
-- openstack.cloud.quota:
- cloud: mycloud
- name: demoproject
- cores: 100
-
-# Update a Project Quota
-- openstack.cloud.quota:
- name: demoproject
- cores: 1000
- volumes: 20
-    volumes_types:
-      volumes_lvm: 10
-
-# Complete example based on list of projects
-- name: Update quotas
- openstack.cloud.quota:
- name: "{{ item.name }}"
- backup_gigabytes: "{{ item.backup_gigabytes }}"
- backups: "{{ item.backups }}"
- cores: "{{ item.cores }}"
- fixed_ips: "{{ item.fixed_ips }}"
- floating_ips: "{{ item.floating_ips }}"
- floatingip: "{{ item.floatingip }}"
- gigabytes: "{{ item.gigabytes }}"
- injected_file_size: "{{ item.injected_file_size }}"
- injected_files: "{{ item.injected_files }}"
- injected_path_size: "{{ item.injected_path_size }}"
- instances: "{{ item.instances }}"
- key_pairs: "{{ item.key_pairs }}"
- loadbalancer: "{{ item.loadbalancer }}"
- metadata_items: "{{ item.metadata_items }}"
- per_volume_gigabytes: "{{ item.per_volume_gigabytes }}"
- pool: "{{ item.pool }}"
- port: "{{ item.port }}"
- properties: "{{ item.properties }}"
- ram: "{{ item.ram }}"
- security_group_rule: "{{ item.security_group_rule }}"
- security_group: "{{ item.security_group }}"
- server_group_members: "{{ item.server_group_members }}"
- server_groups: "{{ item.server_groups }}"
- snapshots: "{{ item.snapshots }}"
- volumes: "{{ item.volumes }}"
- volumes_types:
- volumes_lvm: "{{ item.volumes_lvm }}"
- snapshots_types:
- snapshots_lvm: "{{ item.snapshots_lvm }}"
- gigabytes_types:
- gigabytes_lvm: "{{ item.gigabytes_lvm }}"
- with_items:
- - "{{ projects }}"
- when: item.state == "present"
-'''
-
-RETURN = '''
-openstack_quotas:
- description: Dictionary describing the project quota.
-    returned: Regardless of whether changes were made or not
- type: dict
- sample:
- openstack_quotas: {
- compute: {
- cores: 150,
- fixed_ips: -1,
- floating_ips: 10,
- injected_file_content_bytes: 10240,
- injected_file_path_bytes: 255,
- injected_files: 5,
- instances: 100,
- key_pairs: 100,
- metadata_items: 128,
- ram: 153600,
- security_group_rules: 20,
- security_groups: 10,
- server_group_members: 10,
- server_groups: 10
- },
- network: {
- floatingip: 50,
- loadbalancer: 10,
- network: 10,
- pool: 10,
- port: 160,
- rbac_policy: 10,
- router: 10,
- security_group: 10,
- security_group_rule: 100,
- subnet: 10,
- subnetpool: -1
- },
- volume: {
- backup_gigabytes: 1000,
- backups: 10,
- gigabytes: 1000,
- gigabytes_lvm: -1,
- per_volume_gigabytes: -1,
- snapshots: 10,
- snapshots_lvm: -1,
- volumes: 10,
- volumes_lvm: -1
- }
- }
-
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class QuotaModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- state=dict(default='present', choices=['absent', 'present']),
- backup_gigabytes=dict(required=False, type='int', default=None),
- backups=dict(required=False, type='int', default=None),
- cores=dict(required=False, type='int', default=None),
- fixed_ips=dict(required=False, type='int', default=None),
- floating_ips=dict(required=False, type='int', default=None, aliases=['compute_floating_ips']),
- floatingip=dict(required=False, type='int', default=None, aliases=['network_floating_ips']),
- gigabytes=dict(required=False, type='int', default=None),
- gigabytes_types=dict(required=False, type='dict', default={}),
- injected_file_size=dict(required=False, type='int', default=None),
- injected_files=dict(required=False, type='int', default=None),
- injected_path_size=dict(required=False, type='int', default=None),
- instances=dict(required=False, type='int', default=None),
- key_pairs=dict(required=False, type='int', default=None, no_log=False),
- loadbalancer=dict(required=False, type='int', default=None),
- metadata_items=dict(required=False, type='int', default=None),
- network=dict(required=False, type='int', default=None),
- per_volume_gigabytes=dict(required=False, type='int', default=None),
- pool=dict(required=False, type='int', default=None),
- port=dict(required=False, type='int', default=None),
- project=dict(required=False, type='int', default=None),
- properties=dict(required=False, type='int', default=None),
- ram=dict(required=False, type='int', default=None),
- rbac_policy=dict(required=False, type='int', default=None),
- router=dict(required=False, type='int', default=None),
- security_group_rule=dict(required=False, type='int', default=None),
- security_group=dict(required=False, type='int', default=None),
- server_group_members=dict(required=False, type='int', default=None),
- server_groups=dict(required=False, type='int', default=None),
- snapshots=dict(required=False, type='int', default=None),
- snapshots_types=dict(required=False, type='dict', default={}),
- subnet=dict(required=False, type='int', default=None),
- subnetpool=dict(required=False, type='int', default=None),
- volumes=dict(required=False, type='int', default=None),
- volumes_types=dict(required=False, type='dict', default={})
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _get_volume_quotas(self, project):
- return self.conn.get_volume_quotas(project)
-
- def _get_network_quotas(self, project):
- return self.conn.get_network_quotas(project)
-
- def _get_compute_quotas(self, project):
- return self.conn.get_compute_quotas(project)
-
- def _get_quotas(self, project):
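- # Gathers quotas per service and returns a dict shaped like
- # {'volume': {...}, 'network': {...}, 'compute': {...}}, omitting
- # services whose endpoint could not be reached.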
- quota = {}
- try:
- quota['volume'] = self._get_volume_quotas(project)
- except Exception:
- self.warn("No public endpoint for volumev2 service was found. Ignoring volume quotas.")
-
- try:
- quota['network'] = self._get_network_quotas(project)
- except Exception:
- self.warn("No public endpoint for network service was found. Ignoring network quotas.")
-
- quota['compute'] = self._get_compute_quotas(project)
-
- for quota_type in quota.keys():
- quota[quota_type] = self._scrub_results(quota[quota_type])
-
- return quota
-
- def _scrub_results(self, quota):
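- # Drop openstacksdk bookkeeping attributes from the quota dict so
- # only real quota values are compared and returned.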
- filter_attr = [
- 'HUMAN_ID',
- 'NAME_ATTR',
- 'human_id',
- 'request_ids',
- 'x_openstack_request_ids',
- ]
-
- for attr in filter_attr:
- if attr in quota:
- del quota[attr]
-
- return quota
-
- def _system_state_change_details(self, project_quota_output):
- quota_change_request = {}
- changes_required = False
-
- for quota_type in project_quota_output.keys():
- for quota_option in project_quota_output[quota_type].keys():
- if quota_option in self.params and self.params[quota_option] is not None:
- if project_quota_output[quota_type][quota_option] != self.params[quota_option]:
- changes_required = True
-
- if quota_type not in quota_change_request:
- quota_change_request[quota_type] = {}
-
- quota_change_request[quota_type][quota_option] = self.params[quota_option]
-
- return (changes_required, quota_change_request)
-
- def _system_state_change(self, project_quota_output):
- """
- Determine if changes are required to the current project quota.
-
- This is done by comparing the current project_quota_output against
- the desired quota settings set on the module params.
- """
-
- changes_required, quota_change_request = self._system_state_change_details(
- project_quota_output
- )
-
- if changes_required:
- return True
- else:
- return False
-
- def run(self):
- cloud_params = dict(self.params)
-
- # In order to handle the different volume types, we update the module params afterwards.
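- # e.g. volumes_types={'volumes_lvm': 10} results in
- # self.params['volumes_lvm'] = 10, which is then handled like any
- # other per-service quota option.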
- dynamic_types = [
- 'gigabytes_types',
- 'snapshots_types',
- 'volumes_types',
- ]
-
- for dynamic_type in dynamic_types:
- for k, v in self.params[dynamic_type].items():
- self.params[k] = int(v)
-
- # Get current quota values
- project_quota_output = self._get_quotas(cloud_params['name'])
- changes_required = False
-
- if self.params['state'] == "absent":
- # If a quota state is set to absent we should assume there will be changes.
- # The default quota values are not accessible, so we cannot determine
- # whether any changes will occur.
- if self.ansible.check_mode:
- self.exit_json(changed=True)
-
- # Calling delete_network_quotas when a quota has not been set results
- # in an error, according to the sdk docs it should return the
- # current quota.
- # The following error string is returned:
- # network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
- neutron_msg1 = "network client call failed: Quota for tenant"
- neutron_msg2 = "could not be found"
-
- for quota_type in project_quota_output.keys():
- quota_call = getattr(self.conn, 'delete_%s_quotas' % (quota_type))
- try:
- quota_call(cloud_params['name'])
- except Exception as e:
- error_msg = str(e)
- if error_msg.find(neutron_msg1) > -1 and error_msg.find(neutron_msg2) > -1:
- pass
- else:
- self.fail_json(msg=str(e), extra_data=e.extra_data)
-
- project_quota_output = self._get_quotas(cloud_params['name'])
- changes_required = True
-
- elif self.params['state'] == "present":
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(
- project_quota_output))
-
- changes_required, quota_change_request = self._system_state_change_details(
- project_quota_output
- )
-
- if changes_required:
- for quota_type in quota_change_request.keys():
- quota_call = getattr(self.conn, 'set_%s_quotas' % (quota_type))
- quota_call(cloud_params['name'], **quota_change_request[quota_type])
-
- # Get quota state post changes for validation
- project_quota_update = self._get_quotas(cloud_params['name'])
-
- if project_quota_output == project_quota_update:
- self.fail_json(msg='Could not apply quota update')
-
- project_quota_output = project_quota_update
-
- self.exit_json(
- changed=changes_required, openstack_quotas=project_quota_output)
-
-
-def main():
- module = QuotaModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_recordset.py b/ansible_collections/openstack/cloud/plugins/modules/os_recordset.py
deleted file mode 100644
index 921d6efaa..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_recordset.py
+++ /dev/null
@@ -1,260 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 Hewlett-Packard Enterprise
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: recordset
-short_description: Manage OpenStack DNS recordsets
-author: OpenStack Ansible SIG
-description:
- - Manage OpenStack DNS recordsets. Recordsets can be created, deleted or
- updated. Only the I(records), I(description), and I(ttl) values
- can be updated.
-options:
- description:
- description:
- - Description of the recordset
- type: str
- name:
- description:
- - Name of the recordset. It must end with the name of the DNS zone.
- required: true
- type: str
- records:
- description:
- - List of recordset definitions.
- - Required when I(state=present).
- type: list
- elements: str
- recordset_type:
- description:
- - Recordset type
- - Required when I(state=present).
- choices: ['a', 'aaaa', 'mx', 'cname', 'txt', 'ns', 'srv', 'ptr', 'caa']
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- ttl:
- description:
- - TTL (Time To Live) value in seconds
- type: int
- zone:
- description:
- - Name or ID of the zone which manages the recordset
- required: true
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a recordset named "www.example.net."
-- openstack.cloud.recordset:
- cloud: mycloud
- state: present
- zone: example.net.
- name: www.example.net.
- recordset_type: "a"
- records: ['10.1.1.1']
- description: test recordset
- ttl: 3600
-
-# Update the TTL on existing "www.example.net." recordset
-- openstack.cloud.recordset:
- cloud: mycloud
- state: present
- zone: example.net.
- name: www.example.net.
- recordset_type: "a"
- records: ['10.1.1.1']
- ttl: 7200
-
-# Delete recordset named "www.example.net."
-- openstack.cloud.recordset:
- cloud: mycloud
- state: absent
- zone: example.net.
- name: www.example.net.
-'''
-
-RETURN = '''
-recordset:
- description: Dictionary describing the recordset.
- returned: On success when I(state) is 'present'.
- type: dict
- contains:
- action:
- description: Current action in progress on the resource
- type: str
- returned: always
- created_at:
- description: Timestamp when the zone was created
- type: str
- returned: always
- description:
- description: Recordset description
- type: str
- sample: "Test description"
- returned: always
- id:
- description: Unique recordset ID
- type: str
- sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
- links:
- description: Links related to the resource
- type: dict
- returned: always
- name:
- description: Recordset name
- type: str
- sample: "www.example.net."
- returned: always
- project_id:
- description: ID of the project to which the recordset belongs
- type: str
- returned: always
- records:
- description: Recordset records
- type: list
- sample: ['10.0.0.1']
- returned: always
- status:
- description:
- - Recordset status
- - Valid values include `PENDING_CREATE`, `ACTIVE`, `PENDING_DELETE`,
- `ERROR`
- type: str
- returned: always
- ttl:
- description: Zone TTL value
- type: int
- sample: 3600
- returned: always
- type:
- description:
- - Recordset type
- - Valid values include `A`, `AAAA`, `MX`, `CNAME`, `TXT`, `NS`,
- `SSHFP`, `SPF`, `SRV`, `PTR`
- type: str
- sample: "A"
- returned: always
- zone_id:
- description: The id of the Zone which this recordset belongs to
- type: str
- sample: 9508e177-41d8-434e-962c-6fe6ca880af7
- returned: always
- zone_name:
- description: The name of the Zone which this recordset belongs to
- type: str
- sample: "example.com."
- returned: always
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class DnsRecordsetModule(OpenStackModule):
- argument_spec = dict(
- description=dict(required=False, default=None),
- name=dict(required=True),
- records=dict(required=False, type='list', elements='str'),
- recordset_type=dict(required=False, choices=['a', 'aaaa', 'mx', 'cname', 'txt', 'ns', 'srv', 'ptr', 'caa']),
- state=dict(default='present', choices=['absent', 'present']),
- ttl=dict(required=False, type='int'),
- zone=dict(required=True),
- )
-
- module_kwargs = dict(
- required_if=[
- ('state', 'present',
- ['recordset_type', 'records'])],
- supports_check_mode=True
- )
-
- module_min_sdk_version = '0.28.0'
-
- def _needs_update(self, params, recordset):
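- # Only description, records and ttl are compared; any other
- # difference between params and the existing recordset is ignored.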
- for k in ('description', 'records', 'ttl'):
- if k not in params:
- continue
- if params[k] is not None and params[k] != recordset[k]:
- return True
- return False
-
- def _system_state_change(self, state, recordset):
- if state == 'present':
- if recordset is None:
- return True
- kwargs = self._build_params()
- return self._needs_update(kwargs, recordset)
- if state == 'absent' and recordset:
- return True
- return False
-
- def _build_params(self):
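- # e.g. recordset_type='a', records=['10.1.1.1'], ttl=3600 and no
- # description yields {'type': 'A', 'records': ['10.1.1.1'], 'ttl': 3600}.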
- recordset_type = self.params['recordset_type']
- records = self.params['records']
- description = self.params['description']
- ttl = self.params['ttl']
- params = {
- 'description': description,
- 'records': records,
- 'type': recordset_type.upper(),
- 'ttl': ttl,
- }
- return {k: v for k, v in params.items() if v is not None}
-
- def run(self):
- zone = self.params.get('zone')
- name = self.params.get('name')
- state = self.params.get('state')
- ttl = self.params.get('ttl')
-
- recordsets = self.conn.search_recordsets(zone, name_or_id=name)
-
- recordset = None
- if recordsets:
- recordset = recordsets[0]
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, recordset))
-
- changed = False
- if state == 'present':
- kwargs = self._build_params()
- if recordset is None:
- kwargs['ttl'] = ttl or 300
- type = kwargs.pop('type', None)
- if type is not None:
- kwargs['recordset_type'] = type
- recordset = self.conn.create_recordset(zone=zone, name=name,
- **kwargs)
- changed = True
- elif self._needs_update(kwargs, recordset):
- type = kwargs.pop('type', None)
- recordset = self.conn.update_recordset(zone, recordset['id'],
- **kwargs)
- changed = True
- self.exit_json(changed=changed, recordset=recordset)
- elif state == 'absent' and recordset is not None:
- self.conn.delete_recordset(zone, recordset['id'])
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = DnsRecordsetModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_router.py b/ansible_collections/openstack/cloud/plugins/modules/os_router.py
deleted file mode 100644
index 58c5c124e..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_router.py
+++ /dev/null
@@ -1,571 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: router
-short_description: Create or delete routers from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Create or Delete routers from OpenStack. Although Neutron allows
- routers to share the same name, this module enforces name uniqueness
- to be more user friendly.
-options:
- state:
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- name:
- description:
- - Name to be given to the router
- required: true
- type: str
- admin_state_up:
- description:
- - Desired admin state of the created or existing router.
- type: bool
- default: 'yes'
- enable_snat:
- description:
- - Enable Source NAT (SNAT) attribute.
- type: bool
- network:
- description:
- - Unique name or ID of the external gateway network.
- - Required if I(interfaces) or I(enable_snat) are provided.
- type: str
- project:
- description:
- - Unique name or ID of the project.
- type: str
- external_fixed_ips:
- description:
- - The IP address parameters for the external gateway network. Each
- is a dictionary with the subnet name or ID (subnet) and the IP
- address to assign on the subnet (ip). If no IP is specified,
- one is automatically assigned from that subnet.
- type: list
- elements: dict
- suboptions:
- ip:
- description: The fixed IP address to attempt to allocate.
- required: true
- type: str
- subnet:
- description: The subnet to attach the IP address to.
- type: str
- interfaces:
- description:
- - List of subnets to attach to the router's internal interfaces. The
- default gateway associated with each subnet will be automatically
- attached to the router's internal interface.
- In order to provide an IP address different from the default
- gateway, parameters are passed as a dictionary with keys for the
- network name or ID (I(net)), the subnet name or ID (I(subnet)) and
- the IP of the port (I(portip)) from the network.
- A user-defined portip is often required when multiple routers need
- to be connected to a single subnet for which the default gateway
- has already been used.
- type: list
- elements: raw
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a simple router, not attached to a gateway or subnets.
-- openstack.cloud.router:
- cloud: mycloud
- state: present
- name: simple_router
-
-# Create a simple router, not attached to a gateway or subnets for a given project.
-- openstack.cloud.router:
- cloud: mycloud
- state: present
- name: simple_router
- project: myproj
-
-# Creates a router attached to ext_network1 on an IPv4 subnet and one
-# internal subnet interface.
-- openstack.cloud.router:
- cloud: mycloud
- state: present
- name: router1
- network: ext_network1
- external_fixed_ips:
- - subnet: public-subnet
- ip: 172.24.4.2
- interfaces:
- - private-subnet
-
-# Create another router with two internal subnet interfaces. One with a user-defined
-# port ip and another with the default gateway.
-- openstack.cloud.router:
- cloud: mycloud
- state: present
- name: router2
- network: ext_network1
- interfaces:
- - net: private-net
- subnet: private-subnet
- portip: 10.1.1.10
- - project-subnet
-
-# Update existing router1 external gateway to include the IPv6 subnet.
-# Note that since 'interfaces' is not provided, any existing internal
-# interfaces on an existing router will be left intact.
-- openstack.cloud.router:
- cloud: mycloud
- state: present
- name: router1
- network: ext_network1
- external_fixed_ips:
- - subnet: public-subnet
- ip: 172.24.4.2
- - subnet: ipv6-public-subnet
- ip: 2001:db8::3
-
-# Delete router1
-- openstack.cloud.router:
- cloud: mycloud
- state: absent
- name: router1
-'''
-
-RETURN = '''
-router:
- description: Dictionary describing the router.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Router ID.
- type: str
- sample: "474acfe5-be34-494c-b339-50f06aa143e4"
- name:
- description: Router name.
- type: str
- sample: "router1"
- admin_state_up:
- description: Administrative state of the router.
- type: bool
- sample: true
- status:
- description: The router status.
- type: str
- sample: "ACTIVE"
- tenant_id:
- description: The tenant ID.
- type: str
- sample: "861174b82b43463c9edc5202aadc60ef"
- external_gateway_info:
- description: The external gateway parameters.
- type: dict
- sample: {
- "enable_snat": true,
- "external_fixed_ips": [
- {
- "ip_address": "10.6.6.99",
- "subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
- }
- ]
- }
- routes:
- description: The extra routes configuration for L3 router.
- type: list
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-import itertools
-
-
-class RouterModule(OpenStackModule):
- argument_spec = dict(
- state=dict(default='present', choices=['absent', 'present']),
- name=dict(required=True),
- admin_state_up=dict(type='bool', default=True),
- enable_snat=dict(type='bool'),
- network=dict(default=None),
- interfaces=dict(type='list', default=None, elements='raw'),
- external_fixed_ips=dict(type='list', default=None, elements='dict'),
- project=dict(default=None)
- )
-
- def _get_subnet_ids_from_ports(self, ports):
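- # e.g. ports [{'fixed_ips': [{'subnet_id': 'a'}, {'subnet_id': 'b'}]}]
- # yields the subnet ids ['a', 'b'].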
- return [fixed_ip['subnet_id'] for fixed_ip in
- itertools.chain.from_iterable(port['fixed_ips'] for port in ports if 'fixed_ips' in port)]
-
- def _needs_update(self, router, net,
- missing_port_ids,
- requested_subnet_ids,
- existing_subnet_ids,
- router_ifs_cfg):
- """Decide if the given router needs an update."""
- if router['admin_state_up'] != self.params['admin_state_up']:
- return True
- if router['external_gateway_info']:
- # check if enable_snat is set in module params
- if self.params['enable_snat'] is not None:
- if router['external_gateway_info'].get('enable_snat', True) != self.params['enable_snat']:
- return True
- if net:
- if not router['external_gateway_info']:
- return True
- elif router['external_gateway_info']['network_id'] != net['id']:
- return True
-
- # check if external_fixed_ip has to be added
- for external_fixed_ip in router_ifs_cfg['external_fixed_ips']:
- exists = False
-
- # compare the requested interface with existing, looking for an existing match
- for existing_if in router['external_gateway_info']['external_fixed_ips']:
- if existing_if['subnet_id'] == external_fixed_ip['subnet_id']:
- if 'ip' in external_fixed_ip:
- if existing_if['ip_address'] == external_fixed_ip['ip']:
- # both subnet id and ip address match
- exists = True
- break
- else:
- # only the subnet was given, so ip doesn't matter
- exists = True
- break
-
- # this interface isn't present on the existing router
- if not exists:
- return True
-
- # check if external_fixed_ip has to be removed
- if router_ifs_cfg['external_fixed_ips']:
- for external_fixed_ip in router['external_gateway_info']['external_fixed_ips']:
- obsolete = True
-
- # compare the existing interface with requested, looking for a requested match
- for requested_if in router_ifs_cfg['external_fixed_ips']:
- if external_fixed_ip['subnet_id'] == requested_if['subnet_id']:
- if 'ip' in requested_if:
- if external_fixed_ip['ip_address'] == requested_if['ip']:
- # both subnet id and ip address match
- obsolete = False
- break
- else:
- # only the subnet was given, so ip doesn't matter
- obsolete = False
- break
-
- # this interface isn't present on the existing router
- if obsolete:
- return True
- else:
- # no external fixed ips requested
- if router['external_gateway_info'] \
- and router['external_gateway_info']['external_fixed_ips'] \
- and len(router['external_gateway_info']['external_fixed_ips']) > 1:
- # but router has several external fixed ips
- return True
-
- # check if internal port has to be added
- if router_ifs_cfg['internal_ports_missing']:
- return True
-
- if missing_port_ids:
- return True
-
- # check if internal subnet has to be added or removed
- if set(requested_subnet_ids) != set(existing_subnet_ids):
- return True
-
- return False
-
- def _build_kwargs(self, router, net):
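- # Builds the keyword arguments for create_router()/update_router(),
- # e.g. {'name_or_id': <existing router id>, 'admin_state_up': True,
- # 'ext_gateway_net_id': <net id>, 'ext_fixed_ips': [...]} for an
- # update with an external gateway.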
- kwargs = {
- 'admin_state_up': self.params['admin_state_up'],
- }
-
- if router:
- kwargs['name_or_id'] = router['id']
- else:
- kwargs['name'] = self.params['name']
-
- if net:
- kwargs['ext_gateway_net_id'] = net['id']
- # can't send enable_snat unless we have a network
- if self.params.get('enable_snat') is not None:
- kwargs['enable_snat'] = self.params['enable_snat']
-
- if self.params['external_fixed_ips']:
- kwargs['ext_fixed_ips'] = []
- for iface in self.params['external_fixed_ips']:
- subnet = self.conn.get_subnet(iface['subnet'])
- d = {'subnet_id': subnet['id']}
- if 'ip' in iface:
- d['ip_address'] = iface['ip']
- kwargs['ext_fixed_ips'].append(d)
- else:
- # no external fixed ips requested
- if router \
- and router['external_gateway_info'] \
- and router['external_gateway_info']['external_fixed_ips'] \
- and len(router['external_gateway_info']['external_fixed_ips']) > 1:
- # but router has several external fixed ips
- # keep first external fixed ip only
- fip = router['external_gateway_info']['external_fixed_ips'][0]
- kwargs['ext_fixed_ips'] = [fip]
-
- return kwargs
-
- def _build_router_interface_config(self, filters=None):
- external_fixed_ips = []
- internal_subnets = []
- internal_ports = []
- internal_ports_missing = []
-
- # Build external interface configuration
- if self.params['external_fixed_ips']:
- for iface in self.params['external_fixed_ips']:
- subnet = self.conn.get_subnet(iface['subnet'], filters)
- if not subnet:
- self.fail(msg='subnet %s not found' % iface['subnet'])
- new_external_fixed_ip = {'subnet_name': subnet.name, 'subnet_id': subnet.id}
- if 'ip' in iface:
- new_external_fixed_ip['ip'] = iface['ip']
- external_fixed_ips.append(new_external_fixed_ip)
-
- # Build internal interface configuration
- if self.params['interfaces']:
- internal_ips = []
- for iface in self.params['interfaces']:
- if isinstance(iface, str):
- subnet = self.conn.get_subnet(iface, filters)
- if not subnet:
- self.fail(msg='subnet %s not found' % iface)
- internal_subnets.append(subnet)
-
- elif isinstance(iface, dict):
- subnet = self.conn.get_subnet(iface['subnet'], filters)
- if not subnet:
- self.fail(msg='subnet %s not found' % iface['subnet'])
-
- net = self.conn.get_network(iface['net'])
- if not net:
- self.fail(msg='net %s not found' % iface['net'])
-
- if "portip" not in iface:
- # portip not set, add any ip from subnet
- internal_subnets.append(subnet)
- elif not iface['portip']:
- # portip is set but has invalid value
- self.fail(msg='put an ip in portip or remove it from list to assign default port to router')
- else:
- # portip has valid value
- # look for ports whose fixed_ips.ip_address matches portip
- for existing_port in self.conn.list_ports(filters={'network_id': net.id}):
- for fixed_ip in existing_port['fixed_ips']:
- if iface['portip'] == fixed_ip['ip_address']:
- # portip exists in net already
- internal_ports.append(existing_port)
- internal_ips.append(fixed_ip['ip_address'])
- if iface['portip'] not in internal_ips:
- # no port with portip exists hence create a new port
- internal_ports_missing.append({
- 'network_id': net.id,
- 'fixed_ips': [{'ip_address': iface['portip'], 'subnet_id': subnet.id}]
- })
-
- return {
- 'external_fixed_ips': external_fixed_ips,
- 'internal_subnets': internal_subnets,
- 'internal_ports': internal_ports,
- 'internal_ports_missing': internal_ports_missing
- }
-
- def run(self):
-
- state = self.params['state']
- name = self.params['name']
- network = self.params['network']
- project = self.params['project']
-
- if self.params['external_fixed_ips'] and not network:
- self.fail(msg='network is required when supplying external_fixed_ips')
-
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail(msg='Project %s could not be found' % project)
- project_id = proj['id']
- filters = {'tenant_id': project_id}
- else:
- project_id = None
- filters = None
-
- router = self.conn.get_router(name, filters=filters)
- net = None
- if network:
- net = self.conn.get_network(network)
- if not net:
- self.fail(msg='network %s not found' % network)
-
- # Validate and cache the subnet IDs so we can avoid duplicate checks
- # and expensive API calls.
- router_ifs_cfg = self._build_router_interface_config(filters)
- requested_subnet_ids = [subnet.id for subnet in router_ifs_cfg['internal_subnets']] + \
- self._get_subnet_ids_from_ports(router_ifs_cfg['internal_ports'])
- requested_port_ids = [i['id'] for i in router_ifs_cfg['internal_ports']]
-
- if router:
- router_ifs_internal = self.conn.list_router_interfaces(router, 'internal')
- existing_subnet_ids = self._get_subnet_ids_from_ports(router_ifs_internal)
- obsolete_subnet_ids = set(existing_subnet_ids) - set(requested_subnet_ids)
- existing_port_ids = [i['id'] for i in router_ifs_internal]
-
- else:
- router_ifs_internal = []
- existing_subnet_ids = []
- obsolete_subnet_ids = []
- existing_port_ids = []
-
- missing_port_ids = set(requested_port_ids) - set(existing_port_ids)
-
- if self.ansible.check_mode:
- # Check if the system state would be changed
- if state == 'absent' and router:
- changed = True
- elif state == 'absent' and not router:
- changed = False
- elif state == 'present' and not router:
- changed = True
- else: # if state == 'present' and router
- changed = self._needs_update(router, net,
- missing_port_ids,
- requested_subnet_ids,
- existing_subnet_ids,
- router_ifs_cfg)
- self.exit_json(changed=changed)
-
- if state == 'present':
- changed = False
-
- if not router:
- changed = True
-
- kwargs = self._build_kwargs(router, net)
- if project_id:
- kwargs['project_id'] = project_id
- router = self.conn.create_router(**kwargs)
-
- # add interface by subnet id, because user did not specify a port id
- for subnet in router_ifs_cfg['internal_subnets']:
- self.conn.add_router_interface(router, subnet_id=subnet.id)
-
- # add interface by port id if user did specify a valid port id
- for port in router_ifs_cfg['internal_ports']:
- self.conn.add_router_interface(router, port_id=port.id)
-
- # add port and interface if user did specify an ip address but port is missing yet
- for missing_internal_port in router_ifs_cfg['internal_ports_missing']:
- p = self.conn.create_port(**missing_internal_port)
- if p:
- self.conn.add_router_interface(router, port_id=p.id)
-
- else:
- if self._needs_update(router, net,
- missing_port_ids,
- requested_subnet_ids,
- existing_subnet_ids,
- router_ifs_cfg):
- changed = True
- kwargs = self._build_kwargs(router, net)
- updated_router = self.conn.update_router(**kwargs)
-
- # Protect against update_router() not actually updating the router.
- if not updated_router:
- changed = False
- else:
- router = updated_router
-
- # delete internal subnets i.e. ports
- if obsolete_subnet_ids:
- for port in router_ifs_internal:
- if 'fixed_ips' in port:
- for fip in port['fixed_ips']:
- if fip['subnet_id'] in obsolete_subnet_ids:
- self.conn.remove_router_interface(router, port_id=port['id'])
- changed = True
-
- # add new internal interface by subnet id, because user did not specify a port id
- for subnet in router_ifs_cfg['internal_subnets']:
- if subnet.id not in existing_subnet_ids:
- self.conn.add_router_interface(router, subnet_id=subnet.id)
- changed = True
-
- # add new internal interface by port id if user did specify a valid port id
- for port_id in missing_port_ids:
- self.conn.add_router_interface(router, port_id=port_id)
- changed = True
-
- # add new port and new internal interface if user did specify an ip address but port is missing yet
- for missing_internal_port in router_ifs_cfg['internal_ports_missing']:
- p = self.conn.create_port(**missing_internal_port)
- if p:
- self.conn.add_router_interface(router, port_id=p.id)
- changed = True
-
- self.exit_json(changed=changed, router=router)
-
- elif state == 'absent':
- if not router:
- self.exit_json(changed=False)
- else:
- # We need to detach all internal interfaces on a router
- # before we will be allowed to delete it. Deletion can
- # still fail if e.g. floating ips are attached to the
- # router.
- for port in router_ifs_internal:
- self.conn.remove_router_interface(router, port_id=port['id'])
- self.conn.delete_router(router['id'])
- self.exit_json(changed=True, router=router)
-
-
-def main():
- module = RouterModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_routers_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_routers_info.py
deleted file mode 100644
index 990eef8dc..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_routers_info.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2019, Bram Verschueren <verschueren.bram@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: routers_info
-short_description: Retrieve information about one or more OpenStack routers.
-author: OpenStack Ansible SIG
-description:
- - Retrieve information about one or more routers from OpenStack.
-options:
- name:
- description:
- - Name or ID of the router
- required: false
- type: str
- filters:
- description:
- - A dictionary of meta data to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- required: false
- type: dict
- suboptions:
- project_id:
- description:
- - Filter the list result by the ID of the project that owns the resource.
- type: str
- aliases:
- - tenant_id
- name:
- description:
- - Filter the list result by the human-readable name of the resource.
- type: str
- description:
- description:
- - Filter the list result by the human-readable description of the resource.
- type: str
- admin_state_up:
- description:
- - Filter the list result by the administrative state of the resource, which is up (true) or down (false).
- type: bool
- revision_number:
- description:
- - Filter the list result by the revision number of the resource.
- type: int
- tags:
- description:
- - A list of tags to filter the list result by. Resources that match all tags in this list will be returned.
- type: list
- elements: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Gather information about routers
- openstack.cloud.routers_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- register: result
-
-- name: Show openstack routers
- debug:
- msg: "{{ result.openstack_routers }}"
-
-- name: Gather information about a router by name
- openstack.cloud.routers_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- name: router1
- register: result
-
-- name: Show openstack routers
- debug:
- msg: "{{ result.openstack_routers }}"
-
-- name: Gather information about a router with filter
- openstack.cloud.routers_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- filters:
- tenant_id: bc3ea709c96849d6b81f54640400a19f
- register: result
-
-- name: Show openstack routers
- debug:
- msg: "{{ result.openstack_routers }}"
-'''
-
-RETURN = '''
-openstack_routers:
- description: has all the openstack information about the routers
- returned: always, but can be null
- type: complex
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the router.
- returned: success
- type: str
- status:
- description: Router status.
- returned: success
- type: str
- external_gateway_info:
- description: The external gateway information of the router.
- returned: success
- type: dict
- interfaces_info:
- description: List of connected interfaces.
- returned: success
- type: list
- distributed:
- description: Indicates a distributed router.
- returned: success
- type: bool
- ha:
- description: Indicates a highly-available router.
- returned: success
- type: bool
- project_id:
- description: Project id associated with this router.
- returned: success
- type: str
- routes:
- description: The extra routes configuration for L3 router.
- returned: success
- type: list
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class RouterInfoModule(OpenStackModule):
-
- deprecated_names = ('os_routers_info', 'openstack.cloud.os_routers_info')
-
- argument_spec = dict(
- name=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None)
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def run(self):
-
- kwargs = self.check_versioned(
- filters=self.params['filters']
- )
- if self.params['name']:
- kwargs['name_or_id'] = self.params['name']
- routers = self.conn.search_routers(**kwargs)
-
- for router in routers:
- interfaces_info = []
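- # Collect internal (non-gateway) ports of this router as
- # {'port_id': ..., 'ip_address': ..., 'subnet_id': ...} entries.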
- for port in self.conn.list_router_interfaces(router):
- if port.device_owner != "network:router_gateway":
- for ip_spec in port.fixed_ips:
- int_info = {
- 'port_id': port.id,
- 'ip_address': ip_spec.get('ip_address'),
- 'subnet_id': ip_spec.get('subnet_id')
- }
- interfaces_info.append(int_info)
- router['interfaces_info'] = interfaces_info
-
- self.exit(changed=False, openstack_routers=routers)
-
-
-def main():
- module = RouterInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_security_group.py b/ansible_collections/openstack/cloud/plugins/modules/os_security_group.py
deleted file mode 100644
index 8208a1c22..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_security_group.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2013, Benno Joy <benno@ansible.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: security_group
-short_description: Add/Delete security groups from an OpenStack cloud.
-author: OpenStack Ansible SIG
-description:
- - Add or Remove security groups from an OpenStack cloud.
-options:
- name:
- description:
- - Name that has to be given to the security group. This module
- requires that security group names be unique.
- required: true
- type: str
- description:
- description:
- - Long description of the purpose of the security group
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- project:
- description:
- - Unique name or ID of the project.
- required: false
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a security group
-- openstack.cloud.security_group:
- cloud: mordred
- state: present
- name: foo
- description: security group for foo servers
-
-# Update the existing 'foo' security group description
-- openstack.cloud.security_group:
- cloud: mordred
- state: present
- name: foo
- description: updated description for the foo security group
-
-# Create a security group for a given project
-- openstack.cloud.security_group:
- cloud: mordred
- state: present
- name: foo
- project: myproj
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class SecurityGroupModule(OpenStackModule):
-
- argument_spec = dict(
- name=dict(required=True),
- description=dict(default=''),
- state=dict(default='present', choices=['absent', 'present']),
- project=dict(default=None),
- )
-
- def _needs_update(self, secgroup):
- """Check for differences in the updatable values.
-
- NOTE: We don't currently allow name updates.
- """
- if secgroup['description'] != self.params['description']:
- return True
- return False
-
- def _system_state_change(self, secgroup):
- state = self.params['state']
- if state == 'present':
- if not secgroup:
- return True
- return self._needs_update(secgroup)
- if state == 'absent' and secgroup:
- return True
- return False
-
- def run(self):
-
- name = self.params['name']
- state = self.params['state']
- description = self.params['description']
- project = self.params['project']
-
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
- project_id = proj['id']
- else:
- project_id = self.conn.current_project_id
-
- if project_id:
- filters = {'tenant_id': project_id}
- else:
- filters = None
-
- secgroup = self.conn.get_security_group(name, filters=filters)
-
- if self.ansible.check_mode:
- self.exit(changed=self._system_state_change(secgroup))
-
- changed = False
- if state == 'present':
- if not secgroup:
- kwargs = {}
- if project_id:
- kwargs['project_id'] = project_id
- secgroup = self.conn.create_security_group(name, description,
- **kwargs)
- changed = True
- else:
- if self._needs_update(secgroup):
- secgroup = self.conn.update_security_group(
- secgroup['id'], description=description)
- changed = True
- self.exit(
- changed=changed, id=secgroup['id'], secgroup=secgroup)
-
- if state == 'absent':
- if secgroup:
- self.conn.delete_security_group(secgroup['id'])
- changed = True
- self.exit(changed=changed)
-
-
-def main():
- module = SecurityGroupModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_security_group_rule.py b/ansible_collections/openstack/cloud/plugins/modules/os_security_group_rule.py
deleted file mode 100644
index 53fe6f590..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_security_group_rule.py
+++ /dev/null
@@ -1,389 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2013, Benno Joy <benno@ansible.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: security_group_rule
-short_description: Add/Delete rule from an existing security group
-author: OpenStack Ansible SIG
-description:
- - Add or Remove rule from an existing security group
-options:
- security_group:
- description:
- - Name or ID of the security group
- required: true
- type: str
- protocol:
- description:
- - IP protocol: any, tcp, udp, icmp or another named protocol; a
- protocol number in the range 0-255 may also be given.
- type: str
- port_range_min:
- description:
- - Starting port
- type: int
- port_range_max:
- description:
- - Ending port
- type: int
- remote_ip_prefix:
- description:
- - Source IP address(es) in CIDR notation (exclusive with remote_group)
- type: str
- remote_group:
- description:
- - Name or ID of the Security group to link (exclusive with
- remote_ip_prefix)
- type: str
- ethertype:
- description:
- - Must be IPv4 or IPv6, and addresses represented in CIDR must
- match the ingress or egress rules. Not all providers support IPv6.
- choices: ['IPv4', 'IPv6']
- default: IPv4
- type: str
- direction:
- description:
- - The direction in which the security group rule is applied. Not
- all providers support egress.
- choices: ['egress', 'ingress']
- default: ingress
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- project:
- description:
- - Unique name or ID of the project.
- required: false
- type: str
- description:
- required: false
- description:
- - Description of the rule.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a security group rule
-- openstack.cloud.security_group_rule:
- cloud: mordred
- security_group: foo
- protocol: tcp
- port_range_min: 80
- port_range_max: 80
- remote_ip_prefix: 0.0.0.0/0
-
-# Create a security group rule for ping
-- openstack.cloud.security_group_rule:
- cloud: mordred
- security_group: foo
- protocol: icmp
- remote_ip_prefix: 0.0.0.0/0
-
-# Another way to create the ping rule
-- openstack.cloud.security_group_rule:
- cloud: mordred
- security_group: foo
- protocol: icmp
- port_range_min: -1
- port_range_max: -1
- remote_ip_prefix: 0.0.0.0/0
-
-# Create a TCP rule covering all ports
-- openstack.cloud.security_group_rule:
- cloud: mordred
- security_group: foo
- protocol: tcp
- port_range_min: 1
- port_range_max: 65535
- remote_ip_prefix: 0.0.0.0/0
-
-# Another way to create the TCP rule above (defaults to all ports)
-- openstack.cloud.security_group_rule:
- cloud: mordred
- security_group: foo
- protocol: tcp
- remote_ip_prefix: 0.0.0.0/0
-
-# Create a rule for VRRP with numbered protocol 112
-- openstack.cloud.security_group_rule:
- security_group: loadbalancer_sg
- protocol: 112
- remote_group: loadbalancer-node_sg
-
-# Create a security group rule for a given project
-- openstack.cloud.security_group_rule:
- cloud: mordred
- security_group: foo
- protocol: icmp
- remote_ip_prefix: 0.0.0.0/0
- project: myproj
-
-# Remove the default created egress rule for IPv4
-- openstack.cloud.security_group_rule:
- cloud: mordred
- security_group: foo
- protocol: any
- remote_ip_prefix: 0.0.0.0/0
-'''
-
-RETURN = '''
-id:
- description: Unique rule UUID.
- type: str
- returned: state == present
-direction:
- description: The direction in which the security group rule is applied.
- type: str
- sample: 'egress'
- returned: state == present
-ethertype:
- description: One of IPv4 or IPv6.
- type: str
- sample: 'IPv4'
- returned: state == present
-port_range_min:
- description: The minimum port number in the range that is matched by
- the security group rule.
- type: int
- sample: 8000
- returned: state == present
-port_range_max:
- description: The maximum port number in the range that is matched by
- the security group rule.
- type: int
- sample: 8000
- returned: state == present
-protocol:
- description: The protocol that is matched by the security group rule.
- type: str
- sample: 'tcp'
- returned: state == present
-remote_ip_prefix:
- description: The remote IP prefix to be associated with this security group rule.
- type: str
- sample: '0.0.0.0/0'
- returned: state == present
-security_group_id:
- description: The security group ID to associate with this security group rule.
- type: str
- returned: state == present
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- OpenStackModule)
-
-
-def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
- """
- Capture the complex port matching logic.
-
- The port values coming in for the module might be -1 (for ICMP),
- which will work only for Nova, but this is handled by sdk. Likewise,
- they might be None, which works for Neutron, but not Nova. This too is
- handled by sdk. Since sdk will consistently return these port
- values as None, we need to convert any -1 values input to the module
- to None here for comparison.
-
- For TCP and UDP protocols, None values for both min and max are
- represented as the range 1-65535 for Nova, but remain None for
- Neutron. sdk returns the full range when Nova is the backend (since
- that is how Nova stores them), and None values for Neutron. If None
- values are input to the module for both values, then we need to adjust
- for comparison.
- """
-
- # Check if the user is supplying -1 for ICMP.
- if protocol in ['icmp', 'ipv6-icmp']:
- if module_min and int(module_min) == -1:
- module_min = None
- if module_max and int(module_max) == -1:
- module_max = None
-
- # Rules with 'any' protocol do not match ports
- if protocol == 'any':
- return True
-
- # Check if the user is supplying -1, 1 to 65535 or None values for full TCP/UDP port range.
- if protocol in ['tcp', 'udp'] or protocol is None:
- if (
- not module_min and not module_max
- or (int(module_min) in [-1, 1]
- and int(module_max) in [-1, 65535])
- ):
- if (
- not rule_min and not rule_max
- or (int(rule_min) in [-1, 1]
- and int(rule_max) in [-1, 65535])
- ):
- # (None, None) == (1, 65535) == (-1, -1)
- return True
-
- # Sanity check to make sure we don't have type comparison issues.
- if module_min:
- module_min = int(module_min)
- if module_max:
- module_max = int(module_max)
- if rule_min:
- rule_min = int(rule_min)
- if rule_max:
- rule_max = int(rule_max)
-
- return module_min == rule_min and module_max == rule_max
-
-
-class SecurityGroupRuleModule(OpenStackModule):
- deprecated_names = ('os_security_group_rule', 'openstack.cloud.os_security_group_rule')
-
- argument_spec = dict(
- security_group=dict(required=True),
- protocol=dict(type='str'),
- port_range_min=dict(required=False, type='int'),
- port_range_max=dict(required=False, type='int'),
- remote_ip_prefix=dict(required=False),
- remote_group=dict(required=False),
- ethertype=dict(default='IPv4',
- choices=['IPv4', 'IPv6']),
- direction=dict(default='ingress',
- choices=['egress', 'ingress']),
- state=dict(default='present',
- choices=['absent', 'present']),
- description=dict(required=False, default=None),
- project=dict(default=None),
- )
-
- module_kwargs = dict(
- mutually_exclusive=[
- ['remote_ip_prefix', 'remote_group'],
- ]
- )
-
- def _find_matching_rule(self, secgroup, remotegroup):
- """
- Find a rule in the group that matches the module parameters.
- :returns: The matching rule dict, or None if no matches.
- """
- protocol = self.params['protocol']
- remote_ip_prefix = self.params['remote_ip_prefix']
- ethertype = self.params['ethertype']
- direction = self.params['direction']
- remote_group_id = remotegroup['id']
-
- for rule in secgroup['security_group_rules']:
- if (
- protocol == rule['protocol']
- and remote_ip_prefix == rule['remote_ip_prefix']
- and ethertype == rule['ethertype']
- and direction == rule['direction']
- and remote_group_id == rule['remote_group_id']
- and _ports_match(
- protocol,
- self.params['port_range_min'],
- self.params['port_range_max'],
- rule['port_range_min'],
- rule['port_range_max'])
- ):
- return rule
- return None
-
- def _system_state_change(self, secgroup, remotegroup):
- state = self.params['state']
- if secgroup:
- rule_exists = self._find_matching_rule(secgroup, remotegroup)
- else:
- return False
-
- if state == 'present' and not rule_exists:
- return True
- if state == 'absent' and rule_exists:
- return True
- return False
-
- def run(self):
-
- state = self.params['state']
- security_group = self.params['security_group']
- remote_group = self.params['remote_group']
- project = self.params['project']
- changed = False
-
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
- project_id = proj['id']
- else:
- project_id = self.conn.current_project_id
-
- if project_id and not remote_group:
- filters = {'tenant_id': project_id}
- else:
- filters = None
-
- secgroup = self.conn.get_security_group(security_group, filters=filters)
-
- if remote_group:
- remotegroup = self.conn.get_security_group(remote_group, filters=filters)
- else:
- remotegroup = {'id': None}
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(secgroup, remotegroup))
-
- if state == 'present':
- if self.params['protocol'] == 'any':
- self.params['protocol'] = None
-
- if not secgroup:
- self.fail_json(msg='Could not find security group %s' % security_group)
-
- rule = self._find_matching_rule(secgroup, remotegroup)
- if not rule:
- kwargs = {}
- if project_id:
- kwargs['project_id'] = project_id
- if self.params["description"] is not None:
- kwargs["description"] = self.params['description']
- rule = self.conn.network.create_security_group_rule(
- security_group_id=secgroup['id'],
- port_range_min=None if self.params['port_range_min'] == -1 else self.params['port_range_min'],
- port_range_max=None if self.params['port_range_max'] == -1 else self.params['port_range_max'],
- protocol=self.params['protocol'],
- remote_ip_prefix=self.params['remote_ip_prefix'],
- remote_group_id=remotegroup['id'],
- direction=self.params['direction'],
- ethertype=self.params['ethertype'],
- **kwargs
- )
- changed = True
- self.exit_json(changed=changed, rule=rule, id=rule['id'])
-
- if state == 'absent' and secgroup:
- rule = self._find_matching_rule(secgroup, remotegroup)
- if rule:
- self.conn.delete_security_group_rule(rule['id'])
- changed = True
-
- self.exit_json(changed=changed)
-
-
-def main():
- module = SecurityGroupRuleModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_server.py b/ansible_collections/openstack/cloud/plugins/modules/os_server.py
deleted file mode 100644
index a3ca7d051..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_server.py
+++ /dev/null
@@ -1,805 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# Copyright 2019 Red Hat, Inc.
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2013, Benno Joy <benno@ansible.com>
-# Copyright (c) 2013, John Dewey <john@dewey.ws>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: server
-short_description: Create/Delete Compute Instances from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Create or Remove compute instances from OpenStack.
-options:
- name:
- description:
- - Name that has to be given to the instance. It is also possible to
- specify the ID of the instance instead of its name if I(state) is I(absent).
- required: true
- type: str
- image:
- description:
- - The name or id of the base image to boot.
- - Required when I(boot_from_volume=true)
- type: str
- image_exclude:
- description:
- - Text to use to filter image names, for the case, such as HP, where
- there are multiple image names matching the common identifying
- portions. image_exclude is a negative match filter - it is text that
- must not appear in the image name.
- type: str
- default: "(deprecated)"
- flavor:
- description:
- - The name or id of the flavor in which the new instance has to be
- created.
- - Exactly one of I(flavor) and I(flavor_ram) must be defined when
- I(state=present).
- type: str
- flavor_ram:
- description:
- - The minimum amount of ram in MB that the flavor in which the new
- instance has to be created must have.
- - Exactly one of I(flavor) and I(flavor_ram) must be defined when
- I(state=present).
- type: int
- flavor_include:
- description:
- - Text to use to filter flavor names, for the case, such as Rackspace,
- where there are multiple flavors that have the same ram count.
- flavor_include is a positive match filter - it must exist in the
- flavor name.
- type: str
- key_name:
- description:
- - The key pair name to be used when creating an instance
- type: str
- security_groups:
- description:
- - Names of the security groups to which the instance should be
- added. This may be a YAML list or a comma separated string.
- type: list
- default: ['default']
- elements: str
- network:
- description:
- - Name or ID of a network to attach this instance to. A simpler
- version of the nics parameter, only one of network or nics should
- be supplied.
- type: str
- nics:
- description:
- - A list of networks to which the instance's interface should
- be attached. Networks may be referenced by net-id/net-name/port-id
- or port-name.
- - 'Also this accepts a string containing a list of (net/port)-(id/name)
- Eg: nics: "net-id=uuid-1,port-name=myport"
- Only one of network or nics should be supplied.'
- type: list
- elements: raw
- suboptions:
- tag:
- description:
- - 'A "tag" for the specific port to be passed via metadata.
- Eg: tag: test_tag'
- auto_ip:
- description:
- - Ensure the instance has a public IP, however the cloud chooses to provide it
- type: bool
- default: 'yes'
- aliases: ['auto_floating_ip', 'public_ip']
- floating_ips:
- description:
- - list of valid floating IPs that pre-exist to assign to this node
- type: list
- elements: str
- floating_ip_pools:
- description:
- - Name of floating IP pool from which to choose a floating IP
- type: list
- elements: str
- meta:
- description:
- - 'A list of key value pairs that should be provided as metadata to
- the new instance, or a string containing a list of key-value pairs.
- Eg: meta: "key1=value1,key2=value2"'
- type: raw
- wait:
- description:
- - If the module should wait for the instance to be created.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the instance to get
- into active state.
- default: 180
- type: int
- config_drive:
- description:
- - Whether to boot the server with config drive enabled
- type: bool
- default: 'no'
- userdata:
- description:
- - Opaque blob of data which is made available to the instance
- type: str
- aliases: ['user_data']
- boot_from_volume:
- description:
- - Should the instance boot from a persistent volume created based on
- the image given. Mutually exclusive with boot_volume.
- type: bool
- default: 'no'
- volume_size:
- description:
- - The size of the volume to create in GB if booting from volume based
- on an image.
- type: int
- boot_volume:
- description:
- - Volume name or id to use as the volume to boot from. Implies
- boot_from_volume. Mutually exclusive with image and boot_from_volume.
- aliases: ['root_volume']
- type: str
- terminate_volume:
- description:
- - If C(yes), delete volume when deleting instance (if booted from volume)
- type: bool
- default: 'no'
- volumes:
- description:
- - A list of preexisting volume names or ids to attach to the instance
- default: []
- type: list
- elements: str
- scheduler_hints:
- description:
- - Arbitrary key/value pairs to the scheduler for custom use
- type: dict
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- delete_fip:
- description:
- - When I(state) is absent and this option is true, any floating IP
- associated with the instance will be deleted along with the instance.
- type: bool
- default: 'no'
- reuse_ips:
- description:
- - When I(auto_ip) is true and this option is true, the I(auto_ip) code
- will attempt to re-use unassigned floating ips in the project before
- creating a new one. It is important to note that it is impossible
- to safely do this concurrently, so if your use case involves
- concurrent server creation, it is highly recommended to set this to
- false and to delete the floating ip associated with a server when
- the server is deleted using I(delete_fip).
- type: bool
- default: 'yes'
- availability_zone:
- description:
- - Availability zone in which to create the server.
- type: str
- description:
- description:
- - Description of the server.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Create a new instance, attach it to a network and pass metadata to the instance
- openstack.cloud.server:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: vm1
- image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
- key_name: ansible_key
- timeout: 200
- flavor: 4
- nics:
- - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
- - net-name: another_network
- meta:
- hostname: test1
- group: uge_master
-
-# Create a new instance in HP Cloud AE1 region availability zone az2 and
-# automatically assign a floating IP
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: username
- password: Equality7-2521
- project_name: username-project1
- name: vm1
- region_name: region-b.geo-1
- availability_zone: az2
- image: 9302692b-b787-4b52-a3a6-daebb79cb498
- key_name: test
- timeout: 200
- flavor: 101
- security_groups: default
- auto_ip: yes
-
-# Create a new instance in the named cloud mordred, in availability zone az2,
-# and assign a pre-known floating IP
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- state: present
- cloud: mordred
- name: vm1
- availability_zone: az2
- image: 9302692b-b787-4b52-a3a6-daebb79cb498
- key_name: test
- timeout: 200
- flavor: 101
- floating_ips:
- - 12.34.56.79
-
-# Create a new instance with 4G of RAM on Ubuntu Trusty, ignoring
-# deprecated images
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- name: vm1
- state: present
- cloud: mordred
- region_name: region-b.geo-1
- image: Ubuntu Server 14.04
- image_exclude: deprecated
- flavor_ram: 4096
-
-# Create a new instance with 4G of RAM on Ubuntu Trusty on a Performance node
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- name: vm1
- cloud: rax-dfw
- state: present
- image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
- flavor_ram: 4096
- flavor_include: Performance
-
-# Create a new instance attached to multiple networks
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance with a string
- openstack.cloud.server:
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: vm1
- image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
- key_name: ansible_key
- timeout: 200
- flavor: 4
- nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."
-
-- name: Create a new instance, attach it to a network and pass metadata as a string
- openstack.cloud.server:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: vm1
- image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
- key_name: ansible_key
- timeout: 200
- flavor: 4
- nics:
- - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
- - net-name: another_network
- meta: "hostname=test1,group=uge_master"
-
-- name: Create a new instance attached to a specific network
- openstack.cloud.server:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: vm1
- image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
- key_name: ansible_key
- timeout: 200
- flavor: 4
- network: another_network
-
-# Create a new instance with 4G of RAM on a 75G Ubuntu Trusty volume
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- name: vm1
- state: present
- cloud: mordred
- region_name: ams01
- image: Ubuntu Server 14.04
- flavor_ram: 4096
- boot_from_volume: True
- volume_size: 75
-
-# Creates a new instance with 2 volumes attached
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- name: vm1
- state: present
- cloud: mordred
- region_name: ams01
- image: Ubuntu Server 14.04
- flavor_ram: 4096
- volumes:
- - photos
- - music
-
-# Creates a new instance with provisioning userdata using Cloud-Init
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- name: vm1
- state: present
- image: "Ubuntu Server 14.04"
- flavor: "P-1"
- network: "Production"
- userdata: |
- #cloud-config
- chpasswd:
- list: |
- ubuntu:{{ default_password }}
- expire: False
- packages:
- - ansible
- package_upgrade: true
-
-# Creates a new instance with provisioning userdata using Bash Scripts
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- name: vm1
- state: present
- image: "Ubuntu Server 14.04"
- flavor: "P-1"
- network: "Production"
- userdata: |
- {%- raw -%}#!/bin/bash
- echo " up ip route add 10.0.0.0/8 via {% endraw -%}{{ intra_router }}{%- raw -%}" >> /etc/network/interfaces.d/eth0.conf
- echo " down ip route del 10.0.0.0/8" >> /etc/network/interfaces.d/eth0.conf
- ifdown eth0 && ifup eth0
- {% endraw %}
-
-# Create a new instance with server group for (anti-)affinity
-# server group ID is returned from openstack.cloud.server_group module.
-- name: launch a compute instance
- hosts: localhost
- tasks:
- - name: launch an instance
- openstack.cloud.server:
- state: present
- name: vm1
- image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
- flavor: 4
- scheduler_hints:
- group: f5c8c61a-9230-400a-8ed2-3b023c190a7f
-
-# Create an instance with "tags" for the nic
-- name: Create instance with nics "tags"
- openstack.cloud.server:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: vm1
- image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
- key_name: ansible_key
- flavor: 4
- nics:
- - port-name: net1_port1
- tag: test_tag
- - net-name: another_network
-
-# Deletes an instance via its ID
-- name: remove an instance
- hosts: localhost
- tasks:
- - name: remove an instance
- openstack.cloud.server:
- name: abcdef01-2345-6789-0abc-def0123456789
- state: absent
-
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_find_nova_addresses, OpenStackModule)
-
-
-def _parse_nics(nics):
- for net in nics:
- if isinstance(net, str):
- for nic in net.split(','):
- yield dict((nic.split('='),))
- else:
- yield net
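-
-# Illustrative examples of what _parse_nics() yields (see the 'nics'
-# documentation above): the string "net-id=uuid-1,port-name=myport" yields
-# {'net-id': 'uuid-1'} and {'port-name': 'myport'}; dict entries such as
-# {'net-name': 'another_network'} are yielded unchanged.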
-
-
-def _parse_meta(meta):
- if isinstance(meta, str):
- metas = {}
- for kv_str in meta.split(","):
- k, v = kv_str.split("=")
- metas[k] = v
- return metas
- if not meta:
- return {}
- return meta
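-
-# Illustrative examples of what _parse_meta() returns: the string
-# "key1=value1,key2=value2" becomes {'key1': 'value1', 'key2': 'value2'},
-# None or an empty dict becomes {}, and a non-empty dict is returned
-# unchanged.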
-
-
-class ServerModule(OpenStackModule):
- deprecated_names = ('os_server', 'openstack.cloud.os_server')
-
- argument_spec = dict(
- name=dict(required=True),
- image=dict(default=None),
- image_exclude=dict(default='(deprecated)'),
- flavor=dict(default=None),
- flavor_ram=dict(default=None, type='int'),
- flavor_include=dict(default=None),
- key_name=dict(default=None),
- security_groups=dict(default=['default'], type='list', elements='str'),
- network=dict(default=None),
- nics=dict(default=[], type='list', elements='raw'),
- meta=dict(default=None, type='raw'),
- userdata=dict(default=None, aliases=['user_data']),
- config_drive=dict(default=False, type='bool'),
- auto_ip=dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
- floating_ips=dict(default=None, type='list', elements='str'),
- floating_ip_pools=dict(default=None, type='list', elements='str'),
- volume_size=dict(default=None, type='int'),
- boot_from_volume=dict(default=False, type='bool'),
- boot_volume=dict(default=None, aliases=['root_volume']),
- terminate_volume=dict(default=False, type='bool'),
- volumes=dict(default=[], type='list', elements='str'),
- scheduler_hints=dict(default=None, type='dict'),
- state=dict(default='present', choices=['absent', 'present']),
- delete_fip=dict(default=False, type='bool'),
- reuse_ips=dict(default=True, type='bool'),
- description=dict(default=None, type='str'),
- )
- module_kwargs = dict(
- mutually_exclusive=[
- ['auto_ip', 'floating_ips'],
- ['auto_ip', 'floating_ip_pools'],
- ['floating_ips', 'floating_ip_pools'],
- ['flavor', 'flavor_ram'],
- ['image', 'boot_volume'],
- ['boot_from_volume', 'boot_volume'],
- ['nics', 'network'],
- ],
- required_if=[
- ('boot_from_volume', True, ['volume_size', 'image']),
- ],
- )
-
- def run(self):
-
- state = self.params['state']
- image = self.params['image']
- boot_volume = self.params['boot_volume']
- flavor = self.params['flavor']
- flavor_ram = self.params['flavor_ram']
-
- if state == 'present':
- if not (image or boot_volume):
- self.fail(
- msg="Parameter 'image' or 'boot_volume' is required "
- "if state == 'present'"
- )
- if not flavor and not flavor_ram:
- self.fail(
- msg="Parameter 'flavor' or 'flavor_ram' is required "
- "if state == 'present'"
- )
-
- if state == 'present':
- self._get_server_state()
- self._create_server()
- elif state == 'absent':
- self._get_server_state()
- self._delete_server()
-
- def _exit_hostvars(self, server, changed=True):
- hostvars = self.conn.get_openstack_vars(server)
- self.exit(
- changed=changed, server=server, id=server.id, openstack=hostvars)
-
- def _get_server_state(self):
- state = self.params['state']
- server = self.conn.get_server(self.params['name'])
- if server and state == 'present':
- if server.status not in ('ACTIVE', 'SHUTOFF', 'PAUSED', 'SUSPENDED'):
- self.fail(
- msg="The instance is available but not Active state: " + server.status)
- (ip_changed, server) = self._check_ips(server)
- (sg_changed, server) = self._check_security_groups(server)
- (server_changed, server) = self._update_server(server)
- self._exit_hostvars(server, ip_changed or sg_changed or server_changed)
- if server and state == 'absent':
- return True
- if state == 'absent':
- self.exit(changed=False, result="not present")
- return True
-
- def _create_server(self):
- flavor = self.params['flavor']
- flavor_ram = self.params['flavor_ram']
- flavor_include = self.params['flavor_include']
-
- image_id = None
- if not self.params['boot_volume']:
- image_id = self.conn.get_image_id(
- self.params['image'], self.params['image_exclude'])
- if not image_id:
- self.fail(
- msg="Could not find image %s" % self.params['image'])
-
- if flavor:
- flavor_dict = self.conn.get_flavor(flavor)
- if not flavor_dict:
- self.fail(msg="Could not find flavor %s" % flavor)
- else:
- flavor_dict = self.conn.get_flavor_by_ram(flavor_ram, flavor_include)
- if not flavor_dict:
- self.fail(msg="Could not find any matching flavor")
-
- nics = self._network_args()
-
- self.params['meta'] = _parse_meta(self.params['meta'])
-
- bootkwargs = self.check_versioned(
- name=self.params['name'],
- image=image_id,
- flavor=flavor_dict['id'],
- nics=nics,
- meta=self.params['meta'],
- security_groups=self.params['security_groups'],
- userdata=self.params['userdata'],
- config_drive=self.params['config_drive'],
- )
- for optional_param in (
- 'key_name', 'availability_zone', 'network',
- 'scheduler_hints', 'volume_size', 'volumes',
- 'description'):
- if self.params[optional_param]:
- bootkwargs[optional_param] = self.params[optional_param]
-
- server = self.conn.create_server(
- ip_pool=self.params['floating_ip_pools'],
- ips=self.params['floating_ips'],
- auto_ip=self.params['auto_ip'],
- boot_volume=self.params['boot_volume'],
- boot_from_volume=self.params['boot_from_volume'],
- terminate_volume=self.params['terminate_volume'],
- reuse_ips=self.params['reuse_ips'],
- wait=self.params['wait'], timeout=self.params['timeout'],
- **bootkwargs
- )
-
- self._exit_hostvars(server)
-
- def _update_server(self, server):
- changed = False
-
- self.params['meta'] = _parse_meta(self.params['meta'])
-
- # self.conn.set_server_metadata only adds or updates the given key=value
- # pairs; it does not remove existing keys that are absent from the request
- update_meta = {}
- for (k, v) in self.params['meta'].items():
- if k not in server.metadata or server.metadata[k] != v:
- update_meta[k] = v
-
- if update_meta:
- self.conn.set_server_metadata(server, update_meta)
- changed = True
- # Refresh server vars
- server = self.conn.get_server(self.params['name'])
-
- return (changed, server)
-
- def _delete_server(self):
- try:
- self.conn.delete_server(
- self.params['name'], wait=self.params['wait'],
- timeout=self.params['timeout'],
- delete_ips=self.params['delete_fip'])
- except Exception as e:
- self.fail(msg="Error in deleting vm: %s" % e)
- self.exit(changed=True, result='deleted')
-
- def _network_args(self):
- args = []
- nics = self.params['nics']
-
- if not isinstance(nics, list):
- self.fail(msg='The \'nics\' parameter must be a list.')
-
- for num, net in enumerate(_parse_nics(nics)):
- if not isinstance(net, dict):
- self.fail(
- msg='Each entry in the \'nics\' parameter must be a dict.')
-
- if net.get('net-id'):
- args.append(net)
- elif net.get('net-name'):
- by_name = self.conn.get_network(net['net-name'])
- if not by_name:
- self.fail(
- msg='Could not find network by net-name: %s' %
- net['net-name'])
- resolved_net = net.copy()
- del resolved_net['net-name']
- resolved_net['net-id'] = by_name['id']
- args.append(resolved_net)
- elif net.get('port-id'):
- args.append(net)
- elif net.get('port-name'):
- by_name = self.conn.get_port(net['port-name'])
- if not by_name:
- self.fail(
- msg='Could not find port by port-name: %s' %
- net['port-name'])
- resolved_net = net.copy()
- del resolved_net['port-name']
- resolved_net['port-id'] = by_name['id']
- args.append(resolved_net)
-
- if 'tag' in net:
- args[num]['tag'] = net['tag']
- return args
-
- def _detach_ip_list(self, server, extra_ips):
- for ip in extra_ips:
- ip_id = self.conn.get_floating_ip(
- id=None, filters={'floating_ip_address': ip})
- self.conn.detach_ip_from_server(
- server_id=server.id, floating_ip_id=ip_id)
-
- def _check_ips(self, server):
- changed = False
-
- auto_ip = self.params['auto_ip']
- floating_ips = self.params['floating_ips']
- floating_ip_pools = self.params['floating_ip_pools']
-
- if floating_ip_pools or floating_ips:
- ips = openstack_find_nova_addresses(server.addresses, 'floating')
- if not ips:
- # If we're configured to have a floating IP but we don't have one,
- # let's add one
- server = self.conn.add_ips_to_server(
- server,
- auto_ip=auto_ip,
- ips=floating_ips,
- ip_pool=floating_ip_pools,
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- )
- changed = True
- elif floating_ips:
- # we were configured to have specific ips, let's make sure we have
- # those
- missing_ips = []
- for ip in floating_ips:
- if ip not in ips:
- missing_ips.append(ip)
- if missing_ips:
- server = self.conn.add_ip_list(server, missing_ips,
- wait=self.params['wait'],
- timeout=self.params['timeout'])
- changed = True
- extra_ips = []
- for ip in ips:
- if ip not in floating_ips:
- extra_ips.append(ip)
- if extra_ips:
- self._detach_ip_list(server, extra_ips)
- changed = True
- elif auto_ip:
- if server['interface_ip']:
- changed = False
- else:
- # We're configured for auto_ip but we're not showing an
- # interface_ip. Maybe someone deleted an IP out from under us.
- server = self.conn.add_ips_to_server(
- server,
- auto_ip=auto_ip,
- ips=floating_ips,
- ip_pool=floating_ip_pools,
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- )
- changed = True
- return (changed, server)
-
- def _check_security_groups(self, server):
- changed = False
-
- # server security groups were added to shade in 1.19. Until then this
- # module simply ignored trying to update security groups and only set them
- # on newly created hosts.
- if not (
- hasattr(self.conn, 'add_server_security_groups')
- and hasattr(self.conn, 'remove_server_security_groups')
- ):
- return changed, server
-
- module_security_groups = set(self.params['security_groups'])
- server_security_groups = set(sg['name'] for sg in server.security_groups)
-
- add_sgs = module_security_groups - server_security_groups
- remove_sgs = server_security_groups - module_security_groups
-
- if add_sgs:
- self.conn.add_server_security_groups(server, list(add_sgs))
- changed = True
-
- if remove_sgs:
- self.conn.remove_server_security_groups(server, list(remove_sgs))
- changed = True
-
- return (changed, server)
-
-
-def main():
- module = ServerModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_server_action.py b/ansible_collections/openstack/cloud/plugins/modules/os_server_action.py
deleted file mode 100644
index 341ff3742..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_server_action.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# Copyright (c) 2015, Jesse Keating <jlk@derpops.bike>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: server_action
-short_description: Perform actions on Compute Instances from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Perform server actions on an existing compute instance from OpenStack.
- This module does not return any data other than changed true/false.
- When I(action) is 'rebuild', the I(image) parameter is required.
-options:
- server:
- description:
- - Name or ID of the instance
- required: true
- type: str
- wait:
- description:
- - If the module should wait for the instance action to be performed.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the instance to perform
- the requested action.
- default: 180
- type: int
- action:
- description:
- - Perform the given action. The lock and unlock actions always return
- changed as the servers API does not provide lock status.
- choices: [stop, start, pause, unpause, lock, unlock, suspend, resume,
- rebuild, shelve, shelve_offload, unshelve]
- type: str
- required: true
- image:
- description:
- - Image the server should be rebuilt with
- type: str
- admin_password:
- description:
- - Admin password for server to rebuild
- type: str
- all_projects:
- description:
- - Whether to search for the server in all projects or just the current
- auth-scoped project.
- type: bool
- default: 'no'
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Pauses a compute instance
-- openstack.cloud.server_action:
- action: pause
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- server: vm1
- timeout: 200
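-
-# (Illustrative sketch) Rebuild an instance from an image; the image name
-# below is an assumption. I(image) is required when I(action) is C(rebuild).
-- openstack.cloud.server_action:
- action: rebuild
- server: vm1
- image: Ubuntu Server 14.04
- timeout: 200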
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-# If I(action) is set to C(shelve) then according to OpenStack's Compute API, the shelved
-# server is in one of two possible states:
-#
-# SHELVED: The server is in shelved state. Depending on the shelve offload time,
-# the server will be automatically shelve-offloaded.
-# SHELVED_OFFLOADED: The shelved server is offloaded (removed from the compute host) and
-# needs an unshelve action before it can be used again.
-#
-# But wait_for_server can only wait for a single server state. If a shelved server is offloaded
-# immediately, then an exceptions.ResourceTimeout will be raised if I(action) is set to C(shelve).
-# This is likely to happen because shelved_offload_time in Nova's config is set to 0 by default.
-# This also applies if you boot the server from volumes.
-#
-# Calling C(shelve_offload) instead of C(shelve) will most likely also fail, because the default
-# policy does not allow C(shelve_offload) for non-admin users while C(shelve) is allowed for
-# admin users and server owners.
-#
-# As we cannot retrieve shelved_offload_time from Nova's config, we fall back to waiting for
-# one state and if that fails then we fetch the server's state and match it against the other
-# valid states from _action_map.
-#
-# Ref.: https://docs.openstack.org/api-guide/compute/server_concepts.html
-
-_action_map = {'stop': ['SHUTOFF'],
- 'start': ['ACTIVE'],
- 'pause': ['PAUSED'],
- 'unpause': ['ACTIVE'],
- 'lock': ['ACTIVE'], # API doesn't show lock/unlock status
- 'unlock': ['ACTIVE'],
- 'suspend': ['SUSPENDED'],
- 'resume': ['ACTIVE'],
- 'rebuild': ['ACTIVE'],
- 'shelve': ['SHELVED_OFFLOADED', 'SHELVED'],
- 'shelve_offload': ['SHELVED_OFFLOADED'],
- 'unshelve': ['ACTIVE']}
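-
-# For example, _action_map['shelve'] is ['SHELVED_OFFLOADED', 'SHELVED']:
-# _wait() first waits for SHELVED_OFFLOADED and, on timeout, also accepts a
-# server that already reports SHELVED.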
-
-_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock', 'shelve_offload']
-
-
-class ServerActionModule(OpenStackModule):
- deprecated_names = ('os_server_action', 'openstack.cloud.os_server_action')
-
- argument_spec = dict(
- server=dict(required=True, type='str'),
- action=dict(required=True, type='str',
- choices=['stop', 'start', 'pause', 'unpause',
- 'lock', 'unlock', 'suspend', 'resume',
- 'rebuild', 'shelve', 'shelve_offload', 'unshelve']),
- image=dict(required=False, type='str'),
- admin_password=dict(required=False, type='str', no_log=True),
- all_projects=dict(required=False, type='bool', default=False),
- )
- module_kwargs = dict(
- required_if=[('action', 'rebuild', ['image'])],
- supports_check_mode=True,
- )
-
- def run(self):
- os_server = self._preliminary_checks()
- self._execute_server_action(os_server)
- # for some reason we don't wait for lock and unlock before exit
- if self.params['action'] not in ('lock', 'unlock'):
- if self.params['wait']:
- self._wait(os_server)
- self.exit_json(changed=True)
-
- def _preliminary_checks(self):
- # Using a Munch object to get information about the server
- os_server = self.conn.get_server(
- self.params['server'],
- all_projects=self.params['all_projects'],
- )
- if not os_server:
- self.fail_json(msg='Could not find server %s' % self.params['server'])
- # check mode
- if self.ansible.check_mode:
- self.exit_json(changed=self.__system_state_change(os_server))
- # examine special cases
- # lock, unlock and rebuild don't depend on state, just do it
- if self.params['action'] not in ('lock', 'unlock', 'rebuild'):
- if not self.__system_state_change(os_server):
- self.exit_json(changed=False)
- return os_server
-
- def _execute_server_action(self, os_server):
- if self.params['action'] == 'rebuild':
- return self._rebuild_server(os_server)
- if self.params['action'] == 'shelve_offload':
- # shelve_offload is not supported in OpenstackSDK
- return self._action(os_server, json={'shelveOffload': None})
- action_name = self.params['action'] + "_server"
- try:
- func_name = getattr(self.conn.compute, action_name)
- except AttributeError:
- self.fail_json(
- msg="Method %s wasn't found in OpenstackSDK compute" % action_name)
- func_name(os_server)
-
- def _rebuild_server(self, os_server):
- # rebuild should ensure the image exists
- try:
- image = self.conn.get_image(self.params['image'])
- except Exception as e:
- self.fail_json(
- msg="Can't find the image %s: %s" % (self.params['image'], e))
- if not image:
- self.fail_json(msg="Image %s was not found!" % self.params['image'])
- # admin_password is required by SDK, but not required by Nova API
- if self.params['admin_password']:
- self.conn.compute.rebuild_server(
- server=os_server,
- name=os_server['name'],
- image=image['id'],
- admin_password=self.params['admin_password']
- )
- else:
- self._action(os_server, json={'rebuild': {'imageRef': image['id']}})
-
- def _action(self, os_server, json):
- response = self.conn.compute.post(
- '/servers/{server_id}/action'.format(server_id=os_server['id']),
- json=json)
- self.sdk.exceptions.raise_from_response(response)
- return response
-
- def _wait(self, os_server):
- """Wait for the server to reach the desired state for the given action."""
- # The wait_for_server function needs a Server object instead of the
- # Munch object returned by self.conn.get_server
- server = self.conn.compute.get_server(os_server['id'])
- states = _action_map[self.params['action']]
-
- try:
- self.conn.compute.wait_for_server(
- server,
- status=states[0],
- wait=self.params['timeout'])
- except self.sdk.exceptions.ResourceTimeout:
- # raise if there is only one valid state
- if len(states) < 2:
- raise
- # fetch current server status and compare to other valid states
- server = self.conn.compute.get_server(os_server['id'])
- if server.status not in states:
- raise
-
- def __system_state_change(self, os_server):
- """Check if system state would change."""
- return os_server.status not in _action_map[self.params['action']]
-
-
-def main():
- module = ServerActionModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_server_group.py b/ansible_collections/openstack/cloud/plugins/modules/os_server_group.py
deleted file mode 100644
index 84f59e6cb..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_server_group.py
+++ /dev/null
@@ -1,162 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2016 Catalyst IT Limited
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: server_group
-short_description: Manage OpenStack server groups
-author: OpenStack Ansible SIG
-description:
- - Add or remove server groups from OpenStack.
-options:
- state:
- description:
- - Indicate desired state of the resource. When I(state) is 'present',
- then I(policies) is required.
- choices: ['present', 'absent']
- required: false
- default: present
- type: str
- name:
- description:
- - Server group name.
- required: true
- type: str
- policies:
- description:
- - A list of one or more policy names to associate with the server
- group. The list must contain at least one policy name. The current
- valid policy names are anti-affinity, affinity, soft-anti-affinity
- and soft-affinity.
- required: false
- type: list
- elements: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a server group with 'affinity' policy.
-- openstack.cloud.server_group:
- state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: my_server_group
- policies:
- - affinity
-
-# Delete 'my_server_group' server group.
-- openstack.cloud.server_group:
- state: absent
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- name: my_server_group
-'''
-
-RETURN = '''
-id:
- description: Unique UUID.
- returned: success
- type: str
-name:
- description: The name of the server group.
- returned: success
- type: str
-policies:
- description: A list of one or more policy names of the server group.
- returned: success
- type: list
-members:
- description: A list of members in the server group.
- returned: success
- type: list
-metadata:
- description: Metadata key and value pairs.
- returned: success
- type: dict
-project_id:
- description: ID of the project that owns the server group.
- returned: success
- type: str
-user_id:
- description: ID of the user that owns the server group.
- returned: success
- type: str
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ServerGroupModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- policies=dict(required=False, type='list', elements='str'),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True,
- )
-
- def _system_state_change(self, state, server_group):
- if state == 'present' and not server_group:
- return True
- if state == 'absent' and server_group:
- return True
-
- return False
-
- def run(self):
- name = self.params['name']
- policies = self.params['policies']
- state = self.params['state']
-
- server_group = self.conn.get_server_group(name)
-
- if self.ansible.check_mode:
- self.exit_json(
- changed=self._system_state_change(state, server_group)
- )
-
- changed = False
- if state == 'present':
- if not server_group:
- if not policies:
- self.fail_json(
- msg="Parameter 'policies' is required in Server Group "
- "Create"
- )
- server_group = self.conn.create_server_group(name, policies)
- changed = True
-
- self.exit_json(
- changed=changed,
- id=server_group['id'],
- server_group=server_group
- )
- if state == 'absent':
- if server_group:
- self.conn.delete_server_group(server_group['id'])
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = ServerGroupModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_server_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_server_info.py
deleted file mode 100644
index bac1d2114..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_server_info.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: server_info
-short_description: Retrieve information about one or more compute instances
-author: OpenStack Ansible SIG
-description:
- - Retrieve information about server instances from OpenStack.
- - This module was called C(os_server_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.server_info) module no longer returns C(ansible_facts)!
-notes:
- - The result contains a list of servers.
-options:
- server:
- description:
- - restrict results to servers with names or UUIDs matching
- this glob expression (e.g., C(web*)).
- type: str
- detailed:
- description:
- - when true, return additional detail about servers at the expense
- of additional API calls.
- type: bool
- default: 'no'
- filters:
- description:
- - restrict results to servers matching a dictionary of
- filters
- type: dict
- all_projects:
- description:
- - Whether to list servers from all projects or just the current auth
- scoped project.
- type: bool
- default: 'no'
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Gather information about all servers named <web*> that are in an active state:
-- openstack.cloud.server_info:
- cloud: rax-dfw
- server: web*
- filters:
- vm_state: active
- register: result
-- debug:
- msg: "{{ result.openstack_servers }}"
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ServerInfoModule(OpenStackModule):
-
- deprecated_names = ('os_server_info', 'openstack.cloud.os_server_info')
-
- argument_spec = dict(
- server=dict(required=False),
- detailed=dict(required=False, type='bool', default=False),
- filters=dict(required=False, type='dict', default=None),
- all_projects=dict(required=False, type='bool', default=False),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def run(self):
-
- kwargs = self.check_versioned(
- detailed=self.params['detailed'],
- filters=self.params['filters'],
- all_projects=self.params['all_projects']
- )
- if self.params['server']:
- kwargs['name_or_id'] = self.params['server']
- openstack_servers = self.conn.search_servers(**kwargs)
- self.exit(changed=False, openstack_servers=openstack_servers)
-
-
-def main():
- module = ServerInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_server_metadata.py b/ansible_collections/openstack/cloud/plugins/modules/os_server_metadata.py
deleted file mode 100644
index a1207e3b3..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_server_metadata.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# Copyright (c) 2016, Mario Santos <mario.rf.santos@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: server_metadata
-short_description: Add/Update/Delete Metadata in Compute Instances from OpenStack
-author: OpenStack Ansible SIG
-description:
- - Add, Update or Remove metadata in compute instances from OpenStack.
-options:
- server:
- description:
- - Name of the instance whose metadata should be updated
- required: true
- aliases: ['name']
- type: str
- meta:
- description:
- - 'A dictionary of key-value pairs that should be provided as metadata to
- the instance, or a string containing a comma-separated list of key=value pairs.
- Eg: meta: "key1=value1,key2=value2"'
- required: true
- type: dict
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- availability_zone:
- description:
- - Availability zone in which to create the snapshot.
- required: false
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Creates or updates hostname=test1 as metadata of the server instance vm1
-- name: add metadata to compute instance
- hosts: localhost
- tasks:
- - name: add metadata to instance
- openstack.cloud.server_metadata:
- state: present
- auth:
- auth_url: https://openstack-api.example.com:35357/v2.0/
- username: admin
- password: admin
- project_name: admin
- name: vm1
- meta:
- hostname: test1
- group: group1
-
-# Removes the keys under meta from the instance named vm1
-- name: delete metadata from compute instance
- hosts: localhost
- tasks:
- - name: delete metadata from instance
- openstack.cloud.server_metadata:
- state: absent
- auth:
- auth_url: https://openstack-api.example.com:35357/v2.0/
- username: admin
- password: admin
- project_name: admin
- name: vm1
- meta:
- hostname:
- group:
-'''
-
-RETURN = '''
-server_id:
- description: The ID of the compute instance where the change was made
- returned: success
- type: str
- sample: "324c4e91-3e03-4f62-9a4d-06119a8a8d16"
-metadata:
- description: The metadata of compute instance after the change
- returned: success
- type: dict
- sample: {'key1': 'value1', 'key2': 'value2'}
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class ServerMetadataModule(OpenStackModule):
- argument_spec = dict(
- server=dict(required=True, aliases=['name']),
- meta=dict(required=True, type='dict'),
- state=dict(default='present', choices=['absent', 'present']),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _needs_update(self, server_metadata=None, metadata=None):
- if server_metadata is None:
- server_metadata = {}
- if metadata is None:
- metadata = {}
- return len(set(metadata.items()) - set(server_metadata.items())) != 0
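-
- # For example, server_metadata={'hostname': 'test1'} and
- # metadata={'hostname': 'test1', 'group': 'group1'} yields True,
- # because the 'group' pair is not yet present on the server.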
-
- def _get_keys_to_delete(self, server_metadata_keys=None, metadata_keys=None):
- if server_metadata_keys is None:
- server_metadata_keys = []
- if metadata_keys is None:
- metadata_keys = []
- return set(server_metadata_keys) & set(metadata_keys)
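-
- # For example, server_metadata_keys=['hostname', 'group'] and
- # metadata_keys=['group', 'other'] yields {'group'}: only keys that
- # actually exist on the server are deleted.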
-
- def run(self):
- state = self.params['state']
- server_param = self.params['server']
- meta_param = self.params['meta']
- changed = False
-
- server = self.conn.get_server(server_param)
- if not server:
- self.fail_json(
- msg='Could not find server {0}'.format(server_param))
-
- if state == 'present':
- # check if it needs update
- if self._needs_update(
- server_metadata=server.metadata, metadata=meta_param
- ):
- if not self.ansible.check_mode:
- self.conn.set_server_metadata(server_param, meta_param)
- changed = True
- elif state == 'absent':
- # remove from params the keys that do not exist in the server
- keys_to_delete = self._get_keys_to_delete(
- server.metadata.keys(), meta_param.keys())
- if len(keys_to_delete) > 0:
- if not self.ansible.check_mode:
- self.conn.delete_server_metadata(
- server_param, keys_to_delete)
- changed = True
-
- if changed:
- server = self.conn.get_server(server_param)
-
- self.exit_json(
- changed=changed, server_id=server.id, metadata=server.metadata)
-
-
-def main():
- module = ServerMetadataModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_server_volume.py b/ansible_collections/openstack/cloud/plugins/modules/os_server_volume.py
deleted file mode 100644
index 1deb8fa6e..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_server_volume.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: server_volume
-short_description: Attach/Detach Volumes from OpenStack VMs
-author: OpenStack Ansible SIG
-description:
- - Attach or detach volumes from OpenStack VMs
-options:
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- required: false
- type: str
- server:
- description:
- - Name or ID of server you want to attach a volume to
- required: true
- type: str
- volume:
- description:
- - Name or ID of the volume you want to attach to a server
- required: true
- type: str
- device:
- description:
- - Device you want to attach. Defaults to automatically choosing a device name.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Attaches a volume to a compute host
-- name: attach a volume
- hosts: localhost
- tasks:
- - name: attach volume to host
- openstack.cloud.server_volume:
- state: present
- cloud: mordred
- server: Mysql-server
- volume: mysql-data
- device: /dev/vdb
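-
-# (Illustrative sketch) Detach the same volume again; server and volume
-# names reuse the values from the attach task above.
- - name: detach volume from host
- openstack.cloud.server_volume:
- state: absent
- cloud: mordred
- server: Mysql-server
- volume: mysql-data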
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-def _system_state_change(state, device):
- """Check if system state would change."""
- if state == 'present':
- if device:
- return False
- return True
- if state == 'absent':
- if device:
- return True
- return False
- return False
-
-
-class ServerVolumeModule(OpenStackModule):
-
- argument_spec = dict(
- server=dict(required=True),
- volume=dict(required=True),
- device=dict(default=None), # None == auto choose device name
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- def run(self):
-
- state = self.params['state']
- wait = self.params['wait']
- timeout = self.params['timeout']
-
- server = self.conn.get_server(self.params['server'])
- volume = self.conn.get_volume(self.params['volume'])
-
- if not server:
- self.fail(msg='server %s is not found' % self.params['server'])
-
- if not volume:
- self.fail(msg='volume %s is not found' % self.params['volume'])
-
- dev = self.conn.get_volume_attach_device(volume, server.id)
-
- if self.ansible.check_mode:
- self.exit(changed=_system_state_change(state, dev))
-
- if state == 'present':
- changed = False
- if not dev:
- changed = True
- self.conn.attach_volume(server, volume, self.params['device'],
- wait=wait, timeout=timeout)
-
- server = self.conn.get_server(self.params['server']) # refresh
- volume = self.conn.get_volume(self.params['volume']) # refresh
- hostvars = self.conn.get_openstack_vars(server)
-
- self.exit(
- changed=changed,
- id=volume['id'],
- attachments=volume['attachments'],
- openstack=hostvars
- )
-
- elif state == 'absent':
- if not dev:
- # Volume is not attached to this server
- self.exit(changed=False)
-
- self.conn.detach_volume(server, volume, wait=wait, timeout=timeout)
- self.exit(
- changed=True,
- result='Detached volume from server'
- )
-
-
-def main():
- module = ServerVolumeModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_stack.py b/ansible_collections/openstack/cloud/plugins/modules/os_stack.py
deleted file mode 100644
index 95b7bef5e..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_stack.py
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
-# (c) 2016, Steve Baker <sbaker@redhat.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: stack
-short_description: Add/Remove Heat Stack
-author: OpenStack Ansible SIG
-description:
- - Add or remove a Heat stack in OpenStack
-options:
- state:
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- name:
- description:
- - Name of the stack that should be created; the name may contain only letters and digits, with no spaces
- required: true
- type: str
- tag:
- description:
- - Tag for the stack that should be created; the tag may contain only letters and digits, with no spaces
- type: str
- template:
- description:
- - Path of the template file to use for the stack creation
- type: str
- environment:
- description:
- - List of environment files that should be used for the stack creation
- type: list
- elements: str
- parameters:
- description:
- - Dictionary of parameters for the stack creation
- type: dict
- rollback:
- description:
- - Rollback stack creation
- type: bool
- default: false
- timeout:
- description:
- - Maximum number of seconds to wait for the stack creation
- default: 3600
- type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-EXAMPLES = '''
----
-- name: create stack
- ignore_errors: True
- register: stack_create
- openstack.cloud.stack:
- name: "{{ stack_name }}"
- tag: "{{ tag_name }}"
- state: present
- template: "/path/to/my_stack.yaml"
- environment:
- - /path/to/resource-registry.yaml
- - /path/to/environment.yaml
- parameters:
- bmc_flavor: m1.medium
- bmc_image: CentOS
- key_name: default
- private_net: "{{ private_net_param }}"
- node_count: 2
- name: undercloud
- image: CentOS
- my_flavor: m1.large
- external_net: "{{ external_net_param }}"
-'''
-
-RETURN = '''
-id:
- description: Stack ID.
- type: str
- sample: "97a3f543-8136-4570-920e-fd7605c989d6"
- returned: always
-
-stack:
- description: stack info
- type: complex
- returned: always
- contains:
- action:
- description: Action, could be Create or Update.
- type: str
- sample: "CREATE"
- creation_time:
- description: Time when the action was performed.
- type: str
- sample: "2016-07-05T17:38:12Z"
- description:
- description: Description of the Stack provided in the heat template.
- type: str
- sample: "HOT template to create a new instance and networks"
- id:
- description: Stack ID.
- type: str
- sample: "97a3f543-8136-4570-920e-fd7605c989d6"
- name:
- description: Name of the Stack
- type: str
- sample: "test-stack"
- identifier:
- description: Identifier of the current Stack action.
- type: str
- sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
- links:
- description: Links to the current Stack.
- type: list
- elements: dict
- sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
- outputs:
- description: Output returned by the Stack.
- type: list
- elements: dict
- sample: "{'description': 'IP address of server1 in private network',
- 'output_key': 'server1_private_ip',
- 'output_value': '10.1.10.103'}"
- parameters:
- description: Parameters of the current Stack
- type: dict
- sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
- 'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
- 'OS::stack_name': 'test-stack',
- 'stack_status': 'CREATE_COMPLETE',
- 'stack_status_reason': 'Stack CREATE completed successfully',
- 'status': 'COMPLETE',
- 'template_description': 'HOT template to create a new instance and networks',
- 'timeout_mins': 60,
- 'updated_time': null}"
-'''
-
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class StackModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- tag=dict(required=False, default=None, min_ver='0.28.0'),
- template=dict(default=None),
- environment=dict(default=None, type='list', elements='str'),
- parameters=dict(default={}, type='dict'),
- rollback=dict(default=False, type='bool'),
- timeout=dict(default=3600, type='int'),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _create_stack(self, stack, parameters):
- stack = self.conn.create_stack(
- self.params['name'],
- template_file=self.params['template'],
- environment_files=self.params['environment'],
- timeout=self.params['timeout'],
- wait=True,
- rollback=self.params['rollback'],
- **parameters)
-
- stack = self.conn.get_stack(stack.id, None)
- if stack.stack_status == 'CREATE_COMPLETE':
- return stack
- else:
- self.fail_json(msg="Failure in creating stack: {0}".format(stack))
-
- def _update_stack(self, stack, parameters):
- stack = self.conn.update_stack(
- self.params['name'],
- template_file=self.params['template'],
- environment_files=self.params['environment'],
- timeout=self.params['timeout'],
- rollback=self.params['rollback'],
- wait=self.params['wait'],
- **parameters)
-
- if stack['stack_status'] == 'UPDATE_COMPLETE':
- return stack
- else:
- self.fail_json(msg="Failure in updating stack: %s" %
- stack['stack_status_reason'])
-
- def _system_state_change(self, stack):
- state = self.params['state']
- if state == 'present':
- if not stack:
- return True
- if state == 'absent' and stack:
- return True
- return False
-
- def run(self):
- state = self.params['state']
- name = self.params['name']
- # Check for required parameters when state == 'present'
- if state == 'present':
- for p in ['template']:
- if not self.params[p]:
- self.fail_json(msg='%s required with present state' % p)
-
- stack = self.conn.get_stack(name)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(stack))
-
- if state == 'present':
- parameters = self.params['parameters']
- if not stack:
- stack = self._create_stack(stack, parameters)
- else:
- stack = self._update_stack(stack, parameters)
- self.exit_json(changed=True,
- stack=stack,
- id=stack.id)
- elif state == 'absent':
- if not stack:
- changed = False
- else:
- changed = True
- if not self.conn.delete_stack(name, wait=self.params['wait']):
- self.fail_json(msg='delete stack failed for stack: %s' % name)
- self.exit_json(changed=changed)
-
-
-def main():
- module = StackModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_subnet.py b/ansible_collections/openstack/cloud/plugins/modules/os_subnet.py
deleted file mode 100644
index dfe1eaca3..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_subnet.py
+++ /dev/null
@@ -1,364 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# (c) 2013, Benno Joy <benno@ansible.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: subnet
-short_description: Add/Remove subnet to an OpenStack network
-author: OpenStack Ansible SIG
-description:
- - Add or Remove a subnet to an OpenStack network
-options:
- state:
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- network_name:
- description:
- - Name of the network to which the subnet should be attached
- - Required when I(state) is 'present'
- type: str
- name:
- description:
- - The name of the subnet that should be created. Although Neutron
- allows for non-unique subnet names, this module enforces subnet
- name uniqueness.
- required: true
- type: str
- cidr:
- description:
- - The CIDR representation of the subnet that should be assigned to
- the subnet. Required when I(state) is 'present' and a subnetpool
- is not specified.
- type: str
- ip_version:
- description:
- - The IP version of the subnet (4 or 6)
- default: '4'
- type: str
- choices: ['4', '6']
- enable_dhcp:
- description:
- - Whether DHCP should be enabled for this subnet.
- type: bool
- default: 'yes'
- gateway_ip:
- description:
- - The IP address that would be assigned to the gateway for this subnet
- type: str
- no_gateway_ip:
- description:
- - Do not assign a gateway IP to this subnet
- type: bool
- default: 'no'
- dns_nameservers:
- description:
- - List of DNS nameservers for this subnet.
- type: list
- elements: str
- allocation_pool_start:
- description:
- - The start address of the allocation pool from which IP addresses
- should be allocated.
- type: str
- allocation_pool_end:
- description:
- - The last address of the allocation pool from which IP addresses
- should be assigned to virtual machines.
- type: str
- host_routes:
- description:
- - A list of host route dictionaries for the subnet.
- type: list
- elements: dict
- suboptions:
- destination:
- description: The destination network (CIDR).
- type: str
- required: true
- nexthop:
- description: The next hop (aka gateway) for the I(destination).
- type: str
- required: true
- ipv6_ra_mode:
- description:
- - IPv6 router advertisement mode
- choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
- type: str
- ipv6_address_mode:
- description:
- - IPv6 address mode
- choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
- type: str
- use_default_subnetpool:
- description:
- - Use the default subnetpool for I(ip_version) to obtain a CIDR.
- type: bool
- default: 'no'
- project:
- description:
- - Project name or ID containing the subnet (name admin-only)
- type: str
- extra_specs:
- description:
- - Dictionary with extra key/value pairs passed to the API
- required: false
- default: {}
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a new (or update an existing) subnet on the specified network
-- openstack.cloud.subnet:
- state: present
- network_name: network1
- name: net1subnet
- cidr: 192.168.0.0/24
- dns_nameservers:
- - 8.8.8.7
- - 8.8.8.8
- host_routes:
- - destination: 0.0.0.0/0
- nexthop: 12.34.56.78
- - destination: 192.168.0.0/24
- nexthop: 192.168.0.1
-
-# Delete a subnet
-- openstack.cloud.subnet:
- state: absent
- name: net1subnet
-
-# Create an ipv6 stateless subnet
-- openstack.cloud.subnet:
- state: present
- name: intv6
- network_name: internal
- ip_version: 6
- cidr: 2db8:1::/64
- dns_nameservers:
- - 2001:4860:4860::8888
- - 2001:4860:4860::8844
- ipv6_ra_mode: dhcpv6-stateless
- ipv6_address_mode: dhcpv6-stateless
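-
-# (Illustrative sketch) Create a subnet whose CIDR is allocated from the
-# default subnet pool; the subnet name here is hypothetical.
-- openstack.cloud.subnet:
- state: present
- network_name: network1
- name: pooledsubnet
- use_default_subnetpool: yes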
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class SubnetModule(OpenStackModule):
- ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
- argument_spec = dict(
- name=dict(type='str', required=True),
- network_name=dict(type='str'),
- cidr=dict(type='str'),
- ip_version=dict(type='str', default='4', choices=['4', '6']),
- enable_dhcp=dict(type='bool', default=True),
- gateway_ip=dict(type='str'),
- no_gateway_ip=dict(type='bool', default=False),
- dns_nameservers=dict(type='list', default=None, elements='str'),
- allocation_pool_start=dict(type='str'),
- allocation_pool_end=dict(type='str'),
- host_routes=dict(type='list', default=None, elements='dict'),
- ipv6_ra_mode=dict(type='str', choices=ipv6_mode_choices),
- ipv6_address_mode=dict(type='str', choices=ipv6_mode_choices),
- use_default_subnetpool=dict(type='bool', default=False),
- extra_specs=dict(type='dict', default=dict()),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- project=dict(type='str'),
- )
-
- module_kwargs = dict(
- supports_check_mode=True,
- required_together=[['allocation_pool_end', 'allocation_pool_start']]
- )
-
- def _can_update(self, subnet, filters=None):
- """Check for differences in non-updatable values"""
- network_name = self.params['network_name']
- ip_version = int(self.params['ip_version'])
- ipv6_ra_mode = self.params['ipv6_ra_mode']
- ipv6_a_mode = self.params['ipv6_address_mode']
-
- if network_name:
- network = self.conn.get_network(network_name, filters)
- if network:
- netid = network['id']
- if netid != subnet['network_id']:
- self.fail_json(msg='Cannot update network_name in existing subnet')
- else:
- self.fail_json(msg='No network found for %s' % network_name)
-
- if ip_version and subnet['ip_version'] != ip_version:
- self.fail_json(msg='Cannot update ip_version in existing subnet')
- if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ipv6_ra_mode:
- self.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet')
- if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode:
- self.fail_json(msg='Cannot update ipv6_address_mode in existing subnet')
-
- def _needs_update(self, subnet, filters=None):
- """Check for differences in the updatable values."""
-
- # First check if we are trying to update something we're not allowed to
- self._can_update(subnet, filters)
-
- # now check for the things we are allowed to update
- enable_dhcp = self.params['enable_dhcp']
- subnet_name = self.params['name']
- pool_start = self.params['allocation_pool_start']
- pool_end = self.params['allocation_pool_end']
- gateway_ip = self.params['gateway_ip']
- no_gateway_ip = self.params['no_gateway_ip']
- dns = self.params['dns_nameservers']
- host_routes = self.params['host_routes']
- if pool_start and pool_end:
- pool = dict(start=pool_start, end=pool_end)
- else:
- pool = None
-
- changes = dict()
- if subnet['enable_dhcp'] != enable_dhcp:
- changes['enable_dhcp'] = enable_dhcp
- if subnet_name and subnet['name'] != subnet_name:
- changes['subnet_name'] = subnet_name
- if pool and (not subnet['allocation_pools'] or subnet['allocation_pools'] != [pool]):
- changes['allocation_pools'] = [pool]
- if gateway_ip and subnet['gateway_ip'] != gateway_ip:
- changes['gateway_ip'] = gateway_ip
- if dns and sorted(subnet['dns_nameservers']) != sorted(dns):
- changes['dns_nameservers'] = dns
- if host_routes:
- curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys())
- new_hr = sorted(host_routes, key=lambda t: t.keys())
- if curr_hr != new_hr:
- changes['host_routes'] = host_routes
- if no_gateway_ip and subnet['gateway_ip']:
- changes['disable_gateway_ip'] = no_gateway_ip
- return changes
-
- def _system_state_change(self, subnet, filters=None):
- state = self.params['state']
- if state == 'present':
- if not subnet:
- return True
- return bool(self._needs_update(subnet, filters))
- if state == 'absent' and subnet:
- return True
- return False
-
- def run(self):
-
- state = self.params['state']
- network_name = self.params['network_name']
- cidr = self.params['cidr']
- ip_version = self.params['ip_version']
- enable_dhcp = self.params['enable_dhcp']
- subnet_name = self.params['name']
- gateway_ip = self.params['gateway_ip']
- no_gateway_ip = self.params['no_gateway_ip']
- dns = self.params['dns_nameservers']
- pool_start = self.params['allocation_pool_start']
- pool_end = self.params['allocation_pool_end']
- host_routes = self.params['host_routes']
- ipv6_ra_mode = self.params['ipv6_ra_mode']
- ipv6_a_mode = self.params['ipv6_address_mode']
- use_default_subnetpool = self.params['use_default_subnetpool']
- project = self.params.pop('project')
- extra_specs = self.params['extra_specs']
-
- # Check for required parameters when state == 'present'
- if state == 'present':
- if not self.params['network_name']:
- self.fail(msg='network_name required with present state')
- if (
- not self.params['cidr']
- and not use_default_subnetpool
- and not extra_specs.get('subnetpool_id', False)
- ):
- self.fail(msg='cidr or use_default_subnetpool or '
- 'subnetpool_id required with present state')
-
- if pool_start and pool_end:
- pool = [dict(start=pool_start, end=pool_end)]
- else:
- pool = None
-
- if no_gateway_ip and gateway_ip:
- self.fail_json(msg='no_gateway_ip is not allowed with gateway_ip')
-
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
- project_id = proj['id']
- filters = {'tenant_id': project_id}
- else:
- project_id = None
- filters = None
-
- subnet = self.conn.get_subnet(subnet_name, filters=filters)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(subnet, filters))
-
- if state == 'present':
- if not subnet:
- kwargs = dict(
- cidr=cidr,
- ip_version=ip_version,
- enable_dhcp=enable_dhcp,
- subnet_name=subnet_name,
- gateway_ip=gateway_ip,
- disable_gateway_ip=no_gateway_ip,
- dns_nameservers=dns,
- allocation_pools=pool,
- host_routes=host_routes,
- ipv6_ra_mode=ipv6_ra_mode,
- ipv6_address_mode=ipv6_a_mode,
- tenant_id=project_id)
- dup_args = set(kwargs.keys()) & set(extra_specs.keys())
- if dup_args:
- raise ValueError('Duplicate key(s) {0} in extra_specs'
- .format(list(dup_args)))
- if use_default_subnetpool:
- kwargs['use_default_subnetpool'] = use_default_subnetpool
- kwargs = dict(kwargs, **extra_specs)
- subnet = self.conn.create_subnet(network_name, **kwargs)
- changed = True
- else:
- changes = self._needs_update(subnet, filters)
- if changes:
- subnet = self.conn.update_subnet(subnet['id'], **changes)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed,
- subnet=subnet,
- id=subnet['id'])
-
- elif state == 'absent':
- if not subnet:
- changed = False
- else:
- changed = True
- self.conn.delete_subnet(subnet_name)
- self.exit_json(changed=changed)
-
-
-def main():
- module = SubnetModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_subnets_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_subnets_info.py
deleted file mode 100644
index 7a771b53a..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_subnets_info.py
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: subnets_info
-short_description: Retrieve information about one or more OpenStack subnets.
-author: OpenStack Ansible SIG
-description:
- - Retrieve information about one or more subnets from OpenStack.
- - This module was called C(openstack.cloud.subnets_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.subnets_info) module no longer returns C(ansible_facts)!
-options:
- name:
- description:
- - Name or ID of the subnet.
- - Alias 'subnet' added in version 2.8.
- required: false
- aliases: ['subnet']
- type: str
- filters:
- description:
-      - A dictionary of metadata to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- required: false
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: Gather information about previously created subnets
- openstack.cloud.subnets_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- register: result
-
-- name: Show openstack subnets
- debug:
- msg: "{{ result.openstack_subnets }}"
-
-- name: Gather information about a previously created subnet by name
- openstack.cloud.subnets_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- name: subnet1
- register: result
-
-- name: Show openstack subnets
- debug:
- msg: "{{ result.openstack_subnets }}"
-
-- name: Gather information about a previously created subnet with filter
- # Note: name and filters parameters are not mutually exclusive
- openstack.cloud.subnets_info:
- auth:
- auth_url: https://identity.example.com
- username: user
- password: password
- project_name: someproject
- filters:
- tenant_id: 55e2ce24b2a245b09f181bf025724cbe
- register: result
-
-- name: Show openstack subnets
- debug:
- msg: "{{ result.openstack_subnets }}"
-'''
-
-RETURN = '''
-openstack_subnets:
-  description: has all the OpenStack information about the subnets
- returned: always, but can be null
- type: complex
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the subnet.
- returned: success
- type: str
- network_id:
- description: Network ID this subnet belongs in.
- returned: success
- type: str
- cidr:
- description: Subnet's CIDR.
- returned: success
- type: str
- gateway_ip:
- description: Subnet's gateway ip.
- returned: success
- type: str
- enable_dhcp:
- description: DHCP enable flag for this subnet.
- returned: success
- type: bool
- ip_version:
- description: IP version for this subnet.
- returned: success
- type: int
- tenant_id:
- description: Tenant id associated with this subnet.
- returned: success
- type: str
- dns_nameservers:
- description: DNS name servers for this subnet.
- returned: success
- type: list
- elements: str
- allocation_pools:
- description: Allocation pools associated with this subnet.
- returned: success
- type: list
- elements: dict
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class SubnetInfoModule(OpenStackModule):
-
- deprecated_names = ('subnets_facts', 'openstack.cloud.subnets_facts')
-
- argument_spec = dict(
- name=dict(required=False, default=None, aliases=['subnet']),
- filters=dict(required=False, type='dict', default=None)
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def run(self):
- kwargs = self.check_versioned(
- filters=self.params['filters']
- )
- if self.params['name']:
- kwargs['name_or_id'] = self.params['name']
- subnets = self.conn.search_subnets(**kwargs)
-
- self.exit(changed=False, openstack_subnets=subnets)
-
-
-def main():
- module = SubnetInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_user.py b/ansible_collections/openstack/cloud/plugins/modules/os_user.py
deleted file mode 100644
index 047b3ed8b..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_user.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: identity_user
-short_description: Manage OpenStack Identity Users
-author: OpenStack Ansible SIG
-description:
- - Manage OpenStack Identity users. Users can be created,
- updated or deleted using this module. A user will be updated
- if I(name) matches an existing user and I(state) is present.
- The value for I(name) cannot be updated without deleting and
- re-creating the user.
-options:
- name:
- description:
- - Username for the user
- required: true
- type: str
- password:
- description:
- - Password for the user
- type: str
- update_password:
- required: false
- choices: ['always', 'on_create']
- default: on_create
- description:
- - C(always) will attempt to update password. C(on_create) will only
- set the password for newly created users.
- type: str
- email:
- description:
- - Email address for the user
- type: str
- description:
- description:
- - Description about the user
- type: str
- default_project:
- description:
- - Project name or ID that the user should be associated with by default
- type: str
- domain:
- description:
- - Domain to create the user in if the cloud supports domains
- type: str
- enabled:
- description:
- - Is the user enabled
- type: bool
- default: 'yes'
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a user
-- openstack.cloud.identity_user:
- cloud: mycloud
- state: present
- name: demouser
- password: secret
- email: demo@example.com
- domain: default
- default_project: demo
-
-# Delete a user
-- openstack.cloud.identity_user:
- cloud: mycloud
- state: absent
- name: demouser
-
-# Create a user but don't update password if user exists
-- openstack.cloud.identity_user:
- cloud: mycloud
- state: present
- name: demouser
- password: secret
- update_password: on_create
- email: demo@example.com
- domain: default
- default_project: demo
-
-# Create a user without password
-- openstack.cloud.identity_user:
- cloud: mycloud
- state: present
- name: demouser
- email: demo@example.com
- domain: default
- default_project: demo
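-
-# Illustrative sketch: update an existing user's password on every run by
-# setting update_password to always (user and password values are assumptions)
-- openstack.cloud.identity_user:
-    cloud: mycloud
-    state: present
-    name: demouser
-    password: new-secret
-    update_password: always
-    domain: default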
-'''
-
-
-RETURN = '''
-user:
- description: Dictionary describing the user.
- returned: On success when I(state) is 'present'
- type: dict
- contains:
- default_project_id:
- description: User default project ID. Only present with Keystone >= v3.
- returned: success
- type: str
- sample: "4427115787be45f08f0ec22a03bfc735"
- description:
- description: The description of this user
- returned: success
- type: str
- sample: "a user"
- domain_id:
- description: User domain ID. Only present with Keystone >= v3.
- returned: success
- type: str
- sample: "default"
- email:
- description: User email address
- returned: success
- type: str
- sample: "demo@example.com"
- id:
- description: User ID
- returned: success
- type: str
- sample: "f59382db809c43139982ca4189404650"
- enabled:
- description: Indicates whether the user is enabled
- type: bool
- name:
- description: Unique user name, within the owning domain
- returned: success
- type: str
- sample: "demouser"
- username:
- description: Username with Identity API v2 (OpenStack Pike or earlier) else Null
- returned: success
- type: str
-
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityUserModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=True),
- password=dict(required=False, default=None, no_log=True),
- email=dict(required=False, default=None),
- default_project=dict(required=False, default=None),
- description=dict(type='str'),
- domain=dict(required=False, default=None),
- enabled=dict(default=True, type='bool'),
- state=dict(default='present', choices=['absent', 'present']),
- update_password=dict(default='on_create', choices=['always', 'on_create']),
- )
-
- module_kwargs = dict()
-
- def _needs_update(self, params_dict, user):
- for k in params_dict:
- # We don't get password back in the user object, so assume any supplied
- # password is a change.
- if k == 'password':
- return True
- if k == 'default_project':
- if user['default_project_id'] != params_dict['default_project']:
- return True
- else:
- continue
- if user[k] != params_dict[k]:
- return True
- return False
-
- def _get_domain_id(self, domain):
- dom_obj = self.conn.identity.find_domain(domain)
- if dom_obj is None:
- # Ok, let's hope the user is non-admin and passing a sane id
- return domain
- return dom_obj.id
-
- def _get_default_project_id(self, default_project, domain_id):
- project = self.conn.identity.find_project(default_project, domain_id=domain_id)
- if not project:
- self.fail_json(msg='Default project %s is not valid' % default_project)
- return project['id']
-
- def run(self):
- name = self.params['name']
- password = self.params.get('password')
- email = self.params['email']
- default_project = self.params['default_project']
- domain = self.params['domain']
- enabled = self.params['enabled']
- state = self.params['state']
- update_password = self.params['update_password']
- description = self.params['description']
-
- if domain:
- domain_id = self._get_domain_id(domain)
- user = self.conn.get_user(name, domain_id=domain_id)
- else:
- domain_id = None
- user = self.conn.get_user(name)
-
- changed = False
- if state == 'present':
- user_args = {
- 'name': name,
- 'email': email,
- 'domain_id': domain_id,
- 'description': description,
- 'enabled': enabled,
- }
- if default_project:
- default_project_id = self._get_default_project_id(
- default_project, domain_id)
- user_args['default_project'] = default_project_id
- user_args = {k: v for k, v in user_args.items() if v is not None}
-
- changed = False
- if user is None:
- if password:
- user_args['password'] = password
-
- user = self.conn.create_user(**user_args)
- changed = True
- else:
- if update_password == 'always':
- if not password:
- self.fail_json(msg="update_password is always but a password value is missing")
- user_args['password'] = password
-
- if self._needs_update(user_args, user):
- user = self.conn.update_user(user['id'], **user_args)
- changed = True
-
- self.exit_json(changed=changed, user=user)
- elif state == 'absent' and user is not None:
- self.conn.identity.delete_user(user['id'])
- changed = True
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityUserModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_user_group.py b/ansible_collections/openstack/cloud/plugins/modules/os_user_group.py
deleted file mode 100644
index ce8f28e12..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_user_group.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: group_assignment
-short_description: Associate OpenStack Identity users and groups
-author: OpenStack Ansible SIG
-description:
- - Add and remove users from groups
-options:
- user:
- description:
- - Name or id for the user
- required: true
- type: str
- group:
- description:
- - Name or id for the group.
- required: true
- type: str
- state:
- description:
- - Should the user be present or absent in the group
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Add the demo user to the demo group
-- openstack.cloud.group_assignment:
- cloud: mycloud
- user: demo
- group: demo
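-
-# Illustrative sketch: remove the demo user from the demo group again
-# (assumes the same user and group as above)
-- openstack.cloud.group_assignment:
-    cloud: mycloud
-    state: absent
-    user: demo
-    group: demo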
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityGroupAssignment(OpenStackModule):
- argument_spec = dict(
- user=dict(required=True),
- group=dict(required=True),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _system_state_change(self, state, in_group):
- if state == 'present' and not in_group:
- return True
- if state == 'absent' and in_group:
- return True
- return False
-
- def run(self):
- user = self.params['user']
- group = self.params['group']
- state = self.params['state']
-
- in_group = self.conn.is_user_in_group(user, group)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, in_group))
-
- changed = False
- if state == 'present':
- if not in_group:
- self.conn.add_user_to_group(user, group)
- changed = True
-
- elif state == 'absent':
- if in_group:
- self.conn.remove_user_from_group(user, group)
- changed = True
-
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityGroupAssignment()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_user_info.py b/ansible_collections/openstack/cloud/plugins/modules/os_user_info.py
deleted file mode 100644
index c0e0d9499..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_user_info.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: identity_user_info
-short_description: Retrieve information about one or more OpenStack users
-author: OpenStack Ansible SIG
-description:
-    - Retrieve information about one or more OpenStack users
- - This module was called C(openstack.cloud.identity_user_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.identity_user_info) module no longer returns C(ansible_facts)!
-options:
- name:
- description:
- - Name or ID of the user
- type: str
- domain:
- description:
- - Name or ID of the domain containing the user if the cloud supports domains
- type: str
- filters:
- description:
-      - A dictionary of metadata to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Gather information about previously created users
-- openstack.cloud.identity_user_info:
- cloud: awesomecloud
- register: result
-- debug:
- msg: "{{ result.openstack_users }}"
-
-# Gather information about a previously created user by name
-- openstack.cloud.identity_user_info:
- cloud: awesomecloud
- name: demouser
- register: result
-- debug:
- msg: "{{ result.openstack_users }}"
-
-# Gather information about a previously created user in a specific domain
-- openstack.cloud.identity_user_info:
- cloud: awesomecloud
- name: demouser
- domain: admindomain
- register: result
-- debug:
- msg: "{{ result.openstack_users }}"
-
-# Gather information about a previously created user in a specific domain with filter
-- openstack.cloud.identity_user_info:
- cloud: awesomecloud
- name: demouser
- domain: admindomain
- filters:
- enabled: False
- register: result
-- debug:
- msg: "{{ result.openstack_users }}"
-'''
-
-
-RETURN = '''
-openstack_users:
- description: has all the OpenStack information about users
- returned: always
- type: list
- elements: dict
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Username of the user.
- returned: success
- type: str
- default_project_id:
- description: Default project ID of the user
- returned: success
- type: str
- description:
- description: The description of this user
- returned: success
- type: str
- domain_id:
- description: Domain ID containing the user
- returned: success
- type: str
- email:
- description: Email of the user
- returned: success
- type: str
- enabled:
- description: Flag to indicate if the user is enabled
- returned: success
- type: bool
- username:
- description: Username with Identity API v2 (OpenStack Pike or earlier) else Null
- returned: success
- type: str
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityUserInfoModule(OpenStackModule):
- argument_spec = dict(
- name=dict(required=False, default=None),
- domain=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None),
- )
- module_kwargs = dict(
- supports_check_mode=True
- )
-
-    # The trailing comma keeps deprecated_names a tuple rather than a string.
-    deprecated_names = ('openstack.cloud.identity_user_facts',)
-
- def run(self):
- name = self.params['name']
- domain = self.params['domain']
- filters = self.params['filters']
-
- args = {}
- if domain:
- dom_obj = self.conn.identity.find_domain(domain)
- if dom_obj is None:
- self.fail_json(
- msg="Domain name or ID '{0}' does not exist".format(domain))
- args['domain_id'] = dom_obj.id
-
- users = self.conn.search_users(name, filters, **args)
- self.exit_json(changed=False, openstack_users=users)
-
-
-def main():
- module = IdentityUserInfoModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_user_role.py b/ansible_collections/openstack/cloud/plugins/modules/os_user_role.py
deleted file mode 100644
index 5ad7dce42..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_user_role.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 IBM
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: role_assignment
-short_description: Associate OpenStack Identity users and roles
-author: OpenStack Ansible SIG
-description:
- - Grant and revoke roles in either project or domain context for
- OpenStack Identity Users.
-options:
- role:
- description:
- - Name or ID for the role.
- required: true
- type: str
- user:
- description:
- - Name or ID for the user. If I(user) is not specified, then
- I(group) is required. Both may not be specified.
- type: str
- group:
- description:
- - Name or ID for the group. Valid only with keystone version 3.
- If I(group) is not specified, then I(user) is required. Both
- may not be specified.
- type: str
- project:
- description:
- - Name or ID of the project to scope the role association to.
- If you are using keystone version 2, then this value is required.
- type: str
- domain:
- description:
- - Name or ID of the domain to scope the role association to. Valid only
- with keystone version 3, and required if I(project) is not specified.
- type: str
- state:
- description:
- - Should the roles be present or absent on the user.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Grant an admin role on the user admin in the project project1
-- openstack.cloud.role_assignment:
- cloud: mycloud
- user: admin
- role: admin
- project: project1
-
-# Revoke the admin role from the user barney in the newyork domain
-- openstack.cloud.role_assignment:
- cloud: mycloud
- state: absent
- user: barney
- role: admin
- domain: newyork
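-
-# Illustrative sketch: grant a role to a group in a project
-# (the group, role and project names are assumptions)
-- openstack.cloud.role_assignment:
-    cloud: mycloud
-    group: developers
-    role: member
-    project: project1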
-'''
-
-RETURN = '''
-#
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityRoleAssignmentModule(OpenStackModule):
- argument_spec = dict(
- role=dict(required=True),
- user=dict(required=False),
- group=dict(required=False),
- project=dict(required=False),
- domain=dict(required=False),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- required_one_of=[
- ['user', 'group']
- ],
- supports_check_mode=True
- )
-
- def _system_state_change(self, state, assignment):
- if state == 'present' and not assignment:
- return True
- elif state == 'absent' and assignment:
- return True
- return False
-
- def _build_kwargs(self, user, group, project, domain):
- kwargs = {}
- if user:
- kwargs['user'] = user
- if group:
- kwargs['group'] = group
- if project:
- kwargs['project'] = project
- if domain:
- kwargs['domain'] = domain
- return kwargs
-
- def run(self):
- role = self.params.get('role')
- user = self.params.get('user')
- group = self.params.get('group')
- project = self.params.get('project')
- domain = self.params.get('domain')
- state = self.params.get('state')
-
- filters = {}
- find_filters = {}
- domain_id = None
-
- r = self.conn.identity.find_role(role)
- if r is None:
- self.fail_json(msg="Role %s is not valid" % role)
- filters['role'] = r['id']
-
- if domain:
- d = self.conn.identity.find_domain(domain)
- if d is None:
- self.fail_json(msg="Domain %s is not valid" % domain)
- domain_id = d['id']
- find_filters['domain_id'] = domain_id
- if user:
- u = self.conn.identity.find_user(user, **find_filters)
- if u is None:
- self.fail_json(msg="User %s is not valid" % user)
- filters['user'] = u['id']
-
- if group:
- # self.conn.identity.find_group() does not accept
- # a domain_id argument in Train's openstacksdk
- g = self.conn.get_group(group, **find_filters)
- if g is None:
- self.fail_json(msg="Group %s is not valid" % group)
- filters['group'] = g['id']
- if project:
- p = self.conn.identity.find_project(project, **find_filters)
- if p is None:
- self.fail_json(msg="Project %s is not valid" % project)
- filters['project'] = p['id']
-
-        # Keep using self.conn.list_role_assignments because it calls
-        # identity.role_assignments directly and implements filter logic
-        # that is not worth rewriting here.
- assignment = self.conn.list_role_assignments(filters=filters)
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, assignment))
-
- changed = False
-
-        # Both grant_role and revoke_role call the proxy layer directly and
-        # contain logic that is not worth rewriting here, so keep using them.
- if state == 'present':
- if not assignment:
- kwargs = self._build_kwargs(user, group, project, domain_id)
- self.conn.grant_role(role, **kwargs)
- changed = True
-
- elif state == 'absent':
- if assignment:
- kwargs = self._build_kwargs(user, group, project, domain_id)
- self.conn.revoke_role(role, **kwargs)
- changed = True
-
- self.exit_json(changed=changed)
-
-
-def main():
- module = IdentityRoleAssignmentModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_volume.py b/ansible_collections/openstack/cloud/plugins/modules/os_volume.py
deleted file mode 100644
index 3a50c05a8..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_volume.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: volume
-short_description: Create/Delete Cinder Volumes
-author: OpenStack Ansible SIG
-description:
-    - Create or remove Cinder block storage volumes
-options:
- size:
- description:
- - Size of volume in GB. This parameter is required when the
- I(state) parameter is 'present'.
- type: int
- display_name:
- description:
- - Name of volume
- required: true
- type: str
- aliases: [name]
- display_description:
- description:
- - String describing the volume
- type: str
- aliases: [description]
- volume_type:
- description:
- - Volume type for volume
- type: str
- image:
- description:
- - Image name or id for boot from volume
- type: str
- snapshot_id:
- description:
- - Volume snapshot id to create from
- type: str
- volume:
- description:
- - Volume name or id to create from
- type: str
- bootable:
- description:
- - Bootable flag for volume.
- type: bool
- default: False
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- scheduler_hints:
- description:
- - Scheduler hints passed to volume API in form of dict
- type: dict
- metadata:
- description:
- - Metadata for the volume
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Creates a new volume
-- name: create a volume
- hosts: localhost
- tasks:
- - name: create 40g test volume
- openstack.cloud.volume:
- state: present
- cloud: mordred
- availability_zone: az2
- size: 40
- display_name: test_volume
- scheduler_hints:
- same_host: 243e8d3c-8f47-4a61-93d6-7215c344b0c0
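-
-    # Illustrative sketch: create a bootable volume from an image
-    # (the image name is an assumption)
-    - name: create bootable volume from image
-      openstack.cloud.volume:
-        state: present
-        cloud: mordred
-        size: 10
-        display_name: boot_volume
-        image: cirros
-        bootable: true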
-'''
-
-RETURN = '''
-id:
- description: Cinder's unique ID for this volume
- returned: always
- type: str
- sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
-
-volume:
- description: Cinder's representation of the volume object
- returned: always
- type: dict
- sample: {'...'}
-'''
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class VolumeModule(OpenStackModule):
-
- argument_spec = dict(
- size=dict(type='int'),
- volume_type=dict(type='str'),
- display_name=dict(required=True, aliases=['name'], type='str'),
- display_description=dict(aliases=['description'], type='str'),
- image=dict(type='str'),
- snapshot_id=dict(type='str'),
- volume=dict(type='str'),
- state=dict(default='present', choices=['absent', 'present'], type='str'),
- scheduler_hints=dict(type='dict'),
- metadata=dict(type='dict'),
- bootable=dict(type='bool', default=False)
- )
-
- module_kwargs = dict(
- mutually_exclusive=[
- ['image', 'snapshot_id', 'volume'],
- ],
- required_if=[
- ['state', 'present', ['size']],
- ],
- )
-
- def _needs_update(self, volume):
-        '''
-        Check for differences in the updatable values. At the moment
-        openstacksdk only supports extending the volume size; this
-        may change in the future.
-        :returns: bool
-        '''
- compare_simple = ['size']
-
- for k in compare_simple:
- if self.params[k] is not None and self.params[k] != volume.get(k):
- return True
-
- return False
-
- def _modify_volume(self, volume):
-        '''
-        Modify a volume. The only modification to an existing volume
-        available at the moment is extending its size; this is
-        limited by the openstacksdk and may change whenever the
-        functionality is extended.
-        '''
- volume = self.conn.get_volume(self.params['display_name'])
- diff = {'before': volume, 'after': ''}
- size = self.params['size']
-
- if size < volume.get('size'):
- self.fail_json(
- msg='Cannot shrink volumes, size: {0} < {1}'.format(size, volume.get('size'))
- )
-
- if not self._needs_update(volume):
- diff['after'] = volume
- self.exit_json(changed=False, id=volume['id'], volume=volume, diff=diff)
-
- if self.ansible.check_mode:
- diff['after'] = volume
- self.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
-
- self.conn.volume.extend_volume(
- volume.id,
- size
- )
- diff['after'] = self.conn.get_volume(self.params['display_name'])
- self.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
-
- def _present_volume(self):
-
- diff = {'before': '', 'after': ''}
-
- volume_args = dict(
- size=self.params['size'],
- volume_type=self.params['volume_type'],
- display_name=self.params['display_name'],
- display_description=self.params['display_description'],
- snapshot_id=self.params['snapshot_id'],
- bootable=self.params['bootable'],
- availability_zone=self.params['availability_zone'],
- )
- if self.params['image']:
- image_id = self.conn.get_image_id(self.params['image'])
- if not image_id:
- self.fail_json(msg="Failed to find image '%s'" % self.params['image'])
- volume_args['imageRef'] = image_id
-
- if self.params['volume']:
- volume_id = self.conn.get_volume_id(self.params['volume'])
- if not volume_id:
- self.fail_json(msg="Failed to find volume '%s'" % self.params['volume'])
- volume_args['source_volid'] = volume_id
-
- if self.params['scheduler_hints']:
- volume_args['scheduler_hints'] = self.params['scheduler_hints']
-
- if self.params['metadata']:
- volume_args['metadata'] = self.params['metadata']
-
- if self.ansible.check_mode:
- diff['after'] = volume_args
- self.exit_json(changed=True, id=None, volume=volume_args, diff=diff)
-
- volume = self.conn.create_volume(
- wait=self.params['wait'], timeout=self.params['timeout'],
- **volume_args)
- diff['after'] = volume
- self.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
-
- def _absent_volume(self, volume):
- changed = False
- diff = {'before': '', 'after': ''}
-
- if self.conn.volume_exists(self.params['display_name']):
- volume = self.conn.get_volume(self.params['display_name'])
- diff['before'] = volume
-
- if self.ansible.check_mode:
- self.exit_json(changed=True, diff=diff)
-
- try:
- changed = self.conn.delete_volume(name_or_id=self.params['display_name'],
- wait=self.params['wait'],
- timeout=self.params['timeout'])
- except self.sdk.exceptions.ResourceTimeout:
- diff['after'] = volume
- self.exit_json(changed=changed, diff=diff)
-
- self.exit_json(changed=changed, diff=diff)
-
- def run(self):
-
- state = self.params['state']
- if self.conn.volume_exists(self.params['display_name']):
- volume = self.conn.get_volume(self.params['display_name'])
- else:
- volume = None
-
- if state == 'present':
- if not volume:
- self._present_volume()
- elif self._needs_update(volume):
- self._modify_volume(volume)
- else:
- self.exit_json(changed=False, id=volume['id'], volume=volume)
- if state == 'absent':
- self._absent_volume(volume)
-
-
-def main():
- module = VolumeModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_volume_snapshot.py b/ansible_collections/openstack/cloud/plugins/modules/os_volume_snapshot.py
deleted file mode 100644
index 8625984c6..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_volume_snapshot.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/python
-# coding: utf-8 -*-
-
-# Copyright (c) 2016, Mario Santos <mario.rf.santos@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: volume_snapshot
-short_description: Create/Delete Cinder Volume Snapshots
-author: OpenStack Ansible SIG
-description:
-    - Create or delete Cinder block storage volume snapshots
-options:
- display_name:
- description:
- - Name of the snapshot
- required: true
- aliases: ['name']
- type: str
- display_description:
- description:
- - String describing the snapshot
- aliases: ['description']
- type: str
- volume:
- description:
- - The volume name or id to create/delete the snapshot
- required: True
- type: str
- force:
- description:
-      - Allows or disallows a snapshot of a volume to be created while the
-        volume is attached to an instance.
- type: bool
- default: 'no'
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Creates a snapshot on volume 'test_volume'
-- name: create and delete snapshot
- hosts: localhost
- tasks:
- - name: create snapshot
- openstack.cloud.volume_snapshot:
- state: present
- cloud: mordred
- availability_zone: az2
- display_name: test_snapshot
- volume: test_volume
- - name: delete snapshot
- openstack.cloud.volume_snapshot:
- state: absent
- cloud: mordred
- availability_zone: az2
- display_name: test_snapshot
- volume: test_volume
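-
-    # Illustrative sketch: snapshot a volume while it is attached to an
-    # instance by setting force (the volume name is an assumption)
-    - name: create snapshot of an attached volume
-      openstack.cloud.volume_snapshot:
-        state: present
-        cloud: mordred
-        display_name: attached_snapshot
-        volume: attached_volume
-        force: true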
-'''
-
-RETURN = '''
-snapshot:
- description: The snapshot instance after the change
- returned: success
- type: dict
- sample:
- id: 837aca54-c0ee-47a2-bf9a-35e1b4fdac0c
- name: test_snapshot
- volume_id: ec646a7c-6a35-4857-b38b-808105a24be6
- size: 2
- status: available
- display_name: test_snapshot
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class VolumeSnapshotModule(OpenStackModule):
- argument_spec = dict(
- display_name=dict(required=True, aliases=['name']),
- display_description=dict(default=None, aliases=['description']),
- volume=dict(required=True),
- force=dict(required=False, default=False, type='bool'),
- state=dict(default='present', choices=['absent', 'present']),
- )
-
- module_kwargs = dict(
- supports_check_mode=True
- )
-
- def _present_volume_snapshot(self):
- volume = self.conn.get_volume(self.params['volume'])
- snapshot = self.conn.get_volume_snapshot(
- self.params['display_name'], filters={'volume_id': volume.id})
- if not snapshot:
- snapshot = self.conn.create_volume_snapshot(
- volume.id,
- force=self.params['force'],
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- name=self.params['display_name'],
- description=self.params.get('display_description')
- )
- self.exit_json(changed=True, snapshot=snapshot)
- else:
- self.exit_json(changed=False, snapshot=snapshot)
-
- def _absent_volume_snapshot(self):
- volume = self.conn.get_volume(self.params['volume'])
- snapshot = self.conn.get_volume_snapshot(
- self.params['display_name'], filters={'volume_id': volume.id})
- if not snapshot:
- self.exit_json(changed=False)
- else:
- self.conn.delete_volume_snapshot(
- name_or_id=snapshot.id,
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- )
- self.exit_json(changed=True, snapshot_id=snapshot.id)
-
- def _system_state_change(self):
- volume = self.conn.get_volume(self.params['volume'])
- snapshot = self.conn.get_volume_snapshot(
- self.params['display_name'],
- filters={'volume_id': volume.id})
- state = self.params['state']
-
- if state == 'present':
- return snapshot is None
- if state == 'absent':
- return snapshot is not None
-
- def run(self):
- state = self.params['state']
-
- if self.conn.volume_exists(self.params['volume']):
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change())
- if state == 'present':
- self._present_volume_snapshot()
- if state == 'absent':
- self._absent_volume_snapshot()
- else:
- self.fail_json(
- msg="No volume with name or id '{0}' was found.".format(
- self.params['volume']))
-
-
-def main():
- module = VolumeSnapshotModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/os_zone.py b/ansible_collections/openstack/cloud/plugins/modules/os_zone.py
deleted file mode 100644
index 98cf655e3..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/os_zone.py
+++ /dev/null
@@ -1,244 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016 Hewlett-Packard Enterprise
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
----
-module: dns_zone
-short_description: Manage OpenStack DNS zones
-author: OpenStack Ansible SIG
-description:
- - Manage OpenStack DNS zones. Zones can be created, deleted or
- updated. Only the I(email), I(description), I(ttl) and I(masters) values
- can be updated.
-options:
- name:
- description:
- - Zone name
- required: true
- type: str
- zone_type:
- description:
- - Zone type
- choices: [primary, secondary]
- type: str
- email:
- description:
- - Email of the zone owner (only applies if zone_type is primary)
- type: str
- description:
- description:
- - Zone description
- type: str
- ttl:
- description:
- - TTL (Time To Live) value in seconds
- type: int
- masters:
- description:
- - Master nameservers (only applies if zone_type is secondary)
- type: list
- elements: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-# Create a zone named "example.net"
-- openstack.cloud.dns_zone:
- cloud: mycloud
- state: present
- name: example.net.
- zone_type: primary
- email: test@example.net
- description: Test zone
- ttl: 3600
-
-# Update the TTL on existing "example.net." zone
-- openstack.cloud.dns_zone:
- cloud: mycloud
- state: present
- name: example.net.
- ttl: 7200
-
-# Delete zone named "example.net."
-- openstack.cloud.dns_zone:
- cloud: mycloud
- state: absent
- name: example.net.
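-
-# Illustrative sketch: create a secondary zone that replicates from master
-# nameservers (the zone name and master addresses are assumptions)
-- openstack.cloud.dns_zone:
-    cloud: mycloud
-    state: present
-    name: example.org.
-    zone_type: secondary
-    masters:
-      - 192.0.2.1
-      - 192.0.2.2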
-'''
-
-RETURN = '''
-zone:
- description: Dictionary describing the zone.
- returned: On success when I(state) is 'present'.
- type: complex
- contains:
- id:
- description: Unique zone ID
- type: str
- sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
- name:
- description: Zone name
- type: str
- sample: "example.net."
- type:
- description: Zone type
- type: str
- sample: "PRIMARY"
- email:
- description: Zone owner email
- type: str
- sample: "test@example.net"
- description:
- description: Zone description
- type: str
- sample: "Test description"
- ttl:
- description: Zone TTL value
- type: int
- sample: 3600
- masters:
- description: Zone master nameservers
- type: list
- sample: []
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class DnsZoneModule(OpenStackModule):
-
- argument_spec = dict(
- name=dict(required=True, type='str'),
- zone_type=dict(required=False, choices=['primary', 'secondary'], type='str'),
- email=dict(required=False, type='str'),
- description=dict(required=False, type='str'),
- ttl=dict(required=False, type='int'),
- masters=dict(required=False, type='list', elements='str'),
- state=dict(default='present', choices=['absent', 'present'], type='str'),
- )
-
- def _system_state_change(self, state, email, description, ttl, masters, zone):
- if state == 'present':
- if not zone:
- return True
- if email is not None and zone.email != email:
- return True
- if description is not None and zone.description != description:
- return True
- if ttl is not None and zone.ttl != ttl:
- return True
- if masters is not None and zone.masters != masters:
- return True
- if state == 'absent' and zone:
- return True
- return False
-
- def _wait(self, timeout, zone, state):
- """Wait for a zone to reach the desired state for the given state."""
-
- for count in self.sdk.utils.iterate_timeout(
- timeout,
- "Timeout waiting for zone to be %s" % state):
-
- if (state == 'absent' and zone is None) or (state == 'present' and zone and zone.status == 'ACTIVE'):
- return
-
- try:
- zone = self.conn.get_zone(zone.id)
- except Exception:
- continue
-
- if zone and zone.status == 'ERROR':
- self.fail_json(msg="Zone reached ERROR state while waiting for it to be %s" % state)
-
- def run(self):
-
- name = self.params['name']
- state = self.params['state']
- wait = self.params['wait']
- timeout = self.params['timeout']
-
- zone = self.conn.get_zone(name)
-
- if state == 'present':
-
- zone_type = self.params['zone_type']
- email = self.params['email']
- description = self.params['description']
- ttl = self.params['ttl']
- masters = self.params['masters']
-
- kwargs = {}
-
- if email:
- kwargs['email'] = email
- if description:
- kwargs['description'] = description
- if ttl:
- kwargs['ttl'] = ttl
- if masters:
- kwargs['masters'] = masters
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, email,
- description, ttl,
- masters, zone))
-
- if zone is None:
- zone = self.conn.create_zone(
- name=name, zone_type=zone_type, **kwargs)
- changed = True
- else:
- if masters is None:
- masters = []
-
- pre_update_zone = zone
- changed = self._system_state_change(state, email,
- description, ttl,
- masters, pre_update_zone)
- if changed:
- zone = self.conn.update_zone(
- name, **kwargs)
-
- if wait:
- self._wait(timeout, zone, state)
-
- self.exit_json(changed=changed, zone=zone)
-
- elif state == 'absent':
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, None,
- None, None,
- None, zone))
-
- if zone is None:
- changed = False
- else:
- self.conn.delete_zone(name)
- changed = True
-
- if wait:
- self._wait(timeout, zone, state)
-
- self.exit_json(changed=changed)
-
-
-def main():
- module = DnsZoneModule()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/port.py b/ansible_collections/openstack/cloud/plugins/modules/port.py
index accef4fcc..65e3b4cc1 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/port.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/port.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -9,132 +10,150 @@ module: port
short_description: Add/Update/Delete ports from an OpenStack cloud.
author: OpenStack Ansible SIG
description:
- - Add, Update or Remove ports from an OpenStack cloud. A I(state) of
- 'present' will ensure the port is created or updated if required.
+ - Add, Update or Remove ports from an OpenStack cloud.
options:
- network:
- description:
- - Network ID or name this port belongs to.
- - Required when creating a new port.
- type: str
- name:
- description:
- - Name that has to be given to the port.
- type: str
- fixed_ips:
- description:
- - Desired IP and/or subnet for this port. Subnet is referenced by
- subnet_id and IP is referenced by ip_address.
- type: list
- elements: dict
- suboptions:
- ip_address:
- description: The fixed IP address to attempt to allocate.
- required: true
- type: str
- subnet_id:
- description: The subnet to attach the IP address to.
- type: str
- admin_state_up:
- description:
- - Sets admin state.
- type: bool
- mac_address:
- description:
- - MAC address of this port.
- type: str
- security_groups:
- description:
- - Security group(s) ID(s) or name(s) associated with the port (comma
- separated string or YAML list)
- type: list
- elements: str
- no_security_groups:
- description:
- - Do not associate a security group with this port.
- type: bool
- default: 'no'
- allowed_address_pairs:
- description:
- - "Allowed address pairs list. Allowed address pairs are supported with
- dictionary structure.
- e.g. allowed_address_pairs:
- - ip_address: 10.1.0.12
- mac_address: ab:cd:ef:12:34:56
- - ip_address: ..."
- type: list
- elements: dict
- suboptions:
- ip_address:
- description: The IP address.
- type: str
- mac_address:
- description: The MAC address.
- type: str
- extra_dhcp_opts:
- description:
- - "Extra dhcp options to be assigned to this port. Extra options are
- supported with dictionary structure. Note that options cannot be removed
- only updated.
- e.g. extra_dhcp_opts:
- - opt_name: opt name1
- opt_value: value1
- ip_version: 4
- - opt_name: ..."
- type: list
- elements: dict
- suboptions:
- opt_name:
- description: The name of the DHCP option to set.
- type: str
- required: true
- opt_value:
- description: The value of the DHCP option to set.
- type: str
- required: true
- ip_version:
- description: The IP version this DHCP option is for.
- type: int
- required: true
- device_owner:
- description:
- - The ID of the entity that uses this port.
- type: str
- device_id:
- description:
- - Device ID of device using this port.
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- vnic_type:
- description:
- - The type of the port that should be created
- choices: [normal, direct, direct-physical, macvtap, baremetal, virtio-forwarder]
- type: str
- port_security_enabled:
- description:
- - Whether to enable or disable the port security on the network.
- type: bool
- binding_profile:
- description:
- - Binding profile dict that the port should be created with.
- type: dict
- dns_name:
- description:
- - The dns name of the port ( only with dns-integration enabled )
- type: str
- dns_domain:
- description:
- - The dns domain of the port ( only with dns-integration enabled )
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ allowed_address_pairs:
+ description:
+ - "Allowed address pairs list. Allowed address pairs are supported
+ with dictionary structure.
+ e.g. allowed_address_pairs:
+ - ip_address: 10.1.0.12
+ mac_address: ab:cd:ef:12:34:56
+ - ip_address: ..."
+ - The port will change during update if not all suboptions are
+ specified, e.g. when ip_address is given but mac_address is not.
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
+ description: The IP address.
+ type: str
+ mac_address:
+ description: The MAC address.
+ type: str
+ binding_profile:
+ description:
+ - Binding profile dict that the port should be created with.
+ type: dict
+ binding_vnic_type:
+ description:
+ - The type of the port that should be created
+ choices: [normal,
+ direct,
+ direct-physical,
+ macvtap,
+ baremetal,
+ virtio-forwarder]
+ type: str
+ aliases: ['vnic_type']
+ description:
+ description:
+ - Description of the port.
+ type: str
+ device_id:
+ description:
+ - Device ID of device using this port.
+ type: str
+ device_owner:
+ description:
+ - The ID of the entity that uses this port.
+ type: str
+ dns_domain:
+ description:
+      - The DNS domain of the port (only with dns-integration enabled)
+ type: str
+ dns_name:
+ description:
+      - The DNS name of the port (only with dns-integration enabled)
+ type: str
+ extra_dhcp_opts:
+ description:
+ - "Extra dhcp options to be assigned to this port. Extra options are
+        supported with dictionary structure. Note that options cannot be
+        removed, only updated.
+ e.g. extra_dhcp_opts:
+ - ip_version: 4
+ opt_name: bootfile-name
+ opt_value: pxelinux.0
+ - opt_name: ..."
+ - The port will change during update if not all suboptions are
+ specified, e.g. when opt_name is given but ip_version is not.
+ type: list
+ elements: dict
+ suboptions:
+ ip_version:
+ description: The IP version this DHCP option is for.
+ type: int
+ required: true
+ opt_name:
+ description: The name of the DHCP option to set.
+ type: str
+ required: true
+ opt_value:
+ description: The value of the DHCP option to set.
+ type: str
+ required: true
+ fixed_ips:
+ description:
+ - Desired IP and/or subnet for this port. Subnet is referenced by
+ subnet_id and IP is referenced by ip_address.
+ - The port will change during update if not all suboptions are
+ specified, e.g. when ip_address is given but subnet_id is not.
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
+ description: The fixed IP address to attempt to allocate.
+ required: true
+ type: str
+ subnet_id:
+ description: The subnet to attach the IP address to.
+ type: str
+ is_admin_state_up:
+ description:
+ - Sets admin state.
+ type: bool
+ aliases: ['admin_state_up']
+ mac_address:
+ description:
+ - MAC address of this port.
+ type: str
+ name:
+ description:
+ - Name that has to be given to the port.
+ - This port attribute cannot be updated.
+ type: str
+ required: true
+ network:
+ description:
+ - ID or name of the network this port belongs to.
+ - Required when creating a new port.
+ - Must be a name when creating a port.
+ - This port attribute cannot be updated.
+ type: str
+ no_security_groups:
+ description:
+ - Do not associate a security group with this port.
+ - "Deprecated. Use I(security_groups): C([]) instead
+ of I(no_security_groups): C(true)."
+ type: bool
+ default: 'false'
+ is_port_security_enabled:
+ description:
+ - Whether to enable or disable the port security on the network.
+ type: bool
+ aliases: ['port_security_enabled']
+ security_groups:
+ description:
+ - Security group(s) ID(s) or name(s) associated with the port.
+ type: list
+ elements: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -210,7 +229,7 @@ EXAMPLES = '''
project_name: admin
name: port1
network: foo
- vnic_type: direct
+ binding_vnic_type: direct
# Create a port with binding profile
- openstack.cloud.port:
@@ -223,306 +242,457 @@ EXAMPLES = '''
name: port1
network: foo
binding_profile:
- "pci_slot": "0000:03:11.1"
- "physical_network": "provider"
+ pci_slot: "0000:03:11.1"
+ physical_network: "provider"
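+
+# Illustrative sketch: create a port with a fixed IP address and extra DHCP
+# options (the cloud name, addresses, subnet id and option values below are
+# assumptions)
+- openstack.cloud.port:
+    state: present
+    cloud: mycloud
+    name: port1
+    network: foo
+    fixed_ips:
+      - ip_address: 192.0.2.10
+        subnet_id: 00000000-0000-0000-0000-000000000000
+    extra_dhcp_opts:
+      - ip_version: 4
+        opt_name: bootfile-name
+        opt_value: pxelinux.0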
'''
RETURN = '''
-id:
- description: Unique UUID.
- returned: success
- type: str
-name:
- description: Name given to the port.
- returned: success
- type: str
-network_id:
- description: Network ID this port belongs in.
- returned: success
- type: str
-security_groups:
- description: Security group(s) associated with this port.
- returned: success
- type: list
-status:
- description: Port's status.
- returned: success
- type: str
-fixed_ips:
- description: Fixed ip(s) associated with this port.
- returned: success
- type: list
-tenant_id:
- description: Tenant id associated with this port.
- returned: success
- type: str
-allowed_address_pairs:
- description: Allowed address pairs with this port.
- returned: success
- type: list
-admin_state_up:
- description: Admin state up flag for this port.
- returned: success
- type: bool
-vnic_type:
- description: Type of the created port
- returned: success
- type: str
-port_security_enabled:
- description: Port security state on the network.
- returned: success
- type: bool
-binding:profile:
- description: Port binded profile
- returned: success
+port:
+ description: Dictionary describing the port.
type: dict
+ returned: On success when I(state) is C(present).
+ contains:
+ allowed_address_pairs:
+ description: Allowed address pairs.
+ returned: success
+ type: list
+ sample: []
+ binding_host_id:
+ description: |
+ The ID of the host where the port is allocated. In some cases,
+ different implementations can run on different hosts.
+ returned: success
+ type: str
+ sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
+ binding_profile:
+ description: |
+        A dictionary that enables the application running on the
+ specified host to pass and receive vif port-specific
+ information to the plug-in.
+ returned: success
+ type: dict
+ sample: {}
+ binding_vif_details:
+ description: |
+ A dictionary that enables the application to pass
+ information about functions that the Networking API provides.
+ returned: success
+ type: dict
+ binding_vif_type:
+ description: The VIF type for the port.
+ returned: success
+ type: dict
+ binding_vnic_type:
+ description: |
+ The virtual network interface card (vNIC) type that is
+ bound to the neutron port.
+ returned: success
+ type: str
+ sample: "normal"
+ created_at:
+ description: Timestamp when the port was created.
+ returned: success
+ type: str
+ sample: "2022-02-03T13:28:25Z"
+ data_plane_status:
+ description: Status of the underlying data plane of a port.
+ returned: success
+ type: str
+ description:
+ description: The port description.
+ returned: success
+ type: str
+ device_id:
+ description: Device ID of this port.
+ returned: success
+ type: str
+ sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
+ device_owner:
+ description: Device owner of this port, e.g. C(network:dhcp).
+ returned: success
+ type: str
+ sample: "network:router_interface"
+ device_profile:
+ description: |
+ Device profile of this port, refers to Cyborg device-profiles:
+ https://docs.openstack.org/api-ref/accelerator/v2/index.html#
+ device-profiles.
+ returned: success
+ type: str
+ dns_assignment:
+ description: DNS assignment for the port.
+ returned: success
+ type: list
+ dns_domain:
+ description: DNS domain assigned to the port.
+ returned: success
+ type: str
+ dns_name:
+ description: DNS name for the port.
+ returned: success
+ type: str
+ extra_dhcp_opts:
+ description: |
+ A set of zero or more extra DHCP option pairs.
+ An option pair consists of an option value and name.
+ returned: success
+ type: list
+ sample: []
+ fixed_ips:
+ description: |
+ IP addresses for the port. Includes the IP address and subnet
+ ID.
+ returned: success
+ type: list
+ id:
+ description: The port ID.
+ returned: success
+ type: str
+ sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
+ ip_allocation:
+ description: |
+ The ip_allocation indicates when ports use deferred,
+ immediate or no IP allocation.
+ returned: success
+ type: str
+ is_admin_state_up:
+ description: |
+ The administrative state of the port, which is up C(True) or
+ down C(False).
+ returned: success
+ type: bool
+ sample: true
+ is_port_security_enabled:
+ description: |
+ The port security status, which is enabled C(True) or disabled
+ C(False).
+ returned: success
+ type: bool
+ sample: false
+ mac_address:
+      description: The MAC address of the port.
+ returned: success
+ type: str
+ sample: "00:00:5E:00:53:42"
+ name:
+ description: The port name.
+ returned: success
+ type: str
+ sample: "port_name"
+ network_id:
+ description: The ID of the attached network.
+ returned: success
+ type: str
+ sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
+ numa_affinity_policy:
+ description: |
+ The NUMA affinity policy defined for this port.
+ returned: success
+ type: str
+ sample: "required"
+ project_id:
+ description: The ID of the project who owns the network.
+ returned: success
+ type: str
+ sample: "aa1ede4f-3952-4131-aab6-3b8902268c7d"
+ propagate_uplink_status:
+ description: Whether to propagate uplink status of the port.
+ returned: success
+ type: bool
+ sample: false
+ qos_network_policy_id:
+ description: |
+ The ID of the QoS policy attached to the network where the
+ port is bound.
+ returned: success
+ type: str
+ sample: "1e4f3958-c0c9-4dec-82fa-ed2dc1c5cb34"
+ qos_policy_id:
+ description: The ID of the QoS policy attached to the port.
+ returned: success
+ type: str
+ sample: "b20bb47f-5d6d-45a6-8fe7-2c1b44f0db73"
+ resource_request:
+ description: |
+ The port-resource-request exposes Placement resources
+ (i.e.: minimum-bandwidth) and traits (i.e.: vnic-type, physnet)
+ requested by a port to Nova and Placement.
+ returned: success
+ type: str
+ revision_number:
+ description: The revision number of the resource.
+ returned: success
+ type: int
+ sample: 0
+ security_group_ids:
+ description: The IDs of any attached security groups.
+ returned: success
+ type: list
+ status:
+ description: The port status. Value is C(ACTIVE) or C(DOWN).
+ returned: success
+ type: str
+ sample: "ACTIVE"
+ tags:
+ description: The list of tags on the resource.
+ returned: success
+ type: list
+ sample: []
+ tenant_id:
+ description: Same as I(project_id). Deprecated.
+ returned: success
+ type: str
+ sample: "51fce036d7984ba6af4f6c849f65ef00"
+ trunk_details:
+ description: |
+ The trunk referring to this parent port and its subports.
+ Present for trunk parent ports if C(trunk-details) extension
+ is loaded.
+ returned: success
+ type: dict
+ updated_at:
+ description: Timestamp when the port was last updated.
+ returned: success
+ type: str
+ sample: "2022-02-03T13:28:25Z"
'''
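For orientation, a minimal playbook sketch of how these return values are typically consumed; the cloud, network and subnet id are placeholders, and the fixed_ips and extra_dhcp_opts entry keys (ip_address/subnet_id and opt_name/opt_value) follow the Neutron port API:

- name: Create a port and register its return value (sketch, names and ids are placeholders)
  openstack.cloud.port:
    cloud: devstack
    name: demo-port
    network: private
    fixed_ips:
      - ip_address: 192.0.2.10
        subnet_id: 1f0e8d8c-57c3-4b06-8f0f-2f1e2b9b6a44
    extra_dhcp_opts:
      - opt_name: bootfile-name
        opt_value: pxelinux.0
  register: result

- name: Show a few of the fields documented above
  ansible.builtin.debug:
    msg: "{{ result.port.id }} is {{ result.port.status }} on network {{ result.port.network_id }}"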
-from ansible.module_utils.basic import missing_required_lib
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-try:
- from collections import OrderedDict
- HAS_ORDEREDDICT = True
-except ImportError:
- try:
- from ordereddict import OrderedDict
- HAS_ORDEREDDICT = True
- except ImportError:
- HAS_ORDEREDDICT = False
-
-class NetworkPortModule(OpenStackModule):
+class PortModule(OpenStackModule):
argument_spec = dict(
- network=dict(required=False),
- name=dict(required=False),
- fixed_ips=dict(type='list', default=None, elements='dict'),
- admin_state_up=dict(type='bool', default=None),
- mac_address=dict(default=None),
- security_groups=dict(default=None, type='list', elements='str'),
+ allowed_address_pairs=dict(type='list', elements='dict'),
+ binding_profile=dict(type='dict'),
+ binding_vnic_type=dict(choices=['normal', 'direct', 'direct-physical',
+ 'macvtap', 'baremetal',
+ 'virtio-forwarder'],
+ aliases=['vnic_type']),
+ description=dict(),
+ device_id=dict(),
+ device_owner=dict(),
+ dns_domain=dict(),
+ dns_name=dict(),
+ extra_dhcp_opts=dict(type='list', elements='dict'),
+ fixed_ips=dict(type='list', elements='dict'),
+ is_admin_state_up=dict(type='bool', aliases=['admin_state_up']),
+ mac_address=dict(),
+ name=dict(required=True),
+ network=dict(),
no_security_groups=dict(default=False, type='bool'),
- allowed_address_pairs=dict(type='list', default=None, elements='dict'),
- extra_dhcp_opts=dict(type='list', default=None, elements='dict'),
- device_owner=dict(default=None),
- device_id=dict(default=None),
+ is_port_security_enabled=dict(type='bool', aliases=['port_security_enabled']),
+ security_groups=dict(type='list', elements='str'),
state=dict(default='present', choices=['absent', 'present']),
- vnic_type=dict(default=None,
- choices=['normal', 'direct', 'direct-physical',
- 'macvtap', 'baremetal', 'virtio-forwarder']),
- port_security_enabled=dict(default=None, type='bool'),
- binding_profile=dict(default=None, type='dict'),
- dns_name=dict(type='str', default=None),
- dns_domain=dict(type='str', default=None)
)
module_kwargs = dict(
mutually_exclusive=[
['no_security_groups', 'security_groups'],
],
+ required_if=[
+ ('state', 'present', ('network',)),
+ ],
supports_check_mode=True
)
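Per the module_kwargs above, network is only required when state is present, and no_security_groups cannot be combined with security_groups; a short sketch, with cloud and network names as assumptions:

- name: Create a port without any security groups
  openstack.cloud.port:
    cloud: devstack
    name: demo-port
    network: private
    no_security_groups: true
    state: present

- name: Delete the same port; network may be omitted for state=absent
  openstack.cloud.port:
    cloud: devstack
    name: demo-port
    state: absent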
- def _is_dns_integration_enabled(self):
- """ Check if dns-integraton is enabled """
- for ext in self.conn.network.extensions():
- if ext.alias == 'dns-integration':
- return True
- return False
-
- def _needs_update(self, port):
- """Check for differences in the updatable values.
-
- NOTE: We don't currently allow name updates.
- """
- compare_simple = ['admin_state_up',
- 'mac_address',
- 'device_owner',
- 'device_id',
- 'binding:vnic_type',
- 'port_security_enabled',
- 'binding:profile']
- compare_dns = ['dns_name', 'dns_domain']
- compare_list_dict = ['allowed_address_pairs',
- 'extra_dhcp_opts']
- compare_list = ['security_groups']
-
- if self.conn.has_service('dns') and \
- self._is_dns_integration_enabled():
- for key in compare_dns:
- if self.params[key] is not None and \
- self.params[key] != port[key]:
- return True
-
- for key in compare_simple:
- if self.params[key] is not None and self.params[key] != port[key]:
- return True
- for key in compare_list:
- if (
- self.params[key] is not None
- and set(self.params[key]) != set(port[key])
- ):
- return True
-
- for key in compare_list_dict:
- if not self.params[key]:
- if port.get(key):
- return True
-
- if self.params[key]:
- if not port.get(key):
- return True
-
- # sort dicts in list
- port_ordered = [OrderedDict(sorted(d.items())) for d in port[key]]
- param_ordered = [OrderedDict(sorted(d.items())) for d in self.params[key]]
-
- for d in param_ordered:
- if d not in port_ordered:
- return True
-
- for d in port_ordered:
- if d not in param_ordered:
- return True
-
- # NOTE: if port was created or updated with 'no_security_groups=True',
- # subsequent updates without 'no_security_groups' flag or
- # 'no_security_groups=False' and no specified 'security_groups', will not
- # result in an update to the port where the default security group is
- # applied.
- if self.params['no_security_groups'] and port['security_groups'] != []:
- return True
-
- if self.params['fixed_ips'] is not None:
- for item in self.params['fixed_ips']:
- if 'ip_address' in item:
- # if ip_address in request does not match any in existing port,
- # update is required.
- if not any(match['ip_address'] == item['ip_address']
- for match in port['fixed_ips']):
- return True
- if 'subnet_id' in item:
- return True
- for item in port['fixed_ips']:
- # if ip_address in existing port does not match any in request,
- # update is required.
- if not any(match.get('ip_address') == item['ip_address']
- for match in self.params['fixed_ips']):
- return True
-
- return False
-
- def _system_state_change(self, port):
+ def run(self):
+ network_name_or_id = self.params['network']
+ port_name_or_id = self.params['name']
state = self.params['state']
- if state == 'present':
- if not port:
- return True
- return self._needs_update(port)
- if state == 'absent' and port:
- return True
- return False
-
- def _compose_port_args(self):
- port_kwargs = {}
- optional_parameters = ['name',
- 'fixed_ips',
- 'admin_state_up',
- 'mac_address',
- 'security_groups',
- 'allowed_address_pairs',
- 'extra_dhcp_opts',
- 'device_owner',
- 'device_id',
- 'binding:vnic_type',
- 'port_security_enabled',
- 'binding:profile']
- if self.conn.has_service('dns') and \
- self._is_dns_integration_enabled():
- optional_parameters.extend(['dns_name', 'dns_domain'])
+ network = None
+ if network_name_or_id:
+ network = self.conn.network.find_network(
+ network_name_or_id, ignore_missing=False)
- for optional_param in optional_parameters:
- if self.params[optional_param] is not None:
- port_kwargs[optional_param] = self.params[optional_param]
+ port = self.conn.network.find_port(
+ port_name_or_id,
+ # use network id in query if network parameter was specified
+ **(dict(network_id=network.id) if network else dict()))
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(port, state))
+
+ if state == 'present' and not port:
+ # create port
+ port = self._create(network)
+ self.exit_json(changed=True,
+ port=port.to_dict(computed=False))
+ elif state == 'present' and port:
+ # update port
+ update = self._build_update(port)
+ if update:
+ port = self._update(port, update)
+
+ self.exit_json(changed=bool(update),
+ port=port.to_dict(computed=False))
+ elif state == 'absent' and port:
+ # delete port
+ self._delete(port)
+ self.exit_json(changed=True)
+ elif state == 'absent' and not port:
+ # do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, port):
+ update = {}
+
+ # A port's name cannot be updated by this module because
+ # it is used to find ports by name or id.
+ # If name is an id, then we do not have a name to update.
+ # If name actually is a name, then it was used to find the
+ # matching port, so the name already matches what the user
+ # defined.
+
+ # updateable port attributes in openstacksdk
+ # (OpenStack API names in braces):
+ # - allowed_address_pairs (allowed_address_pairs)
+ # - binding_host_id (binding:host_id)
+ # - binding_profile (binding:profile)
+ # - binding_vnic_type (binding:vnic_type)
+ # - data_plane_status (data_plane_status)
+ # - description (description)
+ # - device_id (device_id)
+ # - device_owner (device_owner)
+ # (- device_profile (device_profile))
+ # - dns_domain (dns_domain)
+ # - dns_name (dns_name)
+ # - extra_dhcp_opts (extra_dhcp_opts)
+ # - fixed_ips (fixed_ips)
+ # - is_admin_state_up (admin_state_up)
+ # - is_port_security_enabled (port_security_enabled)
+ # - mac_address (mac_address)
+ # - name (name)
+ # - numa_affinity_policy (numa_affinity_policy)
+ # - qos_policy_id (qos_policy_id)
+ # - security_group_ids (security_groups)
+ # Ref.: https://docs.openstack.org/api-ref/network/v2/index.html#update-port
+
+ # Update all known updateable attributes although
+ # our module might not support them yet
+
+ # Update attributes which can be compared straight away
+ port_attributes = dict(
+ (k, self.params[k])
+ for k in ['binding_host_id', 'binding_vnic_type',
+ 'data_plane_status', 'description', 'device_id',
+ 'device_owner', 'is_admin_state_up',
+ 'is_port_security_enabled', 'mac_address',
+ 'numa_affinity_policy']
+ if k in self.params and self.params[k] is not None
+ and self.params[k] != port[k])
+
+ # Compare dictionaries
+ for k in ['binding_profile']:
+ if self.params[k] is None:
+ continue
+
+ if (self.params[k] or port[k]) \
+ and self.params[k] != port[k]:
+ port_attributes[k] = self.params[k]
+
+ # Attribute qos_policy_id is not supported by this module and would
+ # need special handling using self.conn.network.find_qos_policy()
+
+ # Compare attributes which are lists of dictionaries
+ for k in ['allowed_address_pairs', 'extra_dhcp_opts', 'fixed_ips']:
+ if self.params[k] is None:
+ continue
+
+ if (self.params[k] or port[k]) \
+ and self.params[k] != port[k]:
+ port_attributes[k] = self.params[k]
+
+ # Compare security groups
if self.params['no_security_groups']:
- port_kwargs['security_groups'] = []
-
- return port_kwargs
-
- def get_security_group_id(self, security_group_name_or_id):
- security_group = self.conn.get_security_group(security_group_name_or_id)
- if not security_group:
- self.fail_json(msg="Security group: %s, was not found"
- % security_group_name_or_id)
- return security_group['id']
-
- def run(self):
- if not HAS_ORDEREDDICT:
- self.fail_json(msg=missing_required_lib('ordereddict'))
-
- name = self.params['name']
- state = self.params['state']
-
- if self.params['security_groups']:
- # translate security_groups to UUID's if names where provided
- self.params['security_groups'] = [
- self.get_security_group_id(v)
- for v in self.params['security_groups']
+ security_group_ids = []
+ elif self.params['security_groups'] is not None:
+ security_group_ids = [
+ self.conn.network.find_security_group(
+ security_group_name_or_id, ignore_missing=False).id
+ for security_group_name_or_id in self.params['security_groups']
]
+ else:
+ security_group_ids = None
- # Neutron API accept 'binding:vnic_type' as an argument
- # for the port type.
- self.params['binding:vnic_type'] = self.params.pop('vnic_type')
- # Neutron API accept 'binding:profile' as an argument
- # for the port binding profile type.
- self.params['binding:profile'] = self.params.pop('binding_profile')
+ if security_group_ids is not None \
+ and set(security_group_ids) != set(port['security_group_ids']):
+ port_attributes['security_group_ids'] = security_group_ids
- port = None
- network_id = None
- if name:
- port = self.conn.get_port(name)
+ # Compare dns attributes
+ if self.conn.has_service('dns') and \
+ self.conn.network.find_extension('dns-integration'):
+ port_attributes.update(dict(
+ (k, self.params[k])
+ for k in ['dns_name', 'dns_domain']
+ if self.params[k] is not None and self.params[k] != port[k]
+ ))
+
+ if port_attributes:
+ update['port_attributes'] = port_attributes
+ return update
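Given the comparison logic above, a re-run that changes only some attributes results in a partial update: security group names are resolved to ids before being compared with the port's security_group_ids, and parameters that already match are dropped from the request. A sketch, with cloud, network and security group names as assumptions:

- name: Update description and security groups of an existing port
  openstack.cloud.port:
    cloud: devstack
    name: demo-port
    network: private
    description: managed by ansible
    security_groups:
      - default
      - web
  register: result

- name: changed is only true when the update comparison found differences
  ansible.builtin.debug:
    var: result.changed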
+
+ def _create(self, network):
+ args = {}
+ args['network_id'] = network.id
+
+ # Fetch IDs of security groups next to fail early
+ # if any security group does not exist
+ if self.params['no_security_groups']:
+ args['security_group_ids'] = []
+ elif self.params['security_groups'] is not None:
+ args['security_group_ids'] = [
+ self.conn.network.find_security_group(
+ security_group_name_or_id, ignore_missing=False).id
+ for security_group_name_or_id in self.params['security_groups']
+ ]
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(port))
-
- changed = False
- if state == 'present':
- if not port:
- network = self.params['network']
- if not network:
- self.fail_json(
- msg="Parameter 'network' is required in Port Create"
- )
- port_kwargs = self._compose_port_args()
- network_object = self.conn.get_network(network)
-
- if network_object:
- network_id = network_object['id']
- else:
- self.fail_json(
- msg="Specified network was not found."
- )
-
- port_kwargs['network_id'] = network_id
- port = self.conn.network.create_port(**port_kwargs)
- changed = True
- else:
- if self._needs_update(port):
- port_kwargs = self._compose_port_args()
- port = self.conn.network.update_port(port['id'],
- **port_kwargs)
- changed = True
- self.exit_json(changed=changed, id=port['id'], port=port)
-
- if state == 'absent':
- if port:
- self.conn.delete_port(port['id'])
- changed = True
- self.exit_json(changed=changed)
+ for k in ['allowed_address_pairs',
+ 'binding_profile',
+ 'binding_vnic_type',
+ 'device_id',
+ 'device_owner',
+ 'description',
+ 'extra_dhcp_opts',
+ 'is_admin_state_up',
+ 'mac_address',
+ 'is_port_security_enabled',
+ 'fixed_ips',
+ 'name']:
+ if self.params[k] is not None:
+ args[k] = self.params[k]
+
+ if self.conn.has_service('dns') \
+ and self.conn.network.find_extension('dns-integration'):
+ for k in ['dns_domain', 'dns_name']:
+ if self.params[k] is not None:
+ args[k] = self.params[k]
+
+ return self.conn.network.create_port(**args)
+
+ def _delete(self, port):
+ self.conn.network.delete_port(port.id)
+
+ def _update(self, port, update):
+ port_attributes = update.get('port_attributes')
+ if port_attributes:
+ port = self.conn.network.update_port(port, **port_attributes)
+ return port
+
+ def _will_change(self, port, state):
+ if state == 'present' and not port:
+ return True
+ elif state == 'present' and port:
+ return bool(self._build_update(port))
+ elif state == 'absent' and port:
+ return True
+ else:
+ # state == 'absent' and not port:
+ return False
def main():
- module = NetworkPortModule()
+ module = PortModule()
module()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/port_info.py b/ansible_collections/openstack/cloud/plugins/modules/port_info.py
index 0ed3f0599..f8229ac13 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/port_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/port_info.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -9,24 +10,18 @@ short_description: Retrieve information about ports within OpenStack.
author: OpenStack Ansible SIG
description:
- Retrieve information about ports from OpenStack.
- - This module was called C(openstack.cloud.port_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.port_info) module no longer returns C(ansible_facts)!
options:
- port:
+ name:
description:
- Unique name or ID of a port.
type: str
+ aliases: ['port']
filters:
description:
- A dictionary of meta data to use for further filtering. Elements
- of this dictionary will be matched against the returned port
- dictionaries. Matching is currently limited to strings within
- the port dictionary, or strings within nested dictionaries.
+ of this dictionary will be passed to the API as query
+ parameter filters.
type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -38,15 +33,15 @@ EXAMPLES = '''
register: result
- debug:
- msg: "{{ result.openstack_ports }}"
+ msg: "{{ result.ports}}"
# Gather information about a single port
- openstack.cloud.port_info:
cloud: mycloud
- port: 6140317d-e676-31e1-8a4a-b1913814a471
+ name: 6140317d-e676-31e1-8a4a-b1913814a471
-# Gather information about all ports that have device_id set to a specific value
-# and with a status of ACTIVE.
+# Gather information about all ports that have device_id set to a specific
+# value and with a status of ACTIVE.
- openstack.cloud.port_info:
cloud: mycloud
filters:
@@ -55,96 +50,133 @@ EXAMPLES = '''
'''
RETURN = '''
-openstack_ports:
- description: List of port dictionaries. A subset of the dictionary keys
- listed below may be returned, depending on your cloud provider.
- returned: always, but can be null
- type: complex
+ports:
+ description: |
+ List of port dictionaries. A subset of the dictionary keys listed below
+ may be returned, depending on your cloud provider.
+ returned: always
+ type: list
+ elements: dict
contains:
- admin_state_up:
- description: The administrative state of the router, which is
- up (true) or down (false).
- returned: success
- type: bool
- sample: true
allowed_address_pairs:
- description: A set of zero or more allowed address pairs. An
- address pair consists of an IP address and MAC address.
+ description: Allowed address pairs.
returned: success
type: list
sample: []
- "binding:host_id":
- description: The UUID of the host where the port is allocated.
+ binding_host_id:
+ description: |
+ The ID of the host where the port is allocated. In some cases,
+ different implementations can run on different hosts.
returned: success
type: str
sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
- "binding:profile":
- description: A dictionary the enables the application running on
- the host to pass and receive VIF port-specific
- information to the plug-in.
+ binding_profile:
+ description: |
+ A dictionary that enables the application running on the
+ specified host to pass and receive vif port-specific
+ information to the plug-in.
returned: success
type: dict
sample: {}
- "binding:vif_details":
- description: A dictionary that enables the application to pass
- information about functions that the Networking API
- provides.
+ binding_vif_details:
+ description: |
+ A dictionary that enables the application to pass
+ information about functions that the Networking API provides.
returned: success
type: dict
- sample: {"port_filter": true}
- "binding:vif_type":
+ binding_vif_type:
description: The VIF type for the port.
returned: success
type: dict
- sample: "ovs"
- "binding:vnic_type":
- description: The virtual network interface card (vNIC) type that is
- bound to the neutron port.
+ binding_vnic_type:
+ description: |
+ The virtual network interface card (vNIC) type that is
+ bound to the neutron port.
returned: success
type: str
sample: "normal"
+ created_at:
+ description: Timestamp when the port was created.
+ returned: success
+ type: str
+ sample: "2022-02-03T13:28:25Z"
+ data_plane_status:
+ description: Status of the underlying data plane of a port.
+ returned: success
+ type: str
+ description:
+ description: The port description.
+ returned: success
+ type: str
device_id:
- description: The UUID of the device that uses this port.
+ description: Device ID of this port.
returned: success
type: str
sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
device_owner:
- description: The UUID of the entity that uses this port.
+ description: Device owner of this port, e.g. C(network:dhcp).
returned: success
type: str
sample: "network:router_interface"
+ device_profile:
+ description: |
+ Device profile of this port, refers to Cyborg device-profiles:
+ https://docs.openstack.org/api-ref/accelerator/v2/index.html#
+ device-profiles.
+ returned: success
+ type: str
dns_assignment:
- description: DNS assignment information.
+ description: DNS assignment for the port.
returned: success
type: list
+ dns_domain:
+ description: DNS domain assigned to the port.
+ returned: success
+ type: str
dns_name:
- description: DNS name
+ description: DNS name for the port.
returned: success
type: str
- sample: ""
extra_dhcp_opts:
- description: A set of zero or more extra DHCP option pairs.
- An option pair consists of an option value and name.
+ description: |
+ A set of zero or more extra DHCP option pairs.
+ An option pair consists of an option value and name.
returned: success
type: list
sample: []
fixed_ips:
- description: The IP addresses for the port. Includes the IP address
- and UUID of the subnet.
+ description: |
+ IP addresses for the port. Includes the IP address and subnet
+ ID.
returned: success
type: list
id:
- description: The UUID of the port.
+ description: The port ID.
returned: success
type: str
sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
- ip_address:
- description: The IP address.
+ ip_allocation:
+ description: |
+ The ip_allocation indicates when ports use deferred,
+ immediate or no IP allocation.
returned: success
type: str
- sample: "127.0.0.1"
+ is_admin_state_up:
+ description: |
+ The administrative state of the port, which is up C(True) or
+ down C(False).
+ returned: success
+ type: bool
+ sample: true
+ is_port_security_enabled:
+ description: |
+ The port security status, which is enabled C(True) or disabled
+ C(False).
+ returned: success
+ type: bool
+ sample: false
mac_address:
- description: The MAC address.
+ description: The MAC address of the port.
returned: success
type: str
sample: "00:00:5E:00:53:42"
@@ -154,55 +186,106 @@ openstack_ports:
type: str
sample: "port_name"
network_id:
- description: The UUID of the attached network.
+ description: The ID of the attached network.
returned: success
type: str
sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
- port_security_enabled:
- description: The port security status. The status is enabled (true) or disabled (false).
+ numa_affinity_policy:
+ description: |
+ The NUMA affinity policy defined for this port.
+ returned: success
+ type: str
+ sample: "required"
+ project_id:
+ description: The ID of the project that owns the port.
+ returned: success
+ type: str
+ sample: "aa1ede4f-3952-4131-aab6-3b8902268c7d"
+ propagate_uplink_status:
+ description: Whether to propagate uplink status of the port.
returned: success
type: bool
sample: false
- security_groups:
- description: The UUIDs of any attached security groups.
+ qos_network_policy_id:
+ description: |
+ The ID of the QoS policy attached to the network where the
+ port is bound.
+ returned: success
+ type: str
+ sample: "1e4f3958-c0c9-4dec-82fa-ed2dc1c5cb34"
+ qos_policy_id:
+ description: The ID of the QoS policy attached to the port.
+ returned: success
+ type: str
+ sample: "b20bb47f-5d6d-45a6-8fe7-2c1b44f0db73"
+ resource_request:
+ description: |
+ The port-resource-request exposes Placement resources
+ (e.g. minimum-bandwidth) and traits (e.g. vnic-type, physnet)
+ requested by a port to Nova and Placement.
+ returned: success
+ type: str
+ revision_number:
+ description: The revision number of the resource.
+ returned: success
+ type: int
+ sample: 0
+ security_group_ids:
+ description: The IDs of any attached security groups.
returned: success
type: list
status:
- description: The port status.
+ description: The port status. Value is C(ACTIVE) or C(DOWN).
returned: success
type: str
sample: "ACTIVE"
+ tags:
+ description: The list of tags on the resource.
+ returned: success
+ type: list
+ sample: []
tenant_id:
- description: The UUID of the tenant who owns the network.
+ description: Same as I(project_id). Deprecated.
returned: success
type: str
sample: "51fce036d7984ba6af4f6c849f65ef00"
+ trunk_details:
+ description: |
+ The trunk referring to this parent port and its subports.
+ Present for trunk parent ports if C(trunk-details) extension
+ is loaded.
+ returned: success
+ type: dict
+ updated_at:
+ description: Timestamp when the port was last updated.
+ returned: success
+ type: str
+ sample: "2022-02-03T13:28:25Z"
'''
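Because the result is now returned as a list under ports, with entries shaped like the dictionary documented above, it can be post-processed directly; a sketch that assumes a cloud name and reuses the sample network id from the docs:

- name: List active ports on one network
  openstack.cloud.port_info:
    cloud: devstack
    filters:
      network_id: dd1ede4f-3952-4131-aab6-3b8902268c7d
      status: ACTIVE
  register: result

- name: Print each port id together with its fixed IP addresses
  ansible.builtin.debug:
    msg: "{{ item.id }}: {{ item.fixed_ips | map(attribute='ip_address') | list }}"
  loop: "{{ result.ports }}"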
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-class NetworkPortInfoModule(OpenStackModule):
+class PortInfoModule(OpenStackModule):
argument_spec = dict(
- port=dict(required=False),
- filters=dict(type='dict', required=False),
+ name=dict(aliases=['port']),
+ filters=dict(type='dict'),
)
module_kwargs = dict(
supports_check_mode=True
)
- deprecated_names = ('openstack.cloud.port_facts')
-
def run(self):
- port = self.params.get('port')
- filters = self.params.get('filters')
+ ports = [p.to_dict(computed=False) for p in
+ self.conn.search_ports(
+ name_or_id=self.params['name'],
+ filters=self.params['filters'])]
- ports = self.conn.search_ports(port, filters)
- self.exit_json(changed=False, openstack_ports=ports)
+ self.exit_json(changed=False, ports=ports)
def main():
- module = NetworkPortInfoModule()
+ module = PortInfoModule()
module()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/project.py b/ansible_collections/openstack/cloud/plugins/modules/project.py
index 9719452dc..7db66012a 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/project.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/project.py
@@ -1,102 +1,111 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2015 IBM Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: project
-short_description: Manage OpenStack Projects
+short_description: Manage OpenStack Identity (Keystone) projects
author: OpenStack Ansible SIG
description:
- - Manage OpenStack Projects. Projects can be created,
- updated or deleted using this module. A project will be updated
- if I(name) matches an existing project and I(state) is present.
- The value for I(name) cannot be updated without deleting and
- re-creating the project.
+ - Create, update or delete an OpenStack Identity (Keystone) project.
options:
- name:
- description:
- - Name for the project
- required: true
- type: str
- description:
- description:
- - Description for the project
- type: str
- domain_id:
- description:
- - Domain id to create the project in if the cloud supports domains.
- aliases: ['domain']
- type: str
- enabled:
- description:
- - Is the project enabled
- type: bool
- default: 'yes'
- properties:
- description:
- - Additional properties to be associated with this project. Requires
- openstacksdk>0.45.
- type: dict
- required: false
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ name:
+ description:
+ - Name for the project.
+ - This attribute cannot be updated.
+ required: true
+ type: str
+ description:
+ description:
+ - Description for the project.
+ type: str
+ domain:
+ description:
+ - Domain name or id to create the project in if the cloud supports
+ domains.
+ aliases: ['domain_id']
+ type: str
+ extra_specs:
+ description:
+ - Additional properties to be associated with this project.
+ type: dict
+ aliases: ['properties']
+ is_enabled:
+ description:
+ - Whether this project is enabled or not.
+ aliases: ['enabled']
+ type: bool
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create a project
-- openstack.cloud.project:
+EXAMPLES = r'''
+- name: Create a project
+ openstack.cloud.project:
cloud: mycloud
- endpoint_type: admin
- state: present
- name: demoproject
description: demodescription
- domain_id: demoid
- enabled: True
- properties:
+ domain: demoid
+ is_enabled: True
+ name: demoproject
+ extra_specs:
internal_alias: demo_project
+ state: present
-# Delete a project
-- openstack.cloud.project:
+- name: Delete a project
+ openstack.cloud.project:
cloud: mycloud
endpoint_type: admin
- state: absent
name: demoproject
+ state: absent
'''
-
-RETURN = '''
+RETURN = r'''
project:
- description: Dictionary describing the project.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Project ID
- type: str
- sample: "f59382db809c43139982ca4189404650"
- name:
- description: Project name
- type: str
- sample: "demoproject"
- description:
- description: Project description
- type: str
- sample: "demodescription"
- enabled:
- description: Boolean to indicate if project is enabled
- type: bool
- sample: True
+ description: Dictionary describing the project.
+ returned: On success when I(state) is C(present).
+ type: dict
+ contains:
+ description:
+ description: Project description
+ type: str
+ sample: "demodescription"
+ domain_id:
+ description: Domain ID to which the project belongs
+ type: str
+ sample: "default"
+ id:
+ description: Project ID
+ type: str
+ sample: "f59382db809c43139982ca4189404650"
+ is_domain:
+ description: Indicates whether the project also acts as a domain.
+ type: bool
+ is_enabled:
+ description: Indicates whether the project is enabled
+ type: bool
+ name:
+ description: Project name
+ type: str
+ sample: "demoproject"
+ options:
+ description: The resource options for the project
+ type: dict
+ parent_id:
+ description: The ID of the parent of the project
+ type: str
+ tags:
+ description: A list of associated tags
+ type: list
+ elements: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -104,111 +113,137 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class IdentityProjectModule(OpenStackModule):
argument_spec = dict(
+ description=dict(),
+ domain=dict(aliases=['domain_id']),
+ extra_specs=dict(type='dict', aliases=['properties']),
+ is_enabled=dict(type='bool', aliases=['enabled']),
name=dict(required=True),
- description=dict(required=False),
- domain_id=dict(required=False, aliases=['domain']),
- properties=dict(required=False, type='dict', min_ver='0.45.1'),
- enabled=dict(default=True, type='bool'),
state=dict(default='present', choices=['absent', 'present'])
)
module_kwargs = dict(
supports_check_mode=True
)
- def _needs_update(self, project):
- keys = ('description', 'enabled')
- for key in keys:
- if self.params[key] is not None and self.params[key] != project.get(key):
- return True
-
- properties = self.params['properties']
- if properties:
- project_properties = project.get('properties')
- for k, v in properties.items():
- if v is not None and (k not in project_properties or v != project_properties[k]):
- return True
-
- return False
-
- def _system_state_change(self, project):
- state = self.params['state']
- if state == 'present':
- if project is None:
- changed = True
- else:
- if self._needs_update(project):
- changed = True
- else:
- changed = False
-
- elif state == 'absent':
- changed = project is not None
-
- return changed
-
def run(self):
- name = self.params['name']
- description = self.params['description']
- domain = self.params['domain_id']
- enabled = self.params['enabled']
- properties = self.params['properties'] or {}
state = self.params['state']
- if domain:
- try:
- # We assume admin is passing domain id
- dom = self.conn.get_domain(domain)['id']
- domain = dom
- except Exception:
- # If we fail, maybe admin is passing a domain name.
- # Note that domains have unique names, just like id.
- try:
- dom = self.conn.search_domains(filters={'name': domain})[0]['id']
- domain = dom
- except Exception:
- # Ok, let's hope the user is non-admin and passing a sane id
- pass
-
- if domain:
- project = self.conn.get_project(name, domain_id=domain)
- else:
- project = self.conn.get_project(name)
+ project = self._find()
if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(project))
-
- if state == 'present':
- if project is None:
- project = self.conn.create_project(
- name=name, description=description,
- domain_id=domain,
- enabled=enabled)
- changed = True
-
- project = self.conn.update_project(
- project['id'],
- description=description,
- enabled=enabled,
- **properties)
- else:
- if self._needs_update(project):
- project = self.conn.update_project(
- project['id'],
- description=description,
- enabled=enabled,
- **properties)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed, project=project)
-
- elif state == 'absent':
- if project is None:
- changed = False
- else:
- self.conn.delete_project(project['id'])
- changed = True
- self.exit_json(changed=changed)
+ self.exit_json(changed=self._will_change(state, project))
+
+ if state == 'present' and not project:
+ # Create project
+ project = self._create()
+ self.exit_json(changed=True,
+ project=project.to_dict(computed=False))
+
+ elif state == 'present' and project:
+ # Update project
+ update = self._build_update(project)
+ if update:
+ project = self._update(project, update)
+
+ self.exit_json(changed=bool(update),
+ project=project.to_dict(computed=False))
+
+ elif state == 'absent' and project:
+ # Delete project
+ self._delete(project)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not project:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, project):
+ update = {}
+
+ # Params name and domain are being used to find this project.
+
+ non_updateable_keys = [k for k in []
+ if self.params[k] is not None
+ and self.params[k] != project[k]]
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in ['description', 'is_enabled']
+ if self.params[k] is not None
+ and self.params[k] != project[k])
+
+ extra_specs = self.params['extra_specs']
+ if extra_specs:
+ duplicate_keys = set(attributes.keys()) & set(extra_specs.keys())
+ if duplicate_keys:
+ raise ValueError('Duplicate key(s) in extra_specs: {0}'
+ .format(', '.join(list(duplicate_keys))))
+ for k, v in extra_specs.items():
+ if v != project[k]:
+ attributes[k] = v
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
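Since the update path above only sends attributes that differ and compares extra_specs keys individually, re-running a task with unchanged values is a no-op, while changed values update the project in place; a sketch with cloud, domain and extra_specs keys as assumptions:

- name: Disable an existing project and keep its custom property
  openstack.cloud.project:
    cloud: devstack
    name: demoproject
    domain: default
    is_enabled: false
    extra_specs:
      internal_alias: demo_project
  register: result

- name: Inspect the updated state
  ansible.builtin.debug:
    var: result.project.is_enabled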
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['description', 'is_enabled', 'name']
+ if self.params[k] is not None)
+
+ domain_name_or_id = self.params['domain']
+ if domain_name_or_id is not None:
+ domain = self.conn.identity.find_domain(domain_name_or_id,
+ ignore_missing=False)
+ kwargs['domain_id'] = domain.id
+
+ extra_specs = self.params['extra_specs']
+ if extra_specs:
+ duplicate_keys = set(kwargs.keys()) & set(extra_specs.keys())
+ if duplicate_keys:
+ raise ValueError('Duplicate key(s) in extra_specs: {0}'
+ .format(', '.join(list(duplicate_keys))))
+ kwargs = dict(kwargs, **extra_specs)
+
+ return self.conn.identity.create_project(**kwargs)
+
+ def _delete(self, project):
+ self.conn.identity.delete_project(project.id)
+
+ def _find(self):
+ name = self.params['name']
+ kwargs = {}
+
+ domain_name_or_id = self.params['domain']
+ if domain_name_or_id is not None:
+ domain = self.conn.identity.find_domain(domain_name_or_id,
+ ignore_missing=False)
+ kwargs['domain_id'] = domain.id
+
+ return self.conn.identity.find_project(name_or_id=name,
+ **kwargs)
+
+ def _update(self, project, update):
+ attributes = update.get('attributes')
+ if attributes:
+ project = self.conn.identity.update_project(project.id,
+ **attributes)
+
+ return project
+
+ def _will_change(self, state, project):
+ if state == 'present' and not project:
+ return True
+ elif state == 'present' and project:
+ return bool(self._build_update(project))
+ elif state == 'absent' and project:
+ return True
+ else:
+ # state == 'absent' and not project:
+ return False
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/project_access.py b/ansible_collections/openstack/cloud/plugins/modules/project_access.py
deleted file mode 100644
index c49a84495..000000000
--- a/ansible_collections/openstack/cloud/plugins/modules/project_access.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/python
-
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: project_access
-short_description: Manage OpenStack compute flavors access
-author: OpenStack Ansible SIG
-description:
- - Add or remove flavor, volume_type or other resources access
- from OpenStack.
-options:
- state:
- description:
- - Indicate desired state of the resource.
- choices: ['present', 'absent']
- required: false
- default: present
- type: str
- target_project_id:
- description:
- - Project id.
- required: true
- type: str
- resource_type:
- description:
- - The resource type (eg. nova_flavor, cinder_volume_type).
- required: true
- type: str
- resource_name:
- description:
- - The resource name (eg. tiny).
- required: true
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
-
-extends_documentation_fragment:
-- openstack.cloud.openstack
-'''
-
-EXAMPLES = '''
-- name: "Enable access to tiny flavor to your tenant."
- openstack.cloud.project_access:
- cloud: mycloud
- state: present
- target_project_id: f0f1f2f3f4f5f67f8f9e0e1
- resource_name: tiny
- resource_type: nova_flavor
-
-
-- name: "Disable access to the given flavor to project"
- openstack.cloud.project_access:
- cloud: mycloud
- state: absent
- target_project_id: f0f1f2f3f4f5f67f8f9e0e1
- resource_name: tiny
- resource_type: nova_flavor
-'''
-
-RETURN = '''
-flavor:
- description: Dictionary describing the flavor.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: Flavor ID.
- returned: success
- type: str
- sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
- name:
- description: Flavor name.
- returned: success
- type: str
- sample: "tiny"
-
-'''
-
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-
-
-class IdentityProjectAccess(OpenStackModule):
- argument_spec = dict(
- state=dict(required=False, default='present',
- choices=['absent', 'present']),
- target_project_id=dict(required=True, type='str'),
- resource_type=dict(required=True, type='str'),
- resource_name=dict(required=True, type='str'),
- )
-
- module_kwargs = dict(
- supports_check_mode=True,
- required_if=[
- ('state', 'present', ['target_project_id'])
- ]
- )
-
- def run(self):
- state = self.params['state']
- resource_name = self.params['resource_name']
- resource_type = self.params['resource_type']
- target_project_id = self.params['target_project_id']
-
- if resource_type == 'nova_flavor':
- # returns Munch({'NAME_ATTR': 'name',
- # 'tenant_id': u'37e55da59ec842649d84230f3a24eed5',
- # 'HUMAN_ID': False,
- # 'flavor_id': u'6d4d37b9-0480-4a8c-b8c9-f77deaad73f9',
- # 'request_ids': [], 'human_id': None}),
- _get_resource = self.conn.get_flavor
- _list_resource_access = self.conn.list_flavor_access
- _add_resource_access = self.conn.add_flavor_access
- _remove_resource_access = self.conn.remove_flavor_access
- elif resource_type == 'cinder_volume_type':
- # returns [Munch({
- # 'project_id': u'178cdb9955b047eea7afbe582038dc94',
- # 'properties': {'request_ids': [], 'NAME_ATTR': 'name',
- # 'human_id': None,
- # 'HUMAN_ID': False},
- # 'id': u'd5573023-b290-42c8-b232-7c5ca493667f'}),
- _get_resource = self.conn.get_volume_type
- _list_resource_access = self.conn.get_volume_type_access
- _add_resource_access = self.conn.add_volume_type_access
- _remove_resource_access = self.conn.remove_volume_type_access
- else:
- self.exit_json(
- changed=False,
- resource_name=resource_name,
- resource_type=resource_type,
- error="Not implemented.")
-
- resource = _get_resource(resource_name)
- if not resource:
- self.exit_json(
- changed=False,
- resource_name=resource_name,
- resource_type=resource_type,
- error="Not found.")
- resource_id = getattr(resource, 'id', resource['id'])
- # _list_resource_access returns a list of dicts containing 'project_id'
- acls = _list_resource_access(resource_id)
-
- if not all(acl.get('project_id') for acl in acls):
- self.exit_json(
- changed=False,
- resource_name=resource_name,
- resource_type=resource_type,
- error="Missing project_id in resource output.")
- allowed_tenants = [acl['project_id'] for acl in acls]
-
- changed_access = any((
- state == 'present' and target_project_id not in allowed_tenants,
- state == 'absent' and target_project_id in allowed_tenants
- ))
- if self.ansible.check_mode or not changed_access:
- self.exit_json(
- changed=changed_access, resource=resource, id=resource_id)
-
- if state == 'present':
- _add_resource_access(
- resource_id, target_project_id
- )
- elif state == 'absent':
- _remove_resource_access(
- resource_id, target_project_id
- )
-
- self.exit_json(
- changed=True, resource=resource, id=resource_id)
-
-
-def main():
- module = IdentityProjectAccess()
- module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/project_info.py b/ansible_collections/openstack/cloud/plugins/modules/project_info.py
index fb1e2767d..6ecbd87f0 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/project_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/project_info.py
@@ -1,150 +1,129 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: project_info
short_description: Retrieve information about one or more OpenStack projects
author: OpenStack Ansible SIG
description:
- - Retrieve information about a one or more OpenStack projects
- - This module was called C(openstack.cloud.project_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.project_info) module no longer returns C(ansible_facts)!
+ - Retrieve information about one or more OpenStack projects.
options:
- name:
- description:
- - Name or ID of the project
- type: str
- domain:
- description:
- - Name or ID of the domain containing the project if the cloud supports domains
- type: str
- filters:
- description:
- - A dictionary of meta data to use for further filtering. Elements of
- this dictionary may be additional dictionaries.
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ name:
+ description:
+ - Name or ID of the project.
+ type: str
+ domain:
+ description:
+ - Name or ID of the domain containing the project.
+ type: str
+ filters:
+ description:
+ - A dictionary of meta data to use for filtering projects.
+ - Elements of I(filters) are passed as query parameters to
+ the OpenStack Identity API.
+ type: dict
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Gather information about previously created projects
-- openstack.cloud.project_info:
+EXAMPLES = r'''
+- name: Fetch all Identity (Keystone) projects
+ openstack.cloud.project_info:
cloud: awesomecloud
- register: result
-- debug:
- msg: "{{ result.openstack_projects }}"
-# Gather information about a previously created project by name
-- openstack.cloud.project_info:
+- name: Fetch all projects with a name
+ openstack.cloud.project_info:
cloud: awesomecloud
name: demoproject
- register: result
-- debug:
- msg: "{{ result.openstack_projects }}"
-# Gather information about a previously created project in a specific domain
-- openstack.cloud.project_info:
+- name: Fetch all projects with a name in a domain
+ openstack.cloud.project_info:
cloud: awesomecloud
name: demoproject
domain: admindomain
- register: result
-- debug:
- msg: "{{ result.openstack_projects }}"
-# Gather information about a previously created project in a specific domain with filter
-- openstack.cloud.project_info:
+- name: Fetch all disabled projects
+ openstack.cloud.project_info:
cloud: awesomecloud
- name: demoproject
- domain: admindomain
filters:
- enabled: False
- register: result
-- debug:
- msg: "{{ result.openstack_projects }}"
+ is_enabled: false
'''
-
-RETURN = '''
-openstack_projects:
- description: has all the OpenStack information about projects
- returned: always, but can be null
- type: complex
- contains:
- id:
- description: Unique UUID.
- returned: success
- type: str
- name:
- description: Name given to the project.
- returned: success
- type: str
- description:
- description: Description of the project
- returned: success
- type: str
- enabled:
- description: Flag to indicate if the project is enabled
- returned: success
- type: bool
- domain_id:
- description: Domain ID containing the project (keystone v3 clouds only)
- returned: success
- type: bool
+RETURN = r'''
+projects:
+ description: List of dictionaries describing Identity (Keystone) projects.
+ elements: dict
+ returned: always, but can be empty
+ type: list
+ contains:
+ description:
+ description: Project description
+ type: str
+ sample: "demodescription"
+ domain_id:
+ description: Domain ID to which the project belongs
+ type: str
+ sample: "default"
+ id:
+ description: Project ID
+ type: str
+ sample: "f59382db809c43139982ca4189404650"
+ is_domain:
+ description: Indicates whether the project also acts as a domain.
+ type: bool
+ is_enabled:
+ description: Indicates whether the project is enabled
+ type: bool
+ name:
+ description: Project name
+ type: str
+ sample: "demoproject"
+ options:
+ description: The resource options for the project
+ type: dict
+ parent_id:
+ description: The ID of the parent of the project
+ type: str
+ tags:
+ description: A list of associated tags
+ type: list
+ elements: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class IdentityProjectInfoModule(OpenStackModule):
- deprecated_names = ('project_facts', 'openstack.cloud.project_facts')
-
argument_spec = dict(
- name=dict(required=False),
- domain=dict(required=False),
- filters=dict(required=False, type='dict'),
+ domain=dict(),
+ name=dict(),
+ filters=dict(type='dict'),
)
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
- name = self.params['name']
- domain = self.params['domain']
- filters = self.params['filters']
- is_old_facts = self.module_name == 'openstack.cloud.project_facts'
-
- if domain:
- try:
- # We assume admin is passing domain id
- dom = self.conn.get_domain(domain)['id']
- domain = dom
- except Exception:
- # If we fail, maybe admin is passing a domain name.
- # Note that domains have unique names, just like id.
- dom = self.conn.search_domains(filters={'name': domain})
- if dom:
- domain = dom[0]['id']
- else:
- self.fail_json(msg='Domain name or ID does not exist')
-
- if not filters:
- filters = {}
-
- filters['domain_id'] = domain
-
- projects = self.conn.search_projects(name, filters)
- if is_old_facts:
- self.exit_json(changed=False, ansible_facts=dict(
- openstack_projects=projects))
- else:
- self.exit_json(changed=False, openstack_projects=projects)
+ filters = self.params['filters'] or {}
+
+ domain_name_or_id = self.params['domain']
+ if domain_name_or_id is not None:
+ domain = self.conn.identity.find_domain(domain_name_or_id)
+
+ if not domain:
+ self.exit_json(changed=False, projects=[])
+
+ filters['domain_id'] = domain.id
+
+ projects = self.conn.search_projects(name_or_id=self.params['name'],
+ filters=filters)
+
+ self.exit_json(changed=False,
+ projects=[p.to_dict(computed=False) for p in projects])
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/quota.py b/ansible_collections/openstack/cloud/plugins/modules/quota.py
index 0d6a4f04c..572d1d7fb 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/quota.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/quota.py
@@ -1,4 +1,6 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 Pason System Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -12,17 +14,6 @@ description:
updated or deleted using this module. A quota will be updated
if it matches an existing project and is present.
options:
- name:
- description:
- - Name of the OpenStack Project to manage.
- required: true
- type: str
- state:
- description:
- - A value of present sets the quota and a value of absent resets the quota to system defaults.
- default: present
- type: str
- choices: ['absent', 'present']
backup_gigabytes:
description: Maximum size of backups in GB's.
type: int
@@ -33,75 +24,89 @@ options:
description: Maximum number of CPU's per project.
type: int
fixed_ips:
- description: Number of fixed IP's to allow.
+ description:
+ - Number of fixed IP's to allow.
+ - Available until Nova API version 2.35.
type: int
floating_ips:
- description: Number of floating IP's to allow in Compute.
- aliases: ['compute_floating_ips']
- type: int
- floatingip:
- description: Number of floating IP's to allow in Network.
- aliases: ['network_floating_ips']
+ description: Number of floating IP's to allow.
+ aliases: [compute_floating_ips, floatingip, network_floating_ips]
type: int
gigabytes:
description: Maximum volume storage allowed for project.
type: int
- gigabytes_types:
+ groups:
+ description: Number of groups that are allowed for the project
+ type: int
+ injected_file_content_bytes:
description:
- - Per driver volume storage quotas. Keys should be
- prefixed with C(gigabytes_) values should be ints.
- type: dict
- injected_file_size:
- description: Maximum file size in bytes.
+ - Maximum file size in bytes.
+ - Available until Nova API version 2.56.
type: int
+ aliases: [injected_file_size]
injected_files:
- description: Number of injected files to allow.
+ description:
+ - Number of injected files to allow.
+ - Available until Nova API version 2.56.
type: int
- injected_path_size:
- description: Maximum path size.
+ injected_file_path_bytes:
+ description:
+ - Maximum path size.
+ - Available until Nova API version 2.56.
type: int
+ aliases: [injected_path_size]
instances:
description: Maximum number of instances allowed.
type: int
key_pairs:
description: Number of key pairs to allow.
type: int
- loadbalancer:
- description: Number of load balancers to allow.
+ load_balancers:
+ description: The maximum number of load balancers you can create.
type: int
+ aliases: [loadbalancer]
metadata_items:
description: Number of metadata items allowed per instance.
type: int
- network:
+ name:
+ description: Name of the OpenStack Project to manage.
+ required: true
+ type: str
+ networks:
description: Number of networks to allow.
type: int
+ aliases: [network]
per_volume_gigabytes:
description: Maximum size in GB's of individual volumes.
type: int
- pool:
- description: Number of load balancer pools to allow.
+ pools:
+ description: The maximum number of pools you can create
type: int
- port:
- description: Number of Network ports to allow, this needs to be greater than the instances limit.
- type: int
- properties:
- description: Number of properties to allow.
+ aliases: [pool]
+ ports:
+ description: Number of network ports to allow; this needs to be greater
+ than the instances limit.
type: int
+ aliases: [port]
ram:
description: Maximum amount of ram in MB to allow.
type: int
- rbac_policy:
+ rbac_policies:
description: Number of policies to allow.
type: int
- router:
+ aliases: [rbac_policy]
+ routers:
description: Number of routers to allow.
type: int
- security_group_rule:
+ aliases: [router]
+ security_group_rules:
description: Number of rules per security group to allow.
type: int
- security_group:
+ aliases: [security_group_rule]
+ security_groups:
description: Number of security groups to allow.
type: int
+ aliases: [security_group]
server_group_members:
description: Number of server group members to allow.
type: int
@@ -111,112 +116,185 @@ options:
snapshots:
description: Number of snapshots to allow.
type: int
- snapshots_types:
- description:
- - Per-driver volume snapshot quotas. Keys should be
- prefixed with C(snapshots_) values should be ints.
- type: dict
- subnet:
+ state:
+ description: A value of C(present) sets the quota and a value of
+ C(absent) resets the quota to defaults.
+ default: present
+ type: str
+ choices: [absent, present]
+ subnets:
description: Number of subnets to allow.
type: int
- subnetpool:
+ aliases: [subnet]
+ subnet_pools:
description: Number of subnet pools to allow.
type: int
+ aliases: [subnetpool]
volumes:
description: Number of volumes to allow.
type: int
- volumes_types:
- description:
- - Per-driver volume count quotas. Keys should be
- prefixed with C(volumes_) values should be ints.
- type: dict
- project:
- description: Unused, kept for compatability
- type: int
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk >= 0.13.0"
- - "keystoneauth1 >= 3.4.0"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
-# List a Project Quota
-- openstack.cloud.quota:
+- name: Fetch current project quota
+ openstack.cloud.quota:
cloud: mycloud
name: demoproject
-# Set a Project back to the defaults
-- openstack.cloud.quota:
+- name: Reset project quota back to defaults
+ openstack.cloud.quota:
cloud: mycloud
name: demoproject
state: absent
-# Update a Project Quota for cores
-- openstack.cloud.quota:
+- name: Change number of cores and volumes
+ openstack.cloud.quota:
cloud: mycloud
name: demoproject
cores: 100
-
-# Update a Project Quota
-- openstack.cloud.quota:
- name: demoproject
- cores: 1000
volumes: 20
- volumes_type:
- - volume_lvm: 10
-# Complete example based on list of projects
-- name: Update quotas
+- name: Update quota again
openstack.cloud.quota:
- name: "{{ item.name }}"
- backup_gigabytes: "{{ item.backup_gigabytes }}"
- backups: "{{ item.backups }}"
- cores: "{{ item.cores }}"
- fixed_ips: "{{ item.fixed_ips }}"
- floating_ips: "{{ item.floating_ips }}"
- floatingip: "{{ item.floatingip }}"
- gigabytes: "{{ item.gigabytes }}"
- injected_file_size: "{{ item.injected_file_size }}"
- injected_files: "{{ item.injected_files }}"
- injected_path_size: "{{ item.injected_path_size }}"
- instances: "{{ item.instances }}"
- key_pairs: "{{ item.key_pairs }}"
- loadbalancer: "{{ item.loadbalancer }}"
- metadata_items: "{{ item.metadata_items }}"
- per_volume_gigabytes: "{{ item.per_volume_gigabytes }}"
- pool: "{{ item.pool }}"
- port: "{{ item.port }}"
- properties: "{{ item.properties }}"
- ram: "{{ item.ram }}"
- security_group_rule: "{{ item.security_group_rule }}"
- security_group: "{{ item.security_group }}"
- server_group_members: "{{ item.server_group_members }}"
- server_groups: "{{ item.server_groups }}"
- snapshots: "{{ item.snapshots }}"
- volumes: "{{ item.volumes }}"
- volumes_types:
- volumes_lvm: "{{ item.volumes_lvm }}"
- snapshots_types:
- snapshots_lvm: "{{ item.snapshots_lvm }}"
- gigabytes_types:
- gigabytes_lvm: "{{ item.gigabytes_lvm }}"
- with_items:
- - "{{ projects }}"
- when: item.state == "present"
+ cloud: mycloud
+ name: demo_project
+ floating_ips: 5
+ networks: 50
+ ports: 300
+ rbac_policies: 5
+ routers: 5
+ subnets: 5
+ subnet_pools: 5
+ security_group_rules: 5
+ security_groups: 5
+ backup_gigabytes: 500
+ backups: 5
+ gigabytes: 500
+ groups: 1
+ pools: 5
+ per_volume_gigabytes: 10
+ snapshots: 5
+ volumes: 5
+ cores: 5
+ instances: 5
+ key_pairs: 5
+ metadata_items: 5
+ ram: 5
+ server_groups: 5
+ server_group_members: 5
+
'''
RETURN = '''
-openstack_quotas:
+quotas:
description: Dictionary describing the project quota.
returned: Regardless of whether changes were made or not
type: dict
+ contains:
+ compute:
+ description: Compute service quotas
+ type: dict
+ contains:
+ cores:
+ description: Maximum number of CPU's per project.
+ type: int
+ injected_file_content_bytes:
+ description: Maximum file size in bytes.
+ type: int
+ injected_files:
+ description: Number of injected files to allow.
+ type: int
+ injected_file_path_bytes:
+ description: Maximum path size.
+ type: int
+ instances:
+ description: Maximum number of instances allowed.
+ type: int
+ key_pairs:
+ description: Number of key pairs to allow.
+ type: int
+ metadata_items:
+ description: Number of metadata items allowed per instance.
+ type: int
+ ram:
+ description: Maximum amount of ram in MB to allow.
+ type: int
+ server_group_members:
+ description: Number of server group members to allow.
+ type: int
+ server_groups:
+ description: Number of server groups to allow.
+ type: int
+ network:
+ description: Network service quotas
+ type: dict
+ contains:
+ floating_ips:
+ description: Number of floating IPs to allow.
+ type: int
+ load_balancers:
+ description: The maximum number of load balancers one can
+ create.
+ type: int
+ networks:
+ description: Number of networks to allow.
+ type: int
+ pools:
+ description: The maximum number of pools one can create.
+ type: int
+ ports:
+ description: Number of network ports to allow; this needs
+ to be greater than the instances limit.
+ type: int
+ rbac_policies:
+ description: Number of policies to allow.
+ type: int
+ routers:
+ description: Number of routers to allow.
+ type: int
+ security_group_rules:
+ description: Number of rules per security group to allow.
+ type: int
+ security_groups:
+ description: Number of security groups to allow.
+ type: int
+ subnet_pools:
+ description: Number of subnet pools to allow.
+ type: int
+ subnets:
+ description: Number of subnets to allow.
+ type: int
+ volume:
+ description: Block storage service quotas
+ type: dict
+ contains:
+ backup_gigabytes:
+ description: Maximum size of backups in GB.
+ type: int
+ backups:
+ description: Maximum number of backups allowed.
+ type: int
+ gigabytes:
+ description: Maximum volume storage allowed for project.
+ type: int
+ groups:
+ description: Number of groups that are allowed for the
+ project.
+ type: int
+ per_volume_gigabytes:
+ description: Maximum size in GB of individual volumes.
+ type: int
+ snapshots:
+ description: Number of snapshots to allow.
+ type: int
+ volumes:
+ description: Number of volumes to allow.
+ type: int
sample:
- openstack_quotas: {
- compute: {
+ quotas:
+ compute:
cores: 150,
fixed_ips: -1,
floating_ips: 10,
@@ -226,146 +304,134 @@ openstack_quotas:
instances: 100,
key_pairs: 100,
metadata_items: 128,
+ networks: -1,
ram: 153600,
- security_group_rules: 20,
- security_groups: 10,
+ security_group_rules: -1,
+ security_groups: -1,
server_group_members: 10,
- server_groups: 10
- },
- network: {
- floatingip: 50,
- loadbalancer: 10,
- network: 10,
- pool: 10,
- port: 160,
- rbac_policy: 10,
- router: 10,
- security_group: 10,
- security_group_rule: 100,
- subnet: 10,
- subnetpool: -1
- },
- volume: {
+ server_groups: 10,
+ network:
+ floating_ips: 50,
+ load_balancers: 10,
+ networks: 10,
+ pools: 10,
+ ports: 160,
+ rbac_policies: 10,
+ routers: 10,
+ security_group_rules: 100,
+ security_groups: 10,
+ subnet_pools: -1,
+ subnets: 10,
+ volume:
backup_gigabytes: 1000,
backups: 10,
gigabytes: 1000,
- gigabytes_lvm: -1,
+ groups: 10,
per_volume_gigabytes: -1,
snapshots: 10,
- snapshots_lvm: -1,
volumes: 10,
- volumes_lvm: -1
- }
- }
-
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+from collections import defaultdict
class QuotaModule(OpenStackModule):
+ # TODO: Add missing network quota options 'check_limit', 'health_monitors',
+ # 'l7_policies', 'listeners' to argument_spec, DOCUMENTATION and
+ # RETURN docstrings
argument_spec = dict(
+ backup_gigabytes=dict(type='int'),
+ backups=dict(type='int'),
+ cores=dict(type='int'),
+ fixed_ips=dict(type='int'),
+ floating_ips=dict(
+ type='int', aliases=['floatingip', 'compute_floating_ips',
+ 'network_floating_ips']),
+ gigabytes=dict(type='int'),
+ groups=dict(type='int'),
+ injected_file_content_bytes=dict(type='int',
+ aliases=['injected_file_size']),
+ injected_file_path_bytes=dict(type='int',
+ aliases=['injected_path_size']),
+ injected_files=dict(type='int'),
+ instances=dict(type='int'),
+ key_pairs=dict(type='int', no_log=False),
+ load_balancers=dict(type='int', aliases=['loadbalancer']),
+ metadata_items=dict(type='int'),
name=dict(required=True),
+ networks=dict(type='int', aliases=['network']),
+ per_volume_gigabytes=dict(type='int'),
+ pools=dict(type='int', aliases=['pool']),
+ ports=dict(type='int', aliases=['port']),
+ ram=dict(type='int'),
+ rbac_policies=dict(type='int', aliases=['rbac_policy']),
+ routers=dict(type='int', aliases=['router']),
+ security_group_rules=dict(type='int', aliases=['security_group_rule']),
+ security_groups=dict(type='int', aliases=['security_group']),
+ server_group_members=dict(type='int'),
+ server_groups=dict(type='int'),
+ snapshots=dict(type='int'),
state=dict(default='present', choices=['absent', 'present']),
- backup_gigabytes=dict(required=False, type='int', default=None),
- backups=dict(required=False, type='int', default=None),
- cores=dict(required=False, type='int', default=None),
- fixed_ips=dict(required=False, type='int', default=None),
- floating_ips=dict(required=False, type='int', default=None, aliases=['compute_floating_ips']),
- floatingip=dict(required=False, type='int', default=None, aliases=['network_floating_ips']),
- gigabytes=dict(required=False, type='int', default=None),
- gigabytes_types=dict(required=False, type='dict', default={}),
- injected_file_size=dict(required=False, type='int', default=None),
- injected_files=dict(required=False, type='int', default=None),
- injected_path_size=dict(required=False, type='int', default=None),
- instances=dict(required=False, type='int', default=None),
- key_pairs=dict(required=False, type='int', default=None, no_log=False),
- loadbalancer=dict(required=False, type='int', default=None),
- metadata_items=dict(required=False, type='int', default=None),
- network=dict(required=False, type='int', default=None),
- per_volume_gigabytes=dict(required=False, type='int', default=None),
- pool=dict(required=False, type='int', default=None),
- port=dict(required=False, type='int', default=None),
- project=dict(required=False, type='int', default=None),
- properties=dict(required=False, type='int', default=None),
- ram=dict(required=False, type='int', default=None),
- rbac_policy=dict(required=False, type='int', default=None),
- router=dict(required=False, type='int', default=None),
- security_group_rule=dict(required=False, type='int', default=None),
- security_group=dict(required=False, type='int', default=None),
- server_group_members=dict(required=False, type='int', default=None),
- server_groups=dict(required=False, type='int', default=None),
- snapshots=dict(required=False, type='int', default=None),
- snapshots_types=dict(required=False, type='dict', default={}),
- subnet=dict(required=False, type='int', default=None),
- subnetpool=dict(required=False, type='int', default=None),
- volumes=dict(required=False, type='int', default=None),
- volumes_types=dict(required=False, type='dict', default={})
+ subnet_pools=dict(type='int', aliases=['subnetpool']),
+ subnets=dict(type='int', aliases=['subnet']),
+ volumes=dict(type='int'),
)
module_kwargs = dict(
supports_check_mode=True
)
- def _get_volume_quotas(self, project):
- return self.conn.get_volume_quotas(project)
-
- def _get_network_quotas(self, project):
- return self.conn.get_network_quotas(project)
-
- def _get_compute_quotas(self, project):
- return self.conn.get_compute_quotas(project)
+ # Some attributes in quota resources don't exist in the api anymore, mostly
+ # compute quotas that were simply network proxies. This map allows marking
+ # them to be skipped.
+ exclusion_map = {
+ 'compute': {
+ # 'fixed_ips', # Available until Nova API version 2.35
+ 'floating_ips', # Available until Nova API version 2.35
+ 'name',
+ 'networks', # Available until Nova API version 2.35
+ 'security_group_rules', # Available until Nova API version 2.35
+ 'security_groups', # Available until Nova API version 2.35
+ # 'injected_file_content_bytes', # Available until
+ # 'injected_file_path_bytes', # Nova API
+ # 'injected_files', # version 2.56
+ },
+ 'network': {'name'},
+ 'volume': {'name'},
+ }
def _get_quotas(self, project):
quota = {}
- try:
- quota['volume'] = self._get_volume_quotas(project)
- except Exception:
- self.warn("No public endpoint for volumev2 service was found. Ignoring volume quotas.")
-
- try:
- quota['network'] = self._get_network_quotas(project)
- except Exception:
- self.warn("No public endpoint for network service was found. Ignoring network quotas.")
-
- quota['compute'] = self._get_compute_quotas(project)
-
- for quota_type in quota.keys():
- quota[quota_type] = self._scrub_results(quota[quota_type])
-
- return quota
+ if self.conn.has_service('block-storage'):
+ quota['volume'] = self.conn.block_storage.get_quota_set(project)
+ else:
+ self.warn('Block storage service aka volume service is not'
+ ' supported by your cloud. Ignoring volume quotas.')
- def _scrub_results(self, quota):
- filter_attr = [
- 'HUMAN_ID',
- 'NAME_ATTR',
- 'human_id',
- 'request_ids',
- 'x_openstack_request_ids',
- ]
+ if self.conn.has_service('network'):
+ quota['network'] = self.conn.network.get_quota(project.id)
+ else:
+ self.warn('Network service is not supported by your cloud.'
+ ' Ignoring network quotas.')
- for attr in filter_attr:
- if attr in quota:
- del quota[attr]
+ quota['compute'] = self.conn.compute.get_quota_set(project.id)
return quota
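The service-availability checks above can be reproduced outside of Ansible with openstacksdk directly. This is a minimal sketch, assuming a clouds.yaml entry named mycloud and a project named demoproject (both hypothetical placeholders), and is not the module's exact code path:

import openstack

conn = openstack.connect(cloud='mycloud')  # hypothetical cloud name
project = conn.identity.find_project('demoproject', ignore_missing=False)

quotas = {}
# Only query services the cloud actually exposes, mirroring the checks above.
if conn.has_service('block-storage'):
    quotas['volume'] = conn.block_storage.get_quota_set(project)
if conn.has_service('network'):
    quotas['network'] = conn.network.get_quota(project.id)
quotas['compute'] = conn.compute.get_quota_set(project.id)

for service, quota in quotas.items():
    print(service, quota.to_dict(computed=False))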
- def _system_state_change_details(self, project_quota_output):
- quota_change_request = {}
- changes_required = False
-
- for quota_type in project_quota_output.keys():
- for quota_option in project_quota_output[quota_type].keys():
- if quota_option in self.params and self.params[quota_option] is not None:
- if project_quota_output[quota_type][quota_option] != self.params[quota_option]:
- changes_required = True
+ def _build_update(self, quotas):
+ changes = defaultdict(dict)
- if quota_type not in quota_change_request:
- quota_change_request[quota_type] = {}
+ for quota_type in quotas.keys():
+ exclusions = self.exclusion_map[quota_type]
+ for attr in quotas[quota_type].keys():
+ if attr in exclusions:
+ continue
+ if (attr in self.params and self.params[attr] is not None
+ and quotas[quota_type][attr] != self.params[attr]):
+ changes[quota_type][attr] = self.params[attr]
- quota_change_request[quota_type][quota_option] = self.params[quota_option]
-
- return (changes_required, quota_change_request)
+ return changes
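The comparison above is what makes the module idempotent: only parameters that were actually set and that differ from the current quota end up in the update request. A rough stand-alone illustration of that logic with hypothetical values:

from collections import defaultdict

# Hypothetical current quotas, module parameters and exclusions.
current = {'compute': {'cores': 20, 'ram': 51200, 'instances': 10}}
params = {'cores': 100, 'ram': None, 'instances': 10}
exclusions = {'compute': {'name'}}

changes = defaultdict(dict)
for service, quota in current.items():
    for attr, value in quota.items():
        if attr in exclusions[service]:
            continue
        # Unset parameters and values that already match are ignored.
        if params.get(attr) is not None and params[attr] != value:
            changes[service][attr] = params[attr]

print(dict(changes))  # {'compute': {'cores': 100}}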
def _system_state_change(self, project_quota_output):
"""
@@ -375,86 +441,54 @@ class QuotaModule(OpenStackModule):
the desired quota settings set on the module params.
"""
- changes_required, quota_change_request = self._system_state_change_details(
- project_quota_output
- )
-
- if changes_required:
+ if self.params['state'] == 'absent':
return True
- else:
- return False
- def run(self):
- cloud_params = dict(self.params)
-
- # In order to handle the different volume types we update module params after.
- dynamic_types = [
- 'gigabytes_types',
- 'snapshots_types',
- 'volumes_types',
- ]
+ return bool(self._build_update(project_quota_output))
- for dynamic_type in dynamic_types:
- for k, v in self.params[dynamic_type].items():
- self.params[k] = int(v)
+ def run(self):
+ project = self.conn.identity.find_project(
+ self.params['name'], ignore_missing=False)
# Get current quota values
- project_quota_output = self._get_quotas(cloud_params['name'])
- changes_required = False
-
- if self.params['state'] == "absent":
- # If a quota state is set to absent we should assume there will be changes.
- # The default quota values are not accessible so we can not determine if
- # no changes will occur or not.
- if self.ansible.check_mode:
- self.exit_json(changed=True)
-
- # Calling delete_network_quotas when a quota has not been set results
- # in an error, according to the sdk docs it should return the
- # current quota.
- # The following error string is returned:
- # network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
- neutron_msg1 = "network client call failed: Quota for tenant"
- neutron_msg2 = "could not be found"
-
- for quota_type in project_quota_output.keys():
- quota_call = getattr(self.conn, 'delete_%s_quotas' % (quota_type))
- try:
- quota_call(cloud_params['name'])
- except Exception as e:
- error_msg = str(e)
- if error_msg.find(neutron_msg1) > -1 and error_msg.find(neutron_msg2) > -1:
- pass
- else:
- self.fail_json(msg=str(e), extra_data=e.extra_data)
-
- project_quota_output = self._get_quotas(cloud_params['name'])
- changes_required = True
-
- elif self.params['state'] == "present":
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(
- project_quota_output))
-
- changes_required, quota_change_request = self._system_state_change_details(
- project_quota_output
- )
-
- if changes_required:
- for quota_type in quota_change_request.keys():
- quota_call = getattr(self.conn, 'set_%s_quotas' % (quota_type))
- quota_call(cloud_params['name'], **quota_change_request[quota_type])
-
- # Get quota state post changes for validation
- project_quota_update = self._get_quotas(cloud_params['name'])
-
- if project_quota_output == project_quota_update:
- self.fail_json(msg='Could not apply quota update')
-
- project_quota_output = project_quota_update
-
- self.exit_json(
- changed=changes_required, openstack_quotas=project_quota_output)
+ quotas = self._get_quotas(project)
+
+ changed = False
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._system_state_change(quotas))
+
+ if self.params['state'] == 'absent':
+ # If a quota state is set to absent we should assume there will be
+ # changes. The default quota values are not accessible so we can
+ # not determine if no changes will occur or not.
+ changed = True
+ self.conn.compute.revert_quota_set(project)
+ if 'network' in quotas:
+ self.conn.network.delete_quota(project.id)
+ if 'volume' in quotas:
+ self.conn.block_storage.revert_quota_set(project)
+
+ # Necessary since we can't tell what the default quotas are
+ quotas = self._get_quotas(project)
+
+ elif self.params['state'] == 'present':
+ changes = self._build_update(quotas)
+
+ if changes:
+ if 'volume' in changes:
+ self.conn.block_storage.update_quota_set(
+ quotas['volume'], **changes['volume'])
+ if 'compute' in changes:
+ self.conn.compute.update_quota_set(
+ quotas['compute'], **changes['compute'])
+ if 'network' in changes:
+ quotas['network'] = self.conn.network.update_quota(
+ project.id, **changes['network'])
+ changed = True
+
+ quotas = {k: v.to_dict(computed=False) for k, v in quotas.items()}
+ self.exit_json(changed=changed, quotas=quotas)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/recordset.py b/ansible_collections/openstack/cloud/plugins/modules/recordset.py
index 921d6efaa..9f86c459b 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/recordset.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/recordset.py
@@ -1,4 +1,6 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 Hewlett-Packard Enterprise
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -48,10 +50,6 @@ options:
- Name or ID of the zone which manages the recordset
required: true
type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -164,12 +162,12 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class DnsRecordsetModule(OpenStackModule):
argument_spec = dict(
- description=dict(required=False, default=None),
+ description=dict(),
name=dict(required=True),
- records=dict(required=False, type='list', elements='str'),
- recordset_type=dict(required=False, choices=['a', 'aaaa', 'mx', 'cname', 'txt', 'ns', 'srv', 'ptr', 'caa']),
+ records=dict(type='list', elements='str'),
+ recordset_type=dict(choices=['a', 'aaaa', 'mx', 'cname', 'txt', 'ns', 'srv', 'ptr', 'caa']),
state=dict(default='present', choices=['absent', 'present']),
- ttl=dict(required=False, type='int'),
+ ttl=dict(type='int'),
zone=dict(required=True),
)
@@ -183,9 +181,15 @@ class DnsRecordsetModule(OpenStackModule):
module_min_sdk_version = '0.28.0'
def _needs_update(self, params, recordset):
- for k in ('description', 'records', 'ttl'):
+ if params['records'] is not None:
+ params['records'] = sorted(params['records'])
+ if recordset['records'] is not None:
+ recordset['records'] = sorted(recordset['records'])
+ for k in ('description', 'records', 'ttl', 'type'):
if k not in params:
continue
+ if k not in recordset:
+ return True
if params[k] is not None and params[k] != recordset[k]:
return True
return False
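Sorting both record lists before comparing, as done above, makes the check order-insensitive. A short illustration with hypothetical records:

# Hypothetical record lists, for illustration only.
desired = ['192.0.2.2', '192.0.2.1']
existing = ['192.0.2.1', '192.0.2.2']

# A plain comparison is order sensitive and would report a bogus change:
print(desired != existing)                  # True

# Sorting both sides first, as the module does, ignores ordering:
print(sorted(desired) != sorted(existing))  # False, no update needed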
@@ -219,11 +223,8 @@ class DnsRecordsetModule(OpenStackModule):
state = self.params.get('state')
ttl = self.params.get('ttl')
- recordsets = self.conn.search_recordsets(zone, name_or_id=name)
-
- recordset = None
- if recordsets:
- recordset = recordsets[0]
+ zone = self.conn.dns.find_zone(name_or_id=zone, ignore_missing=False)
+ recordset = self.conn.dns.find_recordset(zone, name)
if self.ansible.check_mode:
self.exit_json(changed=self._system_state_change(state, recordset))
@@ -233,20 +234,14 @@ class DnsRecordsetModule(OpenStackModule):
kwargs = self._build_params()
if recordset is None:
kwargs['ttl'] = ttl or 300
- type = kwargs.pop('type', None)
- if type is not None:
- kwargs['recordset_type'] = type
- recordset = self.conn.create_recordset(zone=zone, name=name,
- **kwargs)
+ recordset = self.conn.dns.create_recordset(zone, name=name, **kwargs)
changed = True
elif self._needs_update(kwargs, recordset):
- type = kwargs.pop('type', None)
- recordset = self.conn.update_recordset(zone, recordset['id'],
- **kwargs)
+ recordset = self.conn.dns.update_recordset(recordset, **kwargs)
changed = True
self.exit_json(changed=changed, recordset=recordset)
elif state == 'absent' and recordset is not None:
- self.conn.delete_recordset(zone, recordset['id'])
+ self.conn.dns.delete_recordset(recordset)
changed = True
self.exit_json(changed=changed)
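The same find-then-create-or-update flow can be sketched directly against openstacksdk's DNS proxy; the cloud, zone and recordset names below are hypothetical placeholders:

import openstack

conn = openstack.connect(cloud='mycloud')  # hypothetical cloud name
zone = conn.dns.find_zone(name_or_id='example.net.', ignore_missing=False)
recordset = conn.dns.find_recordset(zone, 'www.example.net.')

if recordset is None:
    # No matching recordset yet, create one.
    recordset = conn.dns.create_recordset(
        zone, name='www.example.net.', type='A',
        records=['192.0.2.1'], ttl=300)
else:
    # Recordset exists, push the desired records.
    recordset = conn.dns.update_recordset(recordset, records=['192.0.2.1'])

print(recordset.to_dict(computed=False))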
diff --git a/ansible_collections/openstack/cloud/plugins/modules/resource.py b/ansible_collections/openstack/cloud/plugins/modules/resource.py
new file mode 100644
index 000000000..b2f5e6a2e
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/resource.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023 Jakob Meng, <jakobmeng@web.de>
+# Copyright (c) 2023 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r'''
+---
+module: resource
+short_description: Manage an OpenStack cloud resource
+author: OpenStack Ansible SIG
+description:
+ - Create, update and delete an OpenStack cloud resource.
+options:
+ attributes:
+ description:
+ - "Resource attributes which are defined in openstacksdk's resource
+ classes."
+ - I(attributes) is a set of key-value pairs where each key is an attribute
+ name such as C(id) and each value holds its corresponding attribute value
+ such as C(ddad2d86-02a6-444d-80ae-1cc2fb023784).
+ - Define attribute keys C(id) or C(name) or any set of attribute keys
+ which uniquely identify a resource. This module fails if multiple
+ resources match the given set of attributes.
+ - For a complete list of attributes open any resource class inside
+ openstacksdk such as file C(openstack/compute/v2/server.py) in
+ U(https://opendev.org/openstack/openstacksdk/) for server attributes.
+ required: true
+ type: dict
+ non_updateable_attributes:
+ description:
+ - List of attribute names which cannot be updated.
+ - When I(non_updateable_attributes) is not specified, then all attributes
+ in I(attributes) will be compared to an existing resource during
+ updates.
+ - When both I(updateable_attributes) and I(non_updateable_attributes) are
+ specified, then only attributes which are listed in
+ I(updateable_attributes) but not in I(non_updateable_attributes) will
+ be considered during updates.
+ type: list
+ elements: str
+ service:
+ description:
+ - OpenStack service which this resource is part of.
+ - Examples are C(block_storage), C(compute) or C(network).
+ - "I(service) must be a C(lowercase) name of a OpenStack service as
+ used in openstacksdk. For a list of available services visit
+ U(https://opendev.org/openstack/openstacksdk): Most subdirectories
+ in the C(openstack) directory correspond to a OpenStack service,
+ except C(cloud), C(common) and other auxiliary directories."
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the resource should be C(present) or C(absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ type:
+ description:
+ - Typename of the resource.
+ - Examples are C(ip), C(network), C(router) or C(server).
+ - "I(type) must be a C(lowercase) name of a openstacksdk resource class.
+ Resource classes are defined in openstacksdk's service folders. For
+ example, visit U(https://opendev.org/openstack/openstacksdk), change
+ to C(openstack) directory, change to any service directory such as
+ C(compute), choose a api version directory such as C(v2) and find all
+ available resource classes such as C(Server) inside C(*.py) files."
+ required: true
+ type: str
+ updateable_attributes:
+ description:
+ - List of attribute names which can be updated.
+ - When I(updateable_attributes) is not specified, then all attributes
+ in I(attributes) will be compared to an existing resource during
+ updates.
+ - When both I(updateable_attributes) and I(non_updateable_attributes) are
+ specified, then only attributes which are listed in
+ I(updateable_attributes) but not in I(non_updateable_attributes) will
+ be considered during updates.
+ type: list
+ elements: str
+ wait:
+ description:
+ - Whether Ansible should wait until the resource has reached its target
+ I(state).
+ - Only a subset of OpenStack resources report a status. Resources which
+ do not support status processing will block indefinitely if I(wait) is
+ set to C(true).
+ type: bool
+ default: false
+notes:
+ - "This module does not support all OpenStack cloud resources. Resource
+ handling must follow openstacksdk's CRUD structure using and providing
+ C(<service>.<type>s), C(<service>.find_<type>),
+ C(<service>.create_<type>), C(<service>.update_<type>) and
+ C(<service>.delete_<type>) functions. The module will fail before
+ applying any changes if these functions cannot be found."
+extends_documentation_fragment:
+ - openstack.cloud.openstack
+'''
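The notes above describe the CRUD naming convention the module depends on. A rough sketch of how those functions can be resolved with plain openstacksdk, assuming the devstack-admin cloud used in the examples below; this is not the module's actual implementation:

import openstack

# Hypothetical cloud name and resource selection, for illustration.
conn = openstack.connect(cloud='devstack-admin')
service_name, type_name = 'network', 'network'

proxy = getattr(conn, service_name)
# The naming convention described in the notes above:
list_fn = getattr(proxy, type_name + 's')         # network.networks
find_fn = getattr(proxy, 'find_' + type_name)     # network.find_network
create_fn = getattr(proxy, 'create_' + type_name)
update_fn = getattr(proxy, 'update_' + type_name)
delete_fn = getattr(proxy, 'delete_' + type_name)

resource = find_fn('ansible_network_external')
print(resource.to_dict(computed=False) if resource else None)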
+
+RETURN = r'''
+resource:
+ description: Dictionary describing the identified (and possibly modified)
+ OpenStack cloud resource.
+ returned: On success when I(state) is C(present).
+ type: dict
+'''
+
+EXAMPLES = r'''
+- name: Create external network
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: network
+ attributes:
+ name: ansible_network_external
+ is_router_external: true
+ wait: true
+ register: network_external
+
+- name: Create external subnet
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: subnet
+ attributes:
+ cidr: 10.6.6.0/24
+ ip_version: 4
+ name: ansible_external_subnet
+ network_id: "{{ network_external.resource.id }}"
+ register: subnet_external
+
+- name: Create external port
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: port
+ attributes:
+ name: ansible_port_external
+ network_id: "{{ network_external.resource.id }}"
+ fixed_ips:
+ - ip_address: 10.6.6.50
+ non_updateable_attributes:
+ - fixed_ips
+
+- name: Create internal network
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: network
+ attributes:
+ name: ansible_network_internal
+ is_router_external: false
+ wait: true
+ register: network_internal
+
+- name: Create internal subnet
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: subnet
+ attributes:
+ cidr: 10.7.7.0/24
+ ip_version: 4
+ name: ansible_internal_subnet
+ network_id: "{{ network_internal.resource.id }}"
+ register: subnet_internal
+
+- name: Create internal port
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: port
+ attributes:
+ name: ansible_port_internal
+ network_id: "{{ network_internal.resource.id }}"
+ fixed_ips:
+ - ip_address: 10.7.7.100
+ subnet_id: "{{ subnet_internal.resource.id }}"
+ register: port_internal
+
+- name: Create router
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: router
+ attributes:
+ name: ansible_router
+ external_gateway_info:
+ enable_snat: true
+ external_fixed_ips:
+ - ip_address: 10.6.6.10
+ subnet_id: "{{ subnet_external.resource.id }}"
+ network_id: "{{ network_external.resource.id }}"
+ wait: true
+
+- name: Attach router to internal subnet
+ openstack.cloud.router:
+ cloud: devstack-admin
+ name: ansible_router
+ network: "{{ network_external.resource.id }}"
+ external_fixed_ips:
+ - ip: 10.6.6.10
+ subnet: "{{ subnet_external.resource.id }}"
+ interfaces:
+ - net: "{{ network_internal.resource.id }}"
+ subnet: "{{ subnet_internal.resource.id }}"
+ portip: 10.7.7.1
+
+- name: Create floating ip address
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: ip
+ attributes:
+ name: 10.6.6.150
+ floating_ip_address: 10.6.6.150
+ floating_network_id: "{{ network_external.resource.id }}"
+ port_id: "{{ port_internal.resource.id }}"
+ register: ip
+
+- name: List images
+ openstack.cloud.resources:
+ cloud: devstack-admin
+ service: image
+ type: image
+ register: images
+
+- name: Identify CirrOS image id
+ set_fact:
+ image_id: "{{
+ images.resources|community.general.json_query(query)|first }}"
+ vars:
+ query: "[?starts_with(name, 'cirros')].id"
+
+- name: List compute flavors
+ openstack.cloud.resources:
+ cloud: devstack-admin
+ service: compute
+ type: flavor
+ register: flavors
+
+- name: Identify m1.tiny flavor id
+ set_fact:
+ flavor_id: "{{
+ flavors.resources|community.general.json_query(query)|first }}"
+ vars:
+ query: "[?name == 'm1.tiny'].id"
+
+- name: Create server
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: compute
+ type: server
+ attributes:
+ name: ansible_server
+ image_id: "{{ image_id }}"
+ flavor_id: "{{ flavor_id }}"
+ networks:
+ - uuid: "{{ network_internal.resource.id }}"
+ port: "{{ port_internal.resource.id }}"
+ non_updateable_attributes:
+ - name
+ - image_id
+ - flavor_id
+ - networks
+ wait: true
+
+- name: Detach floating ip address
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: ip
+ attributes:
+ floating_ip_address: 10.6.6.150
+ port_id: !!null
+
+- name: Delete server
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: compute
+ type: server
+ attributes:
+ name: ansible_server
+ state: absent
+ wait: true
+
+- name: Delete floating ip address
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: ip
+ attributes:
+ floating_ip_address: 10.6.6.150
+ state: absent
+
+- name: Detach router from internal subnet
+ openstack.cloud.router:
+ cloud: devstack-admin
+ name: ansible_router
+ network: "{{ network_external.resource.id }}"
+ external_fixed_ips:
+ - ip: 10.6.6.10
+ subnet: "{{ subnet_external.resource.id }}"
+ interfaces: []
+
+- name: Delete router
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: router
+ attributes:
+ name: ansible_router
+ state: absent
+ wait: true
+
+- name: Delete internal port
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: port
+ attributes:
+ name: ansible_port_internal
+ state: absent
+
+- name: Delete internal subnet
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: subnet
+ attributes:
+ name: ansible_internal_subnet
+ state: absent
+
+- name: Delete internal network
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: network
+ attributes:
+ name: ansible_network_internal
+ state: absent
+ wait: true
+
+- name: Delete external port
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: port
+ attributes:
+ name: ansible_port_external
+ state: absent
+
+- name: Delete external subnet
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: subnet
+ attributes:
+ name: ansible_external_subnet
+ state: absent
+
+- name: Delete external network
+ openstack.cloud.resource:
+ cloud: devstack-admin
+ service: network
+ type: network
+ attributes:
+ name: ansible_network_external
+ state: absent
+ wait: true
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+from ansible_collections.openstack.cloud.plugins.module_utils.resource import StateMachine
+
+
+class ResourceModule(OpenStackModule):
+ argument_spec = dict(
+ attributes=dict(required=True, type='dict'),
+ non_updateable_attributes=dict(type='list', elements='str'),
+ service=dict(required=True),
+ state=dict(default='present', choices=['absent', 'present']),
+ type=dict(required=True),
+ updateable_attributes=dict(type='list', elements='str'),
+ wait=dict(default=False, type='bool'),
+ )
+
+ module_kwargs = dict(
+ supports_check_mode=True
+ )
+
+ def run(self):
+ service_name = self.params['service']
+ type_name = self.params['type']
+
+ sm = StateMachine(connection=self.conn,
+ service_name=service_name,
+ type_name=type_name,
+ sdk=self.sdk)
+
+ kwargs = dict((k, self.params[k])
+ for k in ['attributes', 'non_updateable_attributes',
+ 'state', 'timeout', 'wait',
+ 'updateable_attributes'])
+
+ resource, is_changed = sm(check_mode=self.ansible.check_mode, **kwargs)
+
+ if resource is None:
+ self.exit_json(changed=is_changed)
+ else:
+ self.exit_json(changed=is_changed,
+ resource=resource.to_dict(computed=False))
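The updateable_attributes and non_updateable_attributes options documented above narrow down which keys are compared during an update. A rough reading of those semantics with hypothetical values, not a copy of StateMachine's internals:

# Hypothetical inputs illustrating the documented option semantics.
attributes = {'name': 'ansible_server', 'description': 'demo', 'flavor_id': '42'}
updateable_attributes = ['name', 'description']
non_updateable_attributes = ['name']

candidates = set(updateable_attributes
                 if updateable_attributes is not None
                 else attributes)
candidates -= set(non_updateable_attributes or [])

# Only these keys would be compared against an existing resource on update.
print(sorted(candidates & set(attributes)))  # ['description']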
+
+
+def main():
+ module = ResourceModule()
+ module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/resources.py b/ansible_collections/openstack/cloud/plugins/modules/resources.py
new file mode 100644
index 000000000..f6845daa2
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/resources.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023 Jakob Meng, <jakobmeng@web.de>
+# Copyright (c) 2023 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r'''
+---
+module: resources
+short_description: List OpenStack cloud resources
+author: OpenStack Ansible SIG
+description:
+ - List OpenStack cloud resources.
+options:
+ service:
+ description:
+ - OpenStack service which this resource is part of.
+ - Examples are C(block_storage), C(compute) or C(network).
+ - "I(service) must be a C(lowercase) name of a OpenStack service as
+ used in openstacksdk. For a list of available services visit
+ U(https://opendev.org/openstack/openstacksdk): Most subdirectories
+ in the C(openstack) directory correspond to a OpenStack service,
+ except C(cloud), C(common) and other auxiliary directories."
+ required: true
+ type: str
+ parameters:
+ description:
+ - Query parameters passed to OpenStack API for results filtering.
+ - I(parameters) is a set of key-value pairs where each key is an attribute
+ name such as C(id) and each value holds its corresponding attribute value
+ such as C(ddad2d86-02a6-444d-80ae-1cc2fb023784).
+ - For a complete list of valid query parameters open any resource class
+ inside openstacksdk such as file C(openstack/compute/v2/server.py) in
+ U(https://opendev.org/openstack/openstacksdk/) and consult variable
+ C(_query_mapping).
+ type: dict
+ type:
+ description:
+ - Typename of the resource.
+ - Examples are C(ip), C(network), C(router) or C(server).
+ - "I(type) must be a C(lowercase) name of a openstacksdk resource class.
+ Resource classes are defined in openstacksdk's service folders. For
+ example, visit U(https://opendev.org/openstack/openstacksdk), change
+ to C(openstack) directory, change to any service directory such as
+ C(compute), choose a api version directory such as C(v2) and find all
+ available resource classes such as C(Server) inside C(*.py) files."
+ required: true
+ type: str
+notes:
+ - "This module does not support all OpenStack cloud resources. Resource
+ handling must follow openstacksdk's CRUD structure using and providing
+ a C(<service>.<type>s) function. The module will fail if this function
+ cannot be found."
+extends_documentation_fragment:
+ - openstack.cloud.openstack
+'''
+
+RETURN = r'''
+resources:
+ description: List of dictionaries describing the identified OpenStack cloud resources.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = r'''
+- name: List images
+ openstack.cloud.resources:
+ cloud: devstack-admin
+ service: image
+ type: image
+ register: images
+
+- name: Identify CirrOS image id
+ set_fact:
+ image_id: "{{
+ images.resources|community.general.json_query(query)|first }}"
+ vars:
+ query: "[?starts_with(name, 'cirros')].id"
+
+- name: List compute flavors
+ openstack.cloud.resources:
+ cloud: devstack-admin
+ service: compute
+ type: flavor
+ register: flavors
+
+- name: Identify m1.tiny flavor id
+ set_fact:
+ flavor_id: "{{
+ flavors.resources|community.general.json_query(query)|first }}"
+ vars:
+ query: "[?name == 'm1.tiny'].id"
+
+- name: List public network
+ openstack.cloud.resources:
+ cloud: devstack-admin
+ service: network
+ type: network
+ parameters:
+ name: public
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+class ResourcesModule(OpenStackModule):
+ argument_spec = dict(
+ parameters=dict(type='dict'),
+ service=dict(required=True),
+ type=dict(required=True),
+ )
+
+ module_kwargs = dict(
+ supports_check_mode=True
+ )
+
+ def run(self):
+ service_name = self.params['service']
+ type_name = self.params['type']
+
+ session = getattr(self.conn, service_name)
+ list_function = getattr(session, '{0}s'.format(type_name))
+
+ parameters = self.params['parameters']
+ resources = \
+ list_function(**parameters) if parameters else list_function()
+
+ self.exit_json(
+ changed=False,
+ resources=[r.to_dict(computed=False) for r in resources])
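For reference, the equivalent of calling this module with service=network, type=network and parameters={name: public}, expressed directly with openstacksdk and assuming the devstack-admin cloud from the examples:

import openstack

# Hypothetical cloud name; 'name' is a valid query parameter for networks.
conn = openstack.connect(cloud='devstack-admin')

for net in conn.network.networks(name='public'):
    print(net.to_dict(computed=False))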
+
+
+def main():
+ module = ResourcesModule()
+ module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/openstack/cloud/plugins/modules/role_assignment.py b/ansible_collections/openstack/cloud/plugins/modules/role_assignment.py
index 5ad7dce42..5ec699232 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/role_assignment.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/role_assignment.py
@@ -1,66 +1,92 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: role_assignment
-short_description: Associate OpenStack Identity users and roles
+short_description: Assign OpenStack identity groups and users to roles
author: OpenStack Ansible SIG
description:
- - Grant and revoke roles in either project or domain context for
- OpenStack Identity Users.
+ - Grant and revoke roles in either project or domain context for
+ OpenStack identity (Keystone) users and groups.
options:
- role:
- description:
- - Name or ID for the role.
- required: true
- type: str
- user:
- description:
- - Name or ID for the user. If I(user) is not specified, then
- I(group) is required. Both may not be specified.
- type: str
- group:
- description:
- - Name or ID for the group. Valid only with keystone version 3.
- If I(group) is not specified, then I(user) is required. Both
- may not be specified.
- type: str
- project:
- description:
- - Name or ID of the project to scope the role association to.
- If you are using keystone version 2, then this value is required.
- type: str
- domain:
- description:
- - Name or ID of the domain to scope the role association to. Valid only
- with keystone version 3, and required if I(project) is not specified.
- type: str
- state:
- description:
- - Should the roles be present or absent on the user.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ domain:
+ description:
+ - Name or ID of the domain to scope the role association to.
+ - Valid only with keystone version 3.
+ - Required if I(project) is not specified.
+ - When I(project) is specified, then I(domain) will not be used for
+ scoping the role association, only for finding resources.
+ - "When scoping the role association, I(project) has precedence over
+ I(domain) and I(domain) has precedence over I(system): When I(project)
+ is specified, then I(domain) and I(system) are not used for role
+ association. When I(domain) is specified, then I(system) will not be
+ used for role association."
+ type: str
+ group:
+ description:
+ - Name or ID for the group.
+ - Valid only with keystone version 3.
+ - If I(group) is not specified, then I(user) is required. Both may not be
+ specified at the same time.
+ type: str
+ project:
+ description:
+ - Name or ID of the project to scope the role association to.
+ - If you are using keystone version 2, then this value is required.
+ - When I(project) is specified, then I(domain) will not be used for
+ scoping the role association, only for finding resources.
+ - "When scoping the role association, I(project) has precedence over
+ I(domain) and I(domain) has precedence over I(system): When I(project)
+ is specified, then I(domain) and I(system) are not used for role
+ association. When I(domain) is specified, then I(system) will not be
+ used for role association."
+ type: str
+ role:
+ description:
+ - Name or ID for the role.
+ required: true
+ type: str
+ state:
+ description:
+ - Should the roles be present or absent on the user.
+ choices: [present, absent]
+ default: present
+ type: str
+ system:
+ description:
+ - Name of system to scope the role association to.
+ - Valid only with keystone version 3.
+ - Required if I(project) and I(domain) are not specified.
+ - "When scoping the role association, I(project) has precedence over
+ I(domain) and I(domain) has precedence over I(system): When I(project)
+ is specified, then I(domain) and I(system) are not used for role
+ association. When I(domain) is specified, then I(system) will not be
+ used for role association."
+ type: str
+ user:
+ description:
+ - Name or ID for the user.
+ - If I(user) is not specified, then I(group) is required. Both may not be
+ specified at the same time.
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Grant an admin role on the user admin in the project project1
-- openstack.cloud.role_assignment:
+EXAMPLES = r'''
+- name: Grant an admin role on the user admin in the project project1
+ openstack.cloud.role_assignment:
cloud: mycloud
user: admin
role: admin
project: project1
-# Revoke the admin role from the user barney in the newyork domain
-- openstack.cloud.role_assignment:
+- name: Revoke the admin role from the user barney in the newyork domain
+ openstack.cloud.role_assignment:
cloud: mycloud
state: absent
user: barney
@@ -68,117 +94,95 @@ EXAMPLES = '''
domain: newyork
'''
-RETURN = '''
-#
-'''
-
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class IdentityRoleAssignmentModule(OpenStackModule):
argument_spec = dict(
+ domain=dict(),
+ group=dict(),
+ project=dict(),
role=dict(required=True),
- user=dict(required=False),
- group=dict(required=False),
- project=dict(required=False),
- domain=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
+ system=dict(),
+ user=dict(),
)
module_kwargs = dict(
required_one_of=[
- ['user', 'group']
+ ('user', 'group'),
+ ('domain', 'project', 'system'),
],
supports_check_mode=True
)
- def _system_state_change(self, state, assignment):
- if state == 'present' and not assignment:
- return True
- elif state == 'absent' and assignment:
- return True
- return False
-
- def _build_kwargs(self, user, group, project, domain):
- kwargs = {}
- if user:
- kwargs['user'] = user
- if group:
- kwargs['group'] = group
- if project:
- kwargs['project'] = project
- if domain:
- kwargs['domain'] = domain
- return kwargs
-
def run(self):
- role = self.params.get('role')
- user = self.params.get('user')
- group = self.params.get('group')
- project = self.params.get('project')
- domain = self.params.get('domain')
- state = self.params.get('state')
-
filters = {}
find_filters = {}
- domain_id = None
-
- r = self.conn.identity.find_role(role)
- if r is None:
- self.fail_json(msg="Role %s is not valid" % role)
- filters['role'] = r['id']
-
- if domain:
- d = self.conn.identity.find_domain(domain)
- if d is None:
- self.fail_json(msg="Domain %s is not valid" % domain)
- domain_id = d['id']
- find_filters['domain_id'] = domain_id
- if user:
- u = self.conn.identity.find_user(user, **find_filters)
- if u is None:
- self.fail_json(msg="User %s is not valid" % user)
- filters['user'] = u['id']
-
- if group:
- # self.conn.identity.find_group() does not accept
- # a domain_id argument in Train's openstacksdk
- g = self.conn.get_group(group, **find_filters)
- if g is None:
- self.fail_json(msg="Group %s is not valid" % group)
- filters['group'] = g['id']
- if project:
- p = self.conn.identity.find_project(project, **find_filters)
- if p is None:
- self.fail_json(msg="Project %s is not valid" % project)
- filters['project'] = p['id']
-
- # Keeping the self.conn.list_role_assignments because it calls directly
- # the identity.role_assignments and there are some logics for the
- # filters that won't worth rewrite here.
- assignment = self.conn.list_role_assignments(filters=filters)
+ kwargs = {}
+ role_name_or_id = self.params['role']
+ role = self.conn.identity.find_role(role_name_or_id,
+ ignore_missing=False)
+ filters['role_id'] = role['id']
+
+ domain_name_or_id = self.params['domain']
+ if domain_name_or_id is not None:
+ domain = self.conn.identity.find_domain(
+ domain_name_or_id, ignore_missing=False)
+ filters['scope_domain_id'] = domain['id']
+ find_filters['domain_id'] = domain['id']
+ kwargs['domain'] = domain['id']
+
+ user_name_or_id = self.params['user']
+ if user_name_or_id is not None:
+ user = self.conn.identity.find_user(
+ user_name_or_id, ignore_missing=False, **find_filters)
+ filters['user_id'] = user['id']
+ kwargs['user'] = user['id']
+
+ group_name_or_id = self.params['group']
+ if group_name_or_id is not None:
+ group = self.conn.identity.find_group(
+ group_name_or_id, ignore_missing=False, **find_filters)
+ filters['group_id'] = group['id']
+ kwargs['group'] = group['id']
+
+ system_name = self.params['system']
+ if system_name is not None:
+ # domain has precedence over system
+ if 'scope_domain_id' not in filters:
+ filters['scope.system'] = system_name
+
+ kwargs['system'] = system_name
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id is not None:
+ project = self.conn.identity.find_project(
+ project_name_or_id, ignore_missing=False, **find_filters)
+ filters['scope_project_id'] = project['id']
+ kwargs['project'] = project['id']
+
+ # project has precedence over domain and system
+ filters.pop('scope_domain_id', None)
+ filters.pop('scope.system', None)
+
+ role_assignments = list(self.conn.identity.role_assignments(**filters))
+
+ state = self.params['state']
if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(state, assignment))
-
- changed = False
-
- # Both grant_role and revoke_role calls directly the proxy layer, and
- # has some logic that won't worth to rewrite here so keeping it is a
- # good idea
- if state == 'present':
- if not assignment:
- kwargs = self._build_kwargs(user, group, project, domain_id)
- self.conn.grant_role(role, **kwargs)
- changed = True
-
- elif state == 'absent':
- if assignment:
- kwargs = self._build_kwargs(user, group, project, domain_id)
- self.conn.revoke_role(role, **kwargs)
- changed = True
-
- self.exit_json(changed=changed)
+ self.exit_json(
+ changed=((state == 'present' and not role_assignments)
+ or (state == 'absent' and role_assignments)))
+
+ if state == 'present' and not role_assignments:
+ self.conn.grant_role(role['id'], **kwargs)
+ self.exit_json(changed=True)
+ elif state == 'absent' and role_assignments:
+ self.conn.revoke_role(role['id'], **kwargs)
+ self.exit_json(changed=True)
+ else:
+ self.exit_json(changed=False)
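The scope precedence described in the documentation (project over domain over system) translates directly into the filters passed to identity.role_assignments(). A stand-alone sketch with hypothetical IDs:

# Hypothetical IDs, purely for illustrating the documented scope precedence.
role_id, project_id, domain_id, system = \
    'role-uuid', 'project-uuid', 'domain-uuid', 'all'

filters = {'role_id': role_id}
if domain_id is not None:
    filters['scope_domain_id'] = domain_id
if system is not None and 'scope_domain_id' not in filters:
    # domain has precedence over system
    filters['scope.system'] = system
if project_id is not None:
    filters['scope_project_id'] = project_id
    # project has precedence over both domain and system
    filters.pop('scope_domain_id', None)
    filters.pop('scope.system', None)

print(filters)  # {'role_id': 'role-uuid', 'scope_project_id': 'project-uuid'}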
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/router.py b/ansible_collections/openstack/cloud/plugins/modules/router.py
index 58c5c124e..7002a4110 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/router.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/router.py
@@ -1,5 +1,6 @@
#!/usr/bin/python
-#
+# -*- coding: utf-8 -*-
+
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -13,53 +14,63 @@ description:
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
- state:
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- name:
- description:
- - Name to be give to the router
- required: true
- type: str
- admin_state_up:
- description:
- - Desired admin state of the created or existing router.
- type: bool
- default: 'yes'
- enable_snat:
- description:
+ enable_snat:
+ description:
- Enable Source NAT (SNAT) attribute.
- type: bool
- network:
- description:
- - Unique name or ID of the external gateway network.
- - required I(interfaces) or I(enable_snat) are provided.
- type: str
- project:
- description:
- - Unique name or ID of the project.
- type: str
- external_fixed_ips:
- description:
+ type: bool
+ external_fixed_ips:
+ description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
- address to assign on the subnet (ip). If no IP is specified,
+ address to assign on the subnet (ip_address). If no IP is specified,
one is automatically assigned from that subnet.
- type: list
- elements: dict
- suboptions:
- ip:
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
description: The fixed IP address to attempt to allocate.
- required: true
type: str
- subnet:
+ aliases: ['ip']
+ subnet_id:
description: The subnet to attach the IP address to.
+ required: true
type: str
- interfaces:
- description:
+ aliases: ['subnet']
+ external_gateway_info:
+ description:
+ - Information about the router's external gateway
+ type: dict
+ suboptions:
+ network:
+ description:
+ - Unique name or ID of the external gateway network.
+ - Required if I(external_fixed_ips) or I(enable_snat) are provided.
+ type: str
+ enable_snat:
+ description:
+ - Enable Source NAT (SNAT) attribute.
+ type: bool
+ external_fixed_ips:
+ description:
+ - The IP address parameters for the external gateway network. Each
+ is a dictionary with the subnet name or ID (subnet) and the IP
+ address to assign on the subnet (ip_address). If no IP is
+ specified, one is automatically assigned from that subnet.
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
+ description: The fixed IP address to attempt to allocate.
+ type: str
+ aliases: ['ip']
+ subnet_id:
+ description: The subnet to attach the IP address to.
+ required: true
+ type: str
+ aliases: ['subnet']
+ interfaces:
+ description:
- List of subnets to attach to the router internal interface. Default
gateway associated with the subnet will be automatically attached
with the router's internal interface.
@@ -70,12 +81,37 @@ options:
User defined portip is often required when a multiple router need
to be connected to a single subnet for which the default gateway has
been already used.
- type: list
- elements: raw
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ type: list
+ elements: raw
+ is_admin_state_up:
+ description:
+ - Desired admin state of the created or existing router.
+ type: bool
+ default: 'true'
+ aliases: ['admin_state_up']
+ name:
+ description:
+ - Name to be given to the router.
+ - This router attribute cannot be updated.
+ required: true
+ type: str
+ network:
+ description:
+ - Unique name or ID of the external gateway network.
+ - Required if I(external_fixed_ips) or I(enable_snat) are provided.
+ - This router attribute cannot be updated.
+ type: str
+ project:
+ description:
+ - Unique name or ID of the project.
+ - This router attribute cannot be updated.
+ type: str
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -87,14 +123,14 @@ EXAMPLES = '''
state: present
name: simple_router
-# Create a simple router, not attached to a gateway or subnets for a given project.
+# Create a router, not attached to a gateway or subnets for a given project.
- openstack.cloud.router:
cloud: mycloud
state: present
name: simple_router
project: myproj
-# Creates a router attached to ext_network1 on an IPv4 subnet and one
+# Creates a router attached to ext_network1 on an IPv4 subnet and with one
# internal subnet interface.
- openstack.cloud.router:
cloud: mycloud
@@ -103,11 +139,11 @@ EXAMPLES = '''
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
- ip: 172.24.4.2
+ ip_address: 172.24.4.2
interfaces:
- private-subnet
-# Create another router with two internal subnet interfaces.One with user defined port
+# Create a router with two internal subnet interfaces: one with a user defined
+# port ip and another with the default gateway.
- openstack.cloud.router:
cloud: mycloud
@@ -120,8 +156,8 @@ EXAMPLES = '''
portip: 10.1.1.10
- project-subnet
-# Create another router with two internal subnet interface.One with user defined port
-# ip and and another with default gateway.
+# Create a router with two internal subnet interfaces: one with a user defined
+# port ip and another with the default gateway.
- openstack.cloud.router:
cloud: mycloud
state: present
@@ -133,8 +169,8 @@ EXAMPLES = '''
portip: 10.1.1.10
- project-subnet
-# Create another router with two internal subnet interface. one with user defined port
-# ip and and another with default gateway.
+# Create a router with two internal subnet interfaces: one with a user defined
+# port ip and another with the default gateway.
- openstack.cloud.router:
cloud: mycloud
state: present
@@ -156,9 +192,9 @@ EXAMPLES = '''
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
- ip: 172.24.4.2
+ ip_address: 172.24.4.2
- subnet: ipv6-public-subnet
- ip: 2001:db8::3
+ ip_address: 2001:db8::3
# Delete router1
- openstack.cloud.router:
@@ -171,296 +207,431 @@ RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
- type: complex
+ type: dict
contains:
+ availability_zones:
+ description: Availability zones
+ returned: success
+ type: list
+ availability_zone_hints:
+ description: Availability zone hints
+ returned: success
+ type: list
+ created_at:
+ description: Date and time when the router was created
+ returned: success
+ type: str
+ description:
+ description: Description notes of the router
+ returned: success
+ type: str
+ external_gateway_info:
+ description: The external gateway information of the router.
+ returned: success
+ type: dict
+ sample: |
+ {
+ "enable_snat": true,
+ "external_fixed_ips": [
+ {
+ "ip_address": "10.6.6.99",
+ "subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
+ }
+ ]
+ }
+ flavor_id:
+ description: ID of the flavor of the router
+ returned: success
+ type: str
id:
- description: Router ID.
+ description: Unique UUID.
+ returned: success
type: str
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+ is_admin_state_up:
+ description: Network administrative state
+ returned: success
+ type: bool
+ is_distributed:
+ description: Indicates a distributed router.
+ returned: success
+ type: bool
+ is_ha:
+ description: Indicates a highly-available router.
+ returned: success
+ type: bool
name:
- description: Router name.
+ description: Name given to the router.
+ returned: success
type: str
sample: "router1"
- admin_state_up:
- description: Administrative state of the router.
- type: bool
- sample: true
+ project_id:
+ description: Project id associated with this router.
+ returned: success
+ type: str
+ revision_number:
+ description: Revision number
+ returned: success
+ type: int
+ routes:
+ description: The extra routes configuration for L3 router.
+ returned: success
+ type: list
status:
- description: The router status.
+ description: Router status.
+ returned: success
type: str
sample: "ACTIVE"
+ tags:
+ description: List of tags
+ returned: success
+ type: list
tenant_id:
- description: The tenant ID.
+ description: Owner tenant ID
+ returned: success
+ type: str
+ updated_at:
+ description: Date of last update on the router
+ returned: success
type: str
- sample: "861174b82b43463c9edc5202aadc60ef"
- external_gateway_info:
- description: The external gateway parameters.
- type: dict
- sample: {
- "enable_snat": true,
- "external_fixed_ips": [
- {
- "ip_address": "10.6.6.99",
- "subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
- }
- ]
- }
- routes:
- description: The extra routes configuration for L3 router.
- type: list
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-import itertools
+from collections import defaultdict
class RouterModule(OpenStackModule):
+
+ external_fixed_ips_spec = dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ ip_address=dict(aliases=["ip"]),
+ subnet_id=dict(required=True, aliases=["subnet"]),
+ ))
+
argument_spec = dict(
- state=dict(default='present', choices=['absent', 'present']),
- name=dict(required=True),
- admin_state_up=dict(type='bool', default=True),
enable_snat=dict(type='bool'),
- network=dict(default=None),
- interfaces=dict(type='list', default=None, elements='raw'),
- external_fixed_ips=dict(type='list', default=None, elements='dict'),
- project=dict(default=None)
+ external_fixed_ips=external_fixed_ips_spec,
+ external_gateway_info=dict(type='dict', options=dict(
+ network=dict(),
+ enable_snat=dict(type='bool'),
+ external_fixed_ips=external_fixed_ips_spec,
+ )),
+ interfaces=dict(type='list', elements='raw'),
+ is_admin_state_up=dict(type='bool',
+ default=True,
+ aliases=['admin_state_up']),
+ name=dict(required=True),
+ network=dict(),
+ project=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
)
- def _get_subnet_ids_from_ports(self, ports):
- return [fixed_ip['subnet_id'] for fixed_ip in
- itertools.chain.from_iterable(port['fixed_ips'] for port in ports if 'fixed_ips' in port)]
+ module_kwargs = dict(
+ mutually_exclusive=[
+ ('external_gateway_info', 'network'),
+ ('external_gateway_info', 'external_fixed_ips'),
+ ('external_gateway_info', 'enable_snat'),
+ ],
+ required_by={
+ 'external_fixed_ips': 'network',
+ 'enable_snat': 'network',
+ },
+ )
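A rough illustration of how the mutually_exclusive and required_by constraints above behave, using plain assertions instead of Ansible's own argument validation; the parameter values are hypothetical:

# Hypothetical module parameters, for illustration only.
params = {'network': 'ext_network1', 'enable_snat': True,
          'external_gateway_info': None, 'external_fixed_ips': None}

mutually_exclusive = [('external_gateway_info', 'network'),
                      ('external_gateway_info', 'external_fixed_ips'),
                      ('external_gateway_info', 'enable_snat')]
required_by = {'external_fixed_ips': 'network', 'enable_snat': 'network'}

for a, b in mutually_exclusive:
    # At most one option of each pair may be set.
    assert params.get(a) is None or params.get(b) is None, \
        '%s and %s cannot be combined' % (a, b)
for option, requirement in required_by.items():
    # Setting the option pulls in its requirement.
    if params.get(option) is not None:
        assert params.get(requirement) is not None, \
            '%s requires %s' % (option, requirement)

print('parameters are consistent')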
- def _needs_update(self, router, net,
- missing_port_ids,
- requested_subnet_ids,
- existing_subnet_ids,
- router_ifs_cfg):
+ def _needs_update(self, router, kwargs, external_fixed_ips, to_add,
+ to_remove, missing_port_ids):
"""Decide if the given router needs an update."""
- if router['admin_state_up'] != self.params['admin_state_up']:
+ if router['is_admin_state_up'] != self.params['is_admin_state_up']:
return True
- if router['external_gateway_info']:
- # check if enable_snat is set in module params
- if self.params['enable_snat'] is not None:
- if router['external_gateway_info'].get('enable_snat', True) != self.params['enable_snat']:
- return True
- if net:
- if not router['external_gateway_info']:
- return True
- elif router['external_gateway_info']['network_id'] != net['id']:
- return True
- # check if external_fixed_ip has to be added
- for external_fixed_ip in router_ifs_cfg['external_fixed_ips']:
- exists = False
-
- # compare the requested interface with existing, looking for an existing match
- for existing_if in router['external_gateway_info']['external_fixed_ips']:
- if existing_if['subnet_id'] == external_fixed_ip['subnet_id']:
- if 'ip' in external_fixed_ip:
- if existing_if['ip_address'] == external_fixed_ip['ip']:
- # both subnet id and ip address match
- exists = True
- break
- else:
- # only the subnet was given, so ip doesn't matter
- exists = True
- break
-
- # this interface isn't present on the existing router
- if not exists:
+ cur_ext_gw_info = router['external_gateway_info']
+ if 'external_gateway_info' in kwargs:
+ if cur_ext_gw_info is None:
+ # added external gateway info
return True
-
- # check if external_fixed_ip has to be removed
- if router_ifs_cfg['external_fixed_ips']:
- for external_fixed_ip in router['external_gateway_info']['external_fixed_ips']:
- obsolete = True
-
- # compare the existing interface with requested, looking for an requested match
- for requested_if in router_ifs_cfg['external_fixed_ips']:
- if external_fixed_ip['subnet_id'] == requested_if['subnet_id']:
- if 'ip' in requested_if:
- if external_fixed_ip['ip_address'] == requested_if['ip']:
- # both subnet id and ip address match
- obsolete = False
- break
- else:
- # only the subnet was given, so ip doesn't matter
- obsolete = False
- break
-
- # this interface isn't present on the existing router
- if obsolete:
+ update = kwargs['external_gateway_info']
+ for attr in ('enable_snat', 'network_id'):
+ if attr in update and cur_ext_gw_info[attr] != update[attr]:
return True
- else:
- # no external fixed ips requested
- if router['external_gateway_info'] \
- and router['external_gateway_info']['external_fixed_ips'] \
- and len(router['external_gateway_info']['external_fixed_ips']) > 1:
- # but router has several external fixed ips
- return True
- # check if internal port has to be added
- if router_ifs_cfg['internal_ports_missing']:
- return True
+ cur_ext_gw_info = router['external_gateway_info']
+ cur_ext_fips = (cur_ext_gw_info or {}) \
+ .get('external_fixed_ips', [])
+
+ # map of external fixed ip subnets to addresses
+ cur_fip_map = defaultdict(set)
+ for p in cur_ext_fips:
+ if 'ip_address' in p:
+ cur_fip_map[p['subnet_id']].add(p['ip_address'])
+ req_fip_map = defaultdict(set)
+ if external_fixed_ips is not None:
+ # User passed the desired external_fixed_ips configuration.
+ # Build a map of the requested ips/subnets.
+ for p in external_fixed_ips:
+ if 'ip_address' in p:
+ req_fip_map[p['subnet_id']].add(p['ip_address'])
+
+ # Check if external ip addresses need to be added
+ for fip in external_fixed_ips:
+ subnet = fip['subnet_id']
+ ip = fip.get('ip_address', None)
+ if subnet in cur_fip_map:
+ if ip is not None and ip not in cur_fip_map[subnet]:
+ # mismatching ip for subnet
+ return True
+ else:
+ # adding ext ip with subnet 'subnet'
+ return True
- if missing_port_ids:
- return True
+ # Check if external ip addresses need to be removed.
+ for fip in cur_ext_fips:
+ subnet = fip['subnet_id']
+ ip = fip['ip_address']
+ if subnet in req_fip_map:
+ if ip not in req_fip_map[subnet]:
+ # removing ext ip with subnet (ip clash)
+ return True
+ else:
+ # removing ext ip with subnet
+ return True
- # check if internal subnet has to be added or removed
- if set(requested_subnet_ids) != set(existing_subnet_ids):
+ # Check if internal interfaces need update
+ if to_add or to_remove or missing_port_ids:
+ # need to change interfaces
return True
return False
- def _build_kwargs(self, router, net):
+ def _build_kwargs(self, router, network, ext_fixed_ips):
kwargs = {
- 'admin_state_up': self.params['admin_state_up'],
+ 'is_admin_state_up': self.params['is_admin_state_up'],
}
- if router:
- kwargs['name_or_id'] = router['id']
- else:
+ if not router:
kwargs['name'] = self.params['name']
+ # We cannot update a router's name because the name is used to find
+ # routers, so only a router whose name already matches will be
+ # considered for updates.
- if net:
- kwargs['ext_gateway_net_id'] = net['id']
+ external_gateway_info = {}
+ if network:
+ external_gateway_info['network_id'] = network.id
# can't send enable_snat unless we have a network
- if self.params.get('enable_snat') is not None:
- kwargs['enable_snat'] = self.params['enable_snat']
-
- if self.params['external_fixed_ips']:
- kwargs['ext_fixed_ips'] = []
- for iface in self.params['external_fixed_ips']:
- subnet = self.conn.get_subnet(iface['subnet'])
- d = {'subnet_id': subnet['id']}
- if 'ip' in iface:
- d['ip_address'] = iface['ip']
- kwargs['ext_fixed_ips'].append(d)
- else:
+ if self.params['enable_snat'] is not None:
+ external_gateway_info['enable_snat'] = \
+ self.params['enable_snat']
+ if ext_fixed_ips:
+ external_gateway_info['external_fixed_ips'] = ext_fixed_ips
+ if external_gateway_info:
+ kwargs['external_gateway_info'] = external_gateway_info
+
+ if 'external_fixed_ips' not in external_gateway_info:
# no external fixed ips requested
- if router \
- and router['external_gateway_info'] \
- and router['external_gateway_info']['external_fixed_ips'] \
- and len(router['external_gateway_info']['external_fixed_ips']) > 1:
+
+ # get current external fixed ips
+ curr_ext_gw_info = \
+ router['external_gateway_info'] if router else None
+ curr_ext_fixed_ips = \
+ curr_ext_gw_info.get('external_fixed_ips', []) \
+ if curr_ext_gw_info else []
+
+ if len(curr_ext_fixed_ips) > 1:
# but router has several external fixed ips
# keep first external fixed ip only
- fip = router['external_gateway_info']['external_fixed_ips'][0]
- kwargs['ext_fixed_ips'] = [fip]
+ external_gateway_info['external_fixed_ips'] = [
+ curr_ext_fixed_ips[0]]
return kwargs
- def _build_router_interface_config(self, filters=None):
- external_fixed_ips = []
- internal_subnets = []
- internal_ports = []
+ def _build_router_interface_config(self, filters):
+ # Leave external_fixed_ips undefined (None) so they can be unset later
+ external_fixed_ips = None
internal_ports_missing = []
+ internal_ifaces = []
# Build external interface configuration
- if self.params['external_fixed_ips']:
- for iface in self.params['external_fixed_ips']:
- subnet = self.conn.get_subnet(iface['subnet'], filters)
- if not subnet:
- self.fail(msg='subnet %s not found' % iface['subnet'])
- new_external_fixed_ip = {'subnet_name': subnet.name, 'subnet_id': subnet.id}
- if 'ip' in iface:
- new_external_fixed_ip['ip'] = iface['ip']
- external_fixed_ips.append(new_external_fixed_ip)
+ ext_fixed_ips = None
+ if self.params['external_gateway_info']:
+ ext_fixed_ips = self.params['external_gateway_info'] \
+ .get('external_fixed_ips')
+ ext_fixed_ips = ext_fixed_ips or self.params['external_fixed_ips']
+ if ext_fixed_ips:
+ # User passed external_fixed_ips configuration. Initialize ips list
+ external_fixed_ips = []
+ for iface in ext_fixed_ips:
+ subnet = self.conn.network.find_subnet(
+ iface['subnet_id'], ignore_missing=False, **filters)
+ fip = dict(subnet_id=subnet.id)
+ if 'ip_address' in iface:
+ fip['ip_address'] = iface['ip_address']
+ external_fixed_ips.append(fip)
# Build internal interface configuration
if self.params['interfaces']:
internal_ips = []
for iface in self.params['interfaces']:
if isinstance(iface, str):
- subnet = self.conn.get_subnet(iface, filters)
- if not subnet:
- self.fail(msg='subnet %s not found' % iface)
- internal_subnets.append(subnet)
+ subnet = self.conn.network.find_subnet(
+ iface, ignore_missing=False, **filters)
+ internal_ifaces.append(dict(subnet_id=subnet.id))
elif isinstance(iface, dict):
- subnet = self.conn.get_subnet(iface['subnet'], filters)
- if not subnet:
- self.fail(msg='subnet %s not found' % iface['subnet'])
-
- net = self.conn.get_network(iface['net'])
- if not net:
- self.fail(msg='net %s not found' % iface['net'])
-
- if "portip" not in iface:
+ subnet = self.conn.network.find_subnet(
+ iface['subnet'], ignore_missing=False, **filters)
+
+ # TODO: We allow passing a subnet without specifying a
+ # network when iface is a string, hence we
+ # should allow omitting the network here as well.
+ if 'net' not in iface:
+ self.fail(
+ "Network name missing from interface definition")
+ net = self.conn.network.find_network(iface['net'],
+ ignore_missing=False)
+
+ if 'portip' not in iface:
# portip not set, add any ip from subnet
- internal_subnets.append(subnet)
+ internal_ifaces.append(dict(subnet_id=subnet.id))
elif not iface['portip']:
# portip is set but has invalid value
- self.fail(msg='put an ip in portip or remove it from list to assign default port to router')
+ self.fail(msg='put an ip in portip or remove it '
+ 'from list to assign default port to router')
else:
# portip has valid value
- # look for ports whose fixed_ips.ip_address matchs portip
- for existing_port in self.conn.list_ports(filters={'network_id': net.id}):
- for fixed_ip in existing_port['fixed_ips']:
- if iface['portip'] == fixed_ip['ip_address']:
- # portip exists in net already
- internal_ports.append(existing_port)
- internal_ips.append(fixed_ip['ip_address'])
- if iface['portip'] not in internal_ips:
- # no port with portip exists hence create a new port
+ # look for ports whose fixed_ips.ip_address matches
+ # portip
+ portip = iface['portip']
+ port_kwargs = ({'network_id': net.id}
+ if net is not None else {})
+ existing_ports = self.conn.network.ports(**port_kwargs)
+ for port in existing_ports:
+ for fip in port['fixed_ips']:
+ if (fip['subnet_id'] != subnet.id
+ or fip['ip_address'] != portip):
+ continue
+ # portip exists in net already
+ internal_ips.append(fip['ip_address'])
+ internal_ifaces.append(
+ dict(port_id=port.id,
+ subnet_id=subnet.id,
+ ip_address=portip))
+ if portip not in internal_ips:
+ # No port with portip exists
+ # hence create a new port
internal_ports_missing.append({
- 'network_id': net.id,
- 'fixed_ips': [{'ip_address': iface['portip'], 'subnet_id': subnet.id}]
+ 'network_id': subnet.network_id,
+ 'fixed_ips': [{'ip_address': portip,
+ 'subnet_id': subnet.id}]
})
return {
'external_fixed_ips': external_fixed_ips,
- 'internal_subnets': internal_subnets,
- 'internal_ports': internal_ports,
- 'internal_ports_missing': internal_ports_missing
+ 'internal_ports_missing': internal_ports_missing,
+ 'internal_ifaces': internal_ifaces,
}
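
For reference, the two interface formats parsed above would look roughly like this in a playbook; a sketch assuming subnets subnet1 and subnet2 and a network net1 exist (illustrative names only):

  - name: Attach internal interfaces by subnet name or by explicit port ip
    openstack.cloud.router:
      cloud: devstack
      name: router1
      network: public
      interfaces:
        # plain string form: the router gets any free ip from this subnet
        - subnet1
        # dict form: the router gets the given ip on net1/subnet2
        - net: net1
          subnet: subnet2
          portip: 192.168.2.1
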
- def run(self):
+ def _update_ifaces(self, router, to_add, to_remove, missing_ports):
+ for port in to_remove:
+ self.conn.network.remove_interface_from_router(
+ router, port_id=port.id)
+ # create ports that are missing
+ for port in missing_ports:
+ p = self.conn.network.create_port(**port)
+ if p:
+ to_add.append(dict(port_id=p.id))
+ for iface in to_add:
+ self.conn.network.add_interface_to_router(router, **iface)
+
+ def _get_external_gateway_network_name(self):
+ network_name_or_id = self.params['network']
+ if self.params['external_gateway_info']:
+ network_name_or_id = \
+ self.params['external_gateway_info']['network']
+ return network_name_or_id
+
+ def _get_port_changes(self, router, ifs_cfg):
+ requested_subnet_ids = [iface['subnet_id'] for iface
+ in ifs_cfg['internal_ifaces']]
+
+ router_ifs_internal = []
+ if router:
+ router_ifs_internal = self.conn.list_router_interfaces(
+ router, 'internal')
+
+ existing_subnet_ips = {}
+ for iface in router_ifs_internal:
+ if 'fixed_ips' not in iface:
+ continue
+ for fip in iface['fixed_ips']:
+ existing_subnet_ips[fip['subnet_id']] = (fip['ip_address'],
+ iface)
+
+ obsolete_subnet_ids = (set(existing_subnet_ips.keys())
+ - set(requested_subnet_ids))
+
+ internal_ifaces = ifs_cfg['internal_ifaces']
+ to_add = []
+ to_remove = []
+ for iface in internal_ifaces:
+ subnet_id = iface['subnet_id']
+ if subnet_id not in existing_subnet_ips:
+ iface.pop('ip_address', None)
+ to_add.append(iface)
+ continue
+ ip, existing_port = existing_subnet_ips[subnet_id]
+ if 'ip_address' in iface and ip != iface['ip_address']:
+ # Port exists for subnet but has the wrong ip. Schedule it for
+ # deletion
+ to_remove.append(existing_port)
+
+ for port in router_ifs_internal:
+ if 'fixed_ips' not in port:
+ continue
+ if any(fip['subnet_id'] in obsolete_subnet_ids
+ for fip in port['fixed_ips']):
+ to_remove.append(port)
+ return dict(to_add=to_add, to_remove=to_remove,
+ router_ifs_internal=router_ifs_internal)
+ def run(self):
state = self.params['state']
name = self.params['name']
- network = self.params['network']
- project = self.params['project']
-
- if self.params['external_fixed_ips'] and not network:
- self.fail(msg='network is required when supplying external_fixed_ips')
-
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail(msg='Project %s could not be found' % project)
- project_id = proj['id']
- filters = {'tenant_id': project_id}
- else:
- project_id = None
- filters = None
-
- router = self.conn.get_router(name, filters=filters)
- net = None
- if network:
- net = self.conn.get_network(network)
- if not net:
- self.fail(msg='network %s not found' % network)
+ network_name_or_id = self._get_external_gateway_network_name()
+ project_name_or_id = self.params['project']
+
+ if self.params['external_fixed_ips'] and not network_name_or_id:
+ self.fail(
+ msg='network is required when supplying external_fixed_ips')
+
+ query_filters = {}
+ project = None
+ project_id = None
+ if project_name_or_id is not None:
+ project = self.conn.identity.find_project(project_name_or_id,
+ ignore_missing=False)
+ project_id = project['id']
+ query_filters['project_id'] = project_id
+
+ router = self.conn.network.find_router(name, **query_filters)
+ network = None
+ if network_name_or_id:
+ network = self.conn.network.find_network(network_name_or_id,
+ ignore_missing=False,
+ **query_filters)
# Validate and cache the subnet IDs so we can avoid duplicate checks
# and expensive API calls.
- router_ifs_cfg = self._build_router_interface_config(filters)
- requested_subnet_ids = [subnet.id for subnet in router_ifs_cfg['internal_subnets']] + \
- self._get_subnet_ids_from_ports(router_ifs_cfg['internal_ports'])
- requested_port_ids = [i['id'] for i in router_ifs_cfg['internal_ports']]
+ router_ifs_cfg = self._build_router_interface_config(query_filters)
- if router:
- router_ifs_internal = self.conn.list_router_interfaces(router, 'internal')
- existing_subnet_ids = self._get_subnet_ids_from_ports(router_ifs_internal)
- obsolete_subnet_ids = set(existing_subnet_ids) - set(requested_subnet_ids)
- existing_port_ids = [i['id'] for i in router_ifs_internal]
+ missing_internal_ports = router_ifs_cfg['internal_ports_missing']
- else:
- router_ifs_internal = []
- existing_subnet_ids = []
- obsolete_subnet_ids = []
- existing_port_ids = []
+ port_changes = self._get_port_changes(router, router_ifs_cfg)
+ to_add = port_changes['to_add']
+ to_remove = port_changes['to_remove']
+ router_ifs_internal = port_changes['router_ifs_internal']
- missing_port_ids = set(requested_port_ids) - set(existing_port_ids)
+ external_fixed_ips = router_ifs_cfg['external_fixed_ips']
if self.ansible.check_mode:
# Check if the system state would be changed
@@ -471,82 +642,44 @@ class RouterModule(OpenStackModule):
elif state == 'present' and not router:
changed = True
else: # if state == 'present' and router
- changed = self._needs_update(router, net,
- missing_port_ids,
- requested_subnet_ids,
- existing_subnet_ids,
- router_ifs_cfg)
+ kwargs = self._build_kwargs(router, network,
+ external_fixed_ips)
+ changed = self._needs_update(
+ router, kwargs, external_fixed_ips, to_add, to_remove,
+ missing_internal_ports)
self.exit_json(changed=changed)
if state == 'present':
changed = False
+ external_fixed_ips = router_ifs_cfg['external_fixed_ips']
+ internal_ifaces = router_ifs_cfg['internal_ifaces']
+ kwargs = self._build_kwargs(router, network,
+ external_fixed_ips)
if not router:
changed = True
- kwargs = self._build_kwargs(router, net)
if project_id:
kwargs['project_id'] = project_id
- router = self.conn.create_router(**kwargs)
-
- # add interface by subnet id, because user did not specify a port id
- for subnet in router_ifs_cfg['internal_subnets']:
- self.conn.add_router_interface(router, subnet_id=subnet.id)
+ router = self.conn.network.create_router(**kwargs)
- # add interface by port id if user did specify a valid port id
- for port in router_ifs_cfg['internal_ports']:
- self.conn.add_router_interface(router, port_id=port.id)
-
- # add port and interface if user did specify an ip address but port is missing yet
- for missing_internal_port in router_ifs_cfg['internal_ports_missing']:
- p = self.conn.create_port(**missing_internal_port)
- if p:
- self.conn.add_router_interface(router, port_id=p.id)
+ self._update_ifaces(router, internal_ifaces, [],
+ missing_internal_ports)
else:
- if self._needs_update(router, net,
- missing_port_ids,
- requested_subnet_ids,
- existing_subnet_ids,
- router_ifs_cfg):
+
+ if self._needs_update(router, kwargs, external_fixed_ips,
+ to_add, to_remove,
+ missing_internal_ports):
changed = True
- kwargs = self._build_kwargs(router, net)
- updated_router = self.conn.update_router(**kwargs)
+ router = self.conn.network.update_router(router, **kwargs)
- # Protect against update_router() not actually updating the router.
- if not updated_router:
- changed = False
- else:
- router = updated_router
-
- # delete internal subnets i.e. ports
- if obsolete_subnet_ids:
- for port in router_ifs_internal:
- if 'fixed_ips' in port:
- for fip in port['fixed_ips']:
- if fip['subnet_id'] in obsolete_subnet_ids:
- self.conn.remove_router_interface(router, port_id=port['id'])
- changed = True
-
- # add new internal interface by subnet id, because user did not specify a port id
- for subnet in router_ifs_cfg['internal_subnets']:
- if subnet.id not in existing_subnet_ids:
- self.conn.add_router_interface(router, subnet_id=subnet.id)
- changed = True
-
- # add new internal interface by port id if user did specify a valid port id
- for port_id in missing_port_ids:
- self.conn.add_router_interface(router, port_id=port_id)
- changed = True
-
- # add new port and new internal interface if user did specify an ip address but port is missing yet
- for missing_internal_port in router_ifs_cfg['internal_ports_missing']:
- p = self.conn.create_port(**missing_internal_port)
- if p:
- self.conn.add_router_interface(router, port_id=p.id)
- changed = True
-
- self.exit_json(changed=changed, router=router)
+ if to_add or to_remove or missing_internal_ports:
+ self._update_ifaces(router, to_add, to_remove,
+ missing_internal_ports)
+
+ self.exit_json(changed=changed,
+ router=router.to_dict(computed=False))
elif state == 'absent':
if not router:
@@ -557,9 +690,10 @@ class RouterModule(OpenStackModule):
# still fail if e.g. floating ips are attached to the
# router.
for port in router_ifs_internal:
- self.conn.remove_router_interface(router, port_id=port['id'])
- self.conn.delete_router(router['id'])
- self.exit_json(changed=True, router=router)
+ self.conn.network.remove_interface_from_router(
+ router, port_id=port['id'])
+ self.conn.network.delete_router(router)
+ self.exit_json(changed=True)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/routers_info.py b/ansible_collections/openstack/cloud/plugins/modules/routers_info.py
index 990eef8dc..707aaa418 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/routers_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/routers_info.py
@@ -1,5 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+
# Copyright (c) 2019, Bram Verschueren <verschueren.bram@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -18,14 +19,16 @@ options:
type: str
filters:
description:
- - A dictionary of meta data to use for further filtering. Elements of
+ - A dictionary of meta data to use for further filtering. Elements of
this dictionary may be additional dictionaries.
required: false
type: dict
+ default: {}
suboptions:
project_id:
description:
- - Filter the list result by the ID of the project that owns the resource.
+ - Filter the list result by the ID of the project that owns the
+ resource.
type: str
aliases:
- tenant_id
@@ -35,11 +38,13 @@ options:
type: str
description:
description:
- - Filter the list result by the human-readable description of the resource.
+ - Filter the list result by the human-readable description of the
+ resource.
type: str
- admin_state_up:
+ is_admin_state_up:
description:
- - Filter the list result by the administrative state of the resource, which is up (true) or down (false).
+ - Filter the list result by the administrative state of the
+ resource, which is up (true) or down (false).
type: bool
revision_number:
description:
@@ -47,12 +52,10 @@ options:
type: int
tags:
description:
- - A list of tags to filter the list result by. Resources that match all tags in this list will be returned.
+ - A list of tags to filter the list result by. Resources that match
+ all tags in this list will be returned.
type: list
elements: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -69,7 +72,7 @@ EXAMPLES = '''
- name: Show openstack routers
debug:
- msg: "{{ result.openstack_routers }}"
+ msg: "{{ result.routers }}"
- name: Gather information about a router by name
openstack.cloud.routers_info:
@@ -83,7 +86,7 @@ EXAMPLES = '''
- name: Show openstack routers
debug:
- msg: "{{ result.openstack_routers }}"
+ msg: "{{ result.routers }}"
- name: Gather information about a router with filter
openstack.cloud.routers_info:
@@ -93,56 +96,155 @@ EXAMPLES = '''
password: password
project_name: someproject
filters:
- tenant_id: bc3ea709c96849d6b81f54640400a19f
+ is_admin_state_up: True
register: result
- name: Show openstack routers
debug:
- msg: "{{ result.openstack_routers }}"
+ msg: "{{ result.routers }}"
+
+- name: List all routers
+ openstack.cloud.routers_info:
+ cloud: devstack
+ register: routers
+
+- name: List ports of first router
+ openstack.cloud.port_info:
+ cloud: devstack
+ filters:
+ device_id: "{{ routers.routers.0.id }}"
+ register: ports
+
+- name: Show first router's fixed ips
+ debug:
+ msg: "{{ ports.ports
+ |rejectattr('device_owner', 'equalto', 'network:router_gateway')
+ |sum(attribute='fixed_ips', start=[])
+ |map(attribute='ip_address')
+ |sort|list }}"
+
+- name: List ports of all routers
+ loop: "{{ routers.routers }}"
+ openstack.cloud.port_info:
+ cloud: devstack
+ filters:
+ device_id: "{{ item['id'] }}"
+ register: ports
+
+- name: Transform ports for interfaces_info entries
+ loop: "{{ ports.results|map(attribute='ports')|list }}"
+ set_fact:
+ interfaces_info: |-
+ {% for port in item %}
+ {% if port.device_owner != "network:router_gateway" %}
+ {% for fixed_ip in port['fixed_ips'] %}
+ - port_id: {{ port.id }}
+ ip_address: {{ fixed_ip.ip_address }}
+ subnet_id: {{ fixed_ip.subnet_id }}
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+ register: interfaces
+
+- name: Combine router and interfaces_info entries
+ loop: "{{
+ routers.routers|zip(interfaces.results|map(attribute='ansible_facts'))|list
+ }}"
+ set_fact:
+ # underscore prefix to prevent overwriting facts outside of loop
+ _router: "{{
+ item.0|combine({'interfaces_info': item.1.interfaces_info|from_yaml})
+ }}"
+ register: routers
+
+- name: Remove set_fact artifacts from routers
+ set_fact:
+ routers: "{{ {
+ 'routers': routers.results|map(attribute='ansible_facts._router')|list
+ } }}"
+
+- debug: var=routers
'''
RETURN = '''
-openstack_routers:
+routers:
description: has all the openstack information about the routers
returned: always, but can be null
- type: complex
+ type: list
+ elements: dict
contains:
- id:
- description: Unique UUID.
+ availability_zones:
+ description: Availability zones
returned: success
- type: str
- name:
- description: Name given to the router.
+ type: list
+ availability_zone_hints:
+ description: Availability zone hints
+ returned: success
+ type: list
+ created_at:
+ description: Date and time when the router was created
returned: success
type: str
- status:
- description: Router status.
+ description:
+ description: Description notes of the router
returned: success
type: str
external_gateway_info:
description: The external gateway information of the router.
returned: success
type: dict
- interfaces_info:
- description: List of connected interfaces.
+ flavor_id:
+ description: ID of the flavor of the router
returned: success
- type: list
- distributed:
+ type: str
+ id:
+ description: Unique UUID.
+ returned: success
+ type: str
+ is_admin_state_up:
+ description: Network administrative state
+ returned: success
+ type: bool
+ is_distributed:
description: Indicates a distributed router.
returned: success
type: bool
- ha:
+ is_ha:
description: Indicates a highly-available router.
returned: success
type: bool
+ name:
+ description: Name given to the router.
+ returned: success
+ type: str
project_id:
description: Project id associated with this router.
returned: success
type: str
+ revision_number:
+ description: Revision number
+ returned: success
+ type: int
routes:
description: The extra routes configuration for L3 router.
returned: success
type: list
+ status:
+ description: Router status.
+ returned: success
+ type: str
+ tags:
+ description: List of tags
+ returned: success
+ type: list
+ tenant_id:
+ description: Owner tenant ID
+ returned: success
+ type: str
+ updated_at:
+ description: Date of last update on the router
+ returned: success
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -150,39 +252,21 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class RouterInfoModule(OpenStackModule):
- deprecated_names = ('os_routers_info', 'openstack.cloud.os_routers_info')
-
argument_spec = dict(
- name=dict(required=False, default=None),
- filters=dict(required=False, type='dict', default=None)
+ name=dict(),
+ filters=dict(type='dict', default={})
)
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
-
- kwargs = self.check_versioned(
- filters=self.params['filters']
- )
- if self.params['name']:
- kwargs['name_or_id'] = self.params['name']
- routers = self.conn.search_routers(**kwargs)
-
- for router in routers:
- interfaces_info = []
- for port in self.conn.list_router_interfaces(router):
- if port.device_owner != "network:router_gateway":
- for ip_spec in port.fixed_ips:
- int_info = {
- 'port_id': port.id,
- 'ip_address': ip_spec.get('ip_address'),
- 'subnet_id': ip_spec.get('subnet_id')
- }
- interfaces_info.append(int_info)
- router['interfaces_info'] = interfaces_info
-
- self.exit(changed=False, openstack_routers=routers)
+ routers = [
+ router.to_dict(computed=False)
+ for router in self.conn.search_routers(
+ name_or_id=self.params['name'],
+ filters=self.params['filters'])]
+ self.exit(changed=False, routers=routers)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/security_group.py b/ansible_collections/openstack/cloud/plugins/modules/security_group.py
index 8208a1c22..2196e8fac 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/security_group.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/security_group.py
@@ -1,147 +1,572 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: security_group
-short_description: Add/Delete security groups from an OpenStack cloud.
+short_description: Manage Neutron security groups of an OpenStack cloud.
author: OpenStack Ansible SIG
description:
- - Add or Remove security groups from an OpenStack cloud.
+ - Add or remove Neutron security groups to/from an OpenStack cloud.
options:
- name:
- description:
- - Name that has to be given to the security group. This module
- requires that security group names be unique.
- required: true
- type: str
- description:
- description:
- - Long description of the purpose of the security group
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- project:
- description:
- - Unique name or ID of the project.
- required: false
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - Long description of the purpose of the security group.
+ type: str
+ name:
+ description:
+ - Name that has to be given to the security group. This module
+ requires that security group names be unique.
+ required: true
+ type: str
+ project:
+ description:
+ - Unique name or ID of the project.
+ type: str
+ security_group_rules:
+ description:
+ - List of security group rules.
+ - When I(security_group_rules) is not defined, Neutron might create this
+ security group with a default set of rules.
+ - Security group rules which are listed in I(security_group_rules)
+ but not defined in this security group will be created.
+ - When I(security_group_rules) is set, existing security group rules
+ which are not listed in I(security_group_rules) will be deleted.
+ - When updating a security group, one has to explicitly list rules from
+ Neutron's defaults in I(security_group_rules) if those rules should be
+ kept. Rules which are not listed in I(security_group_rules) will be
+ deleted.
+ type: list
+ elements: dict
+ suboptions:
+ description:
+ description:
+ - Description of the security group rule.
+ type: str
+ direction:
+ description:
+ - The direction in which the security group rule is applied.
+ - Not all providers support C(egress).
+ choices: ['egress', 'ingress']
+ default: ingress
+ type: str
+ ether_type:
+ description:
+ - Must be IPv4 or IPv6, and addresses represented in CIDR must
+ match the ingress or egress rules. Not all providers support IPv6.
+ choices: ['IPv4', 'IPv6']
+ default: IPv4
+ type: str
+ port_range_max:
+ description:
+ - The maximum port number in the range that is matched by the
+ security group rule.
+ - If the protocol is TCP, UDP, DCCP, SCTP or UDP-Lite this value must
+ be greater than or equal to the I(port_range_min) attribute value.
+ - If the protocol is ICMP, this value must be an ICMP code.
+ type: int
+ port_range_min:
+ description:
+ - The minimum port number in the range that is matched by the
+ security group rule.
+ - If the protocol is TCP, UDP, DCCP, SCTP or UDP-Lite this value must
+ be less than or equal to the port_range_max attribute value.
+ - If the protocol is ICMP, this value must be an ICMP type.
+ type: int
+ protocol:
+ description:
+ - The IP protocol can be represented by a string, an integer, or
+ null.
+ - Valid string or integer values are C(any) or C(0), C(ah) or C(51),
+ C(dccp) or C(33), C(egp) or C(8), C(esp) or C(50), C(gre) or C(47),
+ C(icmp) or C(1), C(icmpv6) or C(58), C(igmp) or C(2), C(ipip) or
+ C(4), C(ipv6-encap) or C(41), C(ipv6-frag) or C(44), C(ipv6-icmp)
+ or C(58), C(ipv6-nonxt) or C(59), C(ipv6-opts) or C(60),
+ C(ipv6-route) or C(43), C(ospf) or C(89), C(pgm) or C(113), C(rsvp)
+ or C(46), C(sctp) or C(132), C(tcp) or C(6), C(udp) or C(17),
+ C(udplite) or C(136), C(vrrp) or C(112).
+ - Additionally, any integer value between C([0-255]) is also valid.
+ - The string any (or integer 0) means all IP protocols.
+ - See the constants in neutron_lib.constants for the most up-to-date
+ list of supported strings.
+ type: str
+ remote_group:
+ description:
+ - Name or ID of the security group to link.
+ - Mutually exclusive with I(remote_ip_prefix).
+ type: str
+ remote_ip_prefix:
+ description:
+ - Source IP address(es) in CIDR notation.
+ - When a netmask such as C(/32) is missing from I(remote_ip_prefix),
+ then this module will fail on updates with OpenStack error message
+ C(Security group rule already exists.).
+ - Mutually exclusive with I(remote_group).
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+ stateful:
+ description:
+ - Should the resource be stateful or stateless.
+ type: bool
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
+'''
+
+RETURN = r'''
+security_group:
+ description: Dictionary describing the security group.
+ type: dict
+ returned: On success when I(state) is C(present).
+ contains:
+ created_at:
+ description: Creation time of the security group
+ type: str
+ sample: "yyyy-mm-dd hh:mm:ss"
+ description:
+ description: Description of the security group
+ type: str
+ sample: "My security group"
+ id:
+ description: ID of the security group
+ type: str
+ sample: "d90e55ba-23bd-4d97-b722-8cb6fb485d69"
+ name:
+ description: Name of the security group.
+ type: str
+ sample: "my-sg"
+ project_id:
+ description: ID of the project the security group belongs to.
+ type: str
+ sample: "25d24fc8-d019-4a34-9fff-0a09fde6a567"
+ revision_number:
+ description: The revision number of the resource.
+ type: int
+ tenant_id:
+ description: ID of the tenant the security group belongs to. Deprecated.
+ type: str
+ sample: "25d24fc8-d019-4a34-9fff-0a09fde6a567"
+ security_group_rules:
+ description: Specifies the security group rule list
+ type: list
+ sample: [
+ {
+ "id": "d90e55ba-23bd-4d97-b722-8cb6fb485d69",
+ "direction": "ingress",
+ "protocol": null,
+ "ethertype": "IPv4",
+ "description": null,
+ "remote_group_id": "0431c9c5-1660-42e0-8a00-134bec7f03e2",
+ "remote_ip_prefix": null,
+ "tenant_id": "bbfe8c41dd034a07bebd592bf03b4b0c",
+ "port_range_max": null,
+ "port_range_min": null,
+ "security_group_id": "0431c9c5-1660-42e0-8a00-134bec7f03e2"
+ },
+ {
+ "id": "aecff4d4-9ce9-489c-86a3-803aedec65f7",
+ "direction": "egress",
+ "protocol": null,
+ "ethertype": "IPv4",
+ "description": null,
+ "remote_group_id": null,
+ "remote_ip_prefix": null,
+ "tenant_id": "bbfe8c41dd034a07bebd592bf03b4b0c",
+ "port_range_max": null,
+ "port_range_min": null,
+ "security_group_id": "0431c9c5-1660-42e0-8a00-134bec7f03e2"
+ }
+ ]
+ stateful:
+ description: Indicates if the security group is stateful or stateless.
+ type: bool
+ tags:
+ description: The list of tags on the resource.
+ type: list
+ updated_at:
+ description: Update time of the security group
+ type: str
+ sample: "yyyy-mm-dd hh:mm:ss"
'''
-EXAMPLES = '''
-# Create a security group
-- openstack.cloud.security_group:
+EXAMPLES = r'''
+- name: Create a security group
+ openstack.cloud.security_group:
cloud: mordred
state: present
name: foo
description: security group for foo servers
-# Update the existing 'foo' security group description
-- openstack.cloud.security_group:
+- name: Create a stateless security group
+ openstack.cloud.security_group:
+ cloud: mordred
+ state: present
+ stateful: false
+ name: foo
+ description: stateless security group for foo servers
+
+- name: Update the existing 'foo' security group description
+ openstack.cloud.security_group:
cloud: mordred
state: present
name: foo
description: updated description for the foo security group
-# Create a security group for a given project
-- openstack.cloud.security_group:
+- name: Create a security group for a given project
+ openstack.cloud.security_group:
cloud: mordred
state: present
name: foo
project: myproj
+
+- name: Create (or update) a security group with security group rules
+ openstack.cloud.security_group:
+ cloud: mordred
+ state: present
+ name: foo
+ security_group_rules:
+ - ether_type: IPv6
+ direction: egress
+ - ether_type: IPv4
+ direction: egress
+
+- name: Create (or update) security group without security group rules
+ openstack.cloud.security_group:
+ cloud: mordred
+ state: present
+ name: foo
+ security_group_rules: []
'''
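
Because rules omitted from security_group_rules are deleted on updates, keeping Neutron's default egress rules while opening an extra port has to be spelled out explicitly. A sketch of such a task, assuming a cloud entry named devstack (illustrative):

  - name: Allow SSH but keep the default egress rules
    openstack.cloud.security_group:
      cloud: devstack
      state: present
      name: foo
      security_group_rules:
        - ether_type: IPv4
          direction: egress
        - ether_type: IPv6
          direction: egress
        - ether_type: IPv4
          direction: ingress
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
          remote_ip_prefix: 0.0.0.0/0
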
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class SecurityGroupModule(OpenStackModule):
+ # NOTE: Keep handling of security group rules synchronized with
+ # security_group_rule.py!
argument_spec = dict(
+ description=dict(),
name=dict(required=True),
- description=dict(default=''),
+ project=dict(),
+ security_group_rules=dict(
+ type="list", elements="dict",
+ options=dict(
+ description=dict(),
+ direction=dict(default="ingress",
+ choices=["egress", "ingress"]),
+ ether_type=dict(default="IPv4", choices=["IPv4", "IPv6"]),
+ port_range_max=dict(type="int"),
+ port_range_min=dict(type="int"),
+ protocol=dict(),
+ remote_group=dict(),
+ remote_ip_prefix=dict(),
+ ),
+ ),
state=dict(default='present', choices=['absent', 'present']),
- project=dict(default=None),
+ stateful=dict(type="bool"),
)
- def _needs_update(self, secgroup):
- """Check for differences in the updatable values.
-
- NOTE: We don't currently allow name updates.
- """
- if secgroup['description'] != self.params['description']:
- return True
- return False
+ module_kwargs = dict(
+ supports_check_mode=True,
+ )
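
Since the module now declares supports_check_mode, a dry run can report whether the security group or its rules would change without touching Neutron; a minimal sketch, assuming a cloud entry named devstack (illustrative):

  - name: Check whether security group foo would change
    openstack.cloud.security_group:
      cloud: devstack
      name: foo
      security_group_rules: []
    check_mode: true
    register: sg_drift

  - name: Report pending change
    debug:
      msg: "security group foo would change: {{ sg_drift.changed }}"
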
- def _system_state_change(self, secgroup):
+ def run(self):
state = self.params['state']
- if state == 'present':
- if not secgroup:
- return True
- return self._needs_update(secgroup)
- if state == 'absent' and secgroup:
- return True
- return False
- def run(self):
+ security_group = self._find()
- name = self.params['name']
- state = self.params['state']
- description = self.params['description']
- project = self.params['project']
-
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
- project_id = proj['id']
- else:
- project_id = self.conn.current_project_id
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, security_group))
- if project_id:
- filters = {'tenant_id': project_id}
- else:
- filters = None
+ if state == 'present' and not security_group:
+ # Create security_group
+ security_group = self._create()
+ self.exit_json(
+ changed=True,
+ security_group=security_group.to_dict(computed=False))
- secgroup = self.conn.get_security_group(name, filters=filters)
+ elif state == 'present' and security_group:
+ # Update security_group
+ update = self._build_update(security_group)
+ if update:
+ security_group = self._update(security_group, update)
- if self.ansible.check_mode:
- self.exit(changed=self._system_state_change(secgroup))
-
- changed = False
- if state == 'present':
- if not secgroup:
- kwargs = {}
- if project_id:
- kwargs['project_id'] = project_id
- secgroup = self.conn.create_security_group(name, description,
- **kwargs)
- changed = True
+ self.exit_json(
+ changed=bool(update),
+ security_group=security_group.to_dict(computed=False))
+
+ elif state == 'absent' and security_group:
+ # Delete security_group
+ self._delete(security_group)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not security_group:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, security_group):
+ return {
+ **self._build_update_security_group(security_group),
+ **self._build_update_security_group_rules(security_group)}
+
+ def _build_update_security_group(self, security_group):
+ update = {}
+
+ # module options name and project are used to find security group
+ # and thus cannot be updated
+
+ non_updateable_keys = [k for k in []
+ if self.params[k] is not None
+ and self.params[k] != security_group[k]]
+
+ if non_updateable_keys:
+ self.fail_json(msg='Cannot update parameters {0}'
+ .format(non_updateable_keys))
+
+ attributes = dict((k, self.params[k])
+ for k in ['description']
+ if self.params[k] is not None
+ and self.params[k] != security_group[k])
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
+
+ def _build_update_security_group_rules(self, security_group):
+
+ if self.params['security_group_rules'] is None:
+ # Consider a change of security group rules only when option
+ # 'security_group_rules' was defined explicitly, because undefined
+ # options in our Ansible modules denote "apply no change"
+ return {}
+
+ def find_security_group_rule_match(prototype, security_group_rules):
+ matches = [r for r in security_group_rules
+ if is_security_group_rule_match(prototype, r)]
+ if len(matches) > 1:
+ self.fail_json(msg='Found more than a single matching'
+ ' security group rule which matches'
+ ' the given parameters.')
+ elif len(matches) == 1:
+ return matches[0]
+ else: # len(matches) == 0
+ return None
+
+ def is_security_group_rule_match(prototype, security_group_rule):
+ skip_keys = ['ether_type']
+ if 'ether_type' in prototype \
+ and security_group_rule['ethertype'] != prototype['ether_type']:
+ return False
+
+ if 'protocol' in prototype \
+ and prototype['protocol'] in ['tcp', 'udp']:
+ # Check if the user is supplying -1, 1 to 65535 or None values
+ # for the full TCP or UDP port range.
+ # (None, None) == (1, 65535) == (-1, -1)
+ if 'port_range_max' in prototype \
+ and prototype['port_range_max'] in [-1, 65535]:
+ if security_group_rule['port_range_max'] is not None:
+ return False
+ skip_keys.append('port_range_max')
+ if 'port_range_min' in prototype \
+ and prototype['port_range_min'] in [-1, 1]:
+ if security_group_rule['port_range_min'] is not None:
+ return False
+ skip_keys.append('port_range_min')
+
+ if all(security_group_rule[k] == prototype[k]
+ for k in (set(prototype.keys()) - set(skip_keys))):
+ return security_group_rule
+ else:
+ return None
+
+ update = {}
+ keep_security_group_rules = {}
+ create_security_group_rules = []
+ delete_security_group_rules = []
+
+ for prototype in self._generate_security_group_rules(security_group):
+ match = find_security_group_rule_match(
+ prototype, security_group.security_group_rules)
+ if match:
+ keep_security_group_rules[match['id']] = match
else:
- if self._needs_update(secgroup):
- secgroup = self.conn.update_security_group(
- secgroup['id'], description=description)
- changed = True
- self.exit(
- changed=changed, id=secgroup['id'], secgroup=secgroup)
-
- if state == 'absent':
- if secgroup:
- self.conn.delete_security_group(secgroup['id'])
- changed = True
- self.exit(changed=changed)
+ create_security_group_rules.append(prototype)
+
+ for security_group_rule in security_group.security_group_rules:
+ if (security_group_rule['id']
+ not in keep_security_group_rules.keys()):
+ delete_security_group_rules.append(security_group_rule)
+
+ if create_security_group_rules:
+ update['create_security_group_rules'] = create_security_group_rules
+
+ if delete_security_group_rules:
+ update['delete_security_group_rules'] = delete_security_group_rules
+
+ return update
+
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['description', 'name', 'stateful']
+ if self.params[k] is not None)
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id is not None:
+ project = self.conn.identity.find_project(
+ name_or_id=project_name_or_id, ignore_missing=False)
+ kwargs['project_id'] = project.id
+
+ security_group = self.conn.network.create_security_group(**kwargs)
+
+ update = self._build_update_security_group_rules(security_group)
+ if update:
+ security_group = self._update_security_group_rules(security_group,
+ update)
+
+ return security_group
+
+ def _delete(self, security_group):
+ self.conn.network.delete_security_group(security_group.id)
+
+ def _find(self):
+ kwargs = dict(name_or_id=self.params['name'])
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id is not None:
+ project = self.conn.identity.find_project(
+ name_or_id=project_name_or_id, ignore_missing=False)
+ kwargs['project_id'] = project.id
+
+ return self.conn.network.find_security_group(**kwargs)
+
+ def _generate_security_group_rules(self, security_group):
+ security_group_cache = {}
+ security_group_cache[security_group.name] = security_group
+ security_group_cache[security_group.id] = security_group
+
+ def _generate_security_group_rule(params):
+ prototype = dict(
+ (k, params[k])
+ for k in ['description', 'direction', 'remote_ip_prefix']
+ if params[k] is not None)
+
+ # When remote_ip_prefix is missing a netmask, then Neutron will add
+ # a netmask using Python library netaddr [0] and its IPNetwork
+ # class [1]. We do not want to introduce additional Python
+ # dependencies to our code base and neither want to replicate
+ # netaddr's parse_ip_network code here. So we do not handle
+ # remote_ip_prefix without a netmask and instead let Neutron handle
+ # it.
+ # [0] https://opendev.org/openstack/neutron/src/commit/\
+ # 43d94640568828f5e98bbb1e9df985ec3f1bb2d2/neutron/db/securitygroups_db.py#L775
+ # [1] https://github.com/netaddr/netaddr/blob/\
+ # b1d8f016abee00c8a93e35b928acdc22797c800a/netaddr/ip/__init__.py#L841
+ # [2] https://github.com/netaddr/netaddr/blob/\
+ # b1d8f016abee00c8a93e35b928acdc22797c800a/netaddr/ip/__init__.py#L773
+
+ prototype['project_id'] = security_group.project_id
+ prototype['security_group_id'] = security_group.id
+
+ remote_group_name_or_id = params['remote_group']
+ if remote_group_name_or_id is not None:
+ if remote_group_name_or_id in security_group_cache:
+ remote_group = \
+ security_group_cache[remote_group_name_or_id]
+ else:
+ remote_group = self.conn.network.find_security_group(
+ remote_group_name_or_id, ignore_missing=False)
+ security_group_cache[remote_group_name_or_id] = \
+ remote_group
+
+ prototype['remote_group_id'] = remote_group.id
+
+ ether_type = params['ether_type']
+ if ether_type is not None:
+ prototype['ether_type'] = ether_type
+
+ protocol = params['protocol']
+ if protocol is not None and protocol not in ['any', '0']:
+ prototype['protocol'] = protocol
+
+ port_range_max = params['port_range_max']
+ port_range_min = params['port_range_min']
+
+ if protocol in ['icmp', 'ipv6-icmp']:
+ # Check if the user is supplying -1 for ICMP.
+ if port_range_max is not None and int(port_range_max) != -1:
+ prototype['port_range_max'] = int(port_range_max)
+ if port_range_min is not None and int(port_range_min) != -1:
+ prototype['port_range_min'] = int(port_range_min)
+ elif protocol in ['tcp', 'udp']:
+ if port_range_max is not None and int(port_range_max) != -1:
+ prototype['port_range_max'] = int(port_range_max)
+ if port_range_min is not None and int(port_range_min) != -1:
+ prototype['port_range_min'] = int(port_range_min)
+ elif protocol in ['any', '0']:
+ # Rules with 'any' protocol do not match ports
+ pass
+ else:
+ if port_range_max is not None:
+ prototype['port_range_max'] = int(port_range_max)
+ if port_range_min is not None:
+ prototype['port_range_min'] = int(port_range_min)
+
+ return prototype
+
+ return [_generate_security_group_rule(r)
+ for r in (self.params['security_group_rules'] or [])]
+
+ def _update(self, security_group, update):
+ security_group = self._update_security_group(security_group, update)
+ return self._update_security_group_rules(security_group, update)
+
+ def _update_security_group(self, security_group, update):
+ attributes = update.get('attributes')
+ if attributes:
+ security_group = self.conn.network.update_security_group(
+ security_group.id, **attributes)
+
+ return security_group
+
+ def _update_security_group_rules(self, security_group, update):
+ delete_security_group_rules = update.get('delete_security_group_rules')
+ if delete_security_group_rules:
+ for security_group_rule in delete_security_group_rules:
+ self.conn.network.\
+ delete_security_group_rule(security_group_rule['id'])
+
+ create_security_group_rules = update.get('create_security_group_rules')
+ if create_security_group_rules:
+ self.conn.network.\
+ create_security_group_rules(create_security_group_rules)
+
+ if create_security_group_rules or delete_security_group_rules:
+ # Update security group with created and deleted rules
+ return self.conn.network.get_security_group(security_group.id)
+ else:
+ return security_group
+
+ def _will_change(self, state, security_group):
+ if state == 'present' and not security_group:
+ return True
+ elif state == 'present' and security_group:
+ return bool(self._build_update(security_group))
+ elif state == 'absent' and security_group:
+ return True
+ else:
+ # state == 'absent' and not security_group:
+ return False
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/security_group_info.py b/ansible_collections/openstack/cloud/plugins/modules/security_group_info.py
index bc05356af..c4f3ae136 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/security_group_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/security_group_info.py
@@ -1,45 +1,35 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2020 by Open Telekom Cloud, operated by T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: security_group_info
short_description: Lists security groups
-extends_documentation_fragment: openstack.cloud.openstack
author: OpenStack Ansible SIG
description:
- List security groups
options:
+ any_tags:
+ description:
+ - A list of tags to filter the list result by.
+ - Resources that match any tag in this list will be returned.
+ type: list
+ elements: str
description:
description:
- - Description of the security group
+ - Description of the security group.
type: str
name:
description:
- Name or id of the security group.
type: str
- project_id:
- description:
- - Specifies the project id as filter criteria
- type: str
- revision_number:
- description:
- - Filter the list result by the revision number of the
- - resource.
- type: int
- tags:
- description:
- - A list of tags to filter the list result by.
- - Resources that match all tags in this list will be returned.
- type: list
- elements: str
- any_tags:
+ not_any_tags:
description:
- A list of tags to filter the list result by.
- - Resources that match any tag in this list will be returned.
+ - Resources that match any tag in this list will be excluded.
type: list
elements: str
not_tags:
@@ -48,21 +38,30 @@ options:
- Resources that match all tags in this list will be excluded.
type: list
elements: str
- not_any_tags:
+ project_id:
+ description:
+ - Specifies the project id as filter criteria.
+ type: str
+ revision_number:
+ description:
+ - Filter the list result by the revision number of the resource.
+ type: int
+ tags:
description:
- A list of tags to filter the list result by.
- - Resources that match any tag in this list will be excluded.
+ - Resources that match all tags in this list will be returned.
type: list
elements: str
-
-requirements: ["openstacksdk"]
+extends_documentation_fragment:
+ - openstack.cloud.openstack
'''
-RETURN = '''
+RETURN = r'''
security_groups:
description: List of dictionaries describing security groups.
- type: complex
- returned: On Success.
+ type: list
+ elements: dict
+ returned: always
contains:
created_at:
description: Creation time of the security group
@@ -84,6 +83,13 @@ security_groups:
description: Project ID where the security group is located in.
type: str
sample: "25d24fc8-d019-4a34-9fff-0a09fde6a567"
+ revision_number:
+ description: The revision number of the resource.
+ type: int
+ tenant_id:
+ description: ID of the tenant the security group belongs to. Deprecated.
+ type: str
+ sample: "25d24fc8-d019-4a34-9fff-0a09fde6a567"
security_group_rules:
description: Specifies the security group rule list
type: list
@@ -115,76 +121,71 @@ security_groups:
"security_group_id": "0431c9c5-1660-42e0-8a00-134bec7f03e2"
}
]
+ stateful:
+ description: Indicates if the security group is stateful or stateless.
+ type: bool
+ tags:
+ description: The list of tags on the resource.
+ type: list
updated_at:
description: Update time of the security group
type: str
sample: "yyyy-mm-dd hh:mm:ss"
'''
-EXAMPLES = '''
-# Get specific security group
-- openstack.cloud.security_group_info:
- cloud: "{{ cloud }}"
- name: "{{ my_sg }}"
- register: sg
-# Get all security groups
-- openstack.cloud.security_group_info:
- cloud: "{{ cloud }}"
- register: sg
+EXAMPLES = r'''
+- name: Get all security groups
+ openstack.cloud.security_group_info:
+ cloud: devstack
+
+- name: Get specific security group
+ openstack.cloud.security_group_info:
+ cloud: devstack
+ name: my_sg
'''
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- OpenStackModule)
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class SecurityGroupInfoModule(OpenStackModule):
argument_spec = dict(
- description=dict(required=False, type='str'),
- name=dict(required=False, type='str'),
- project_id=dict(required=False, type='str'),
- revision_number=dict(required=False, type='int'),
- tags=dict(required=False, type='list', elements='str'),
- any_tags=dict(required=False, type='list', elements='str'),
- not_tags=dict(required=False, type='list', elements='str'),
- not_any_tags=dict(required=False, type='list', elements='str')
+ any_tags=dict(type='list', elements='str'),
+ description=dict(),
+ name=dict(),
+ not_any_tags=dict(type='list', elements='str'),
+ not_tags=dict(type='list', elements='str'),
+ project_id=dict(),
+ revision_number=dict(type='int'),
+ tags=dict(type='list', elements='str'),
)
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
- description = self.params['description']
name = self.params['name']
- project_id = self.params['project_id']
- revision_number = self.params['revision_number']
- tags = self.params['tags']
- any_tags = self.params['any_tags']
- not_tags = self.params['not_tags']
- not_any_tags = self.params['not_any_tags']
-
- attrs = {}
-
- if description:
- attrs['description'] = description
- if project_id:
- attrs['project_id'] = project_id
- if revision_number:
- attrs['revision_number'] = revision_number
- if tags:
- attrs['tags'] = ','.join(tags)
- if any_tags:
- attrs['any_tags'] = ','.join(any_tags)
- if not_tags:
- attrs['not_tags'] = ','.join(not_tags)
- if not_any_tags:
- attrs['not_any_tags'] = ','.join(not_any_tags)
-
- attrs = self.check_versioned(**attrs)
- result = self.conn.network.security_groups(**attrs)
- result = [item if isinstance(item, dict) else item.to_dict() for item in result]
+ args = {k: self.params[k]
+ for k in ['description', 'project_id', 'revision_number']
+ if self.params[k]}
+
+ args.update({k: ','.join(self.params[k])
+ for k in ['tags', 'any_tags', 'not_tags', 'not_any_tags']
+ if self.params[k]})
+
+ # self.conn.search_security_groups() cannot be used here,
+ # refer to git blame for rationale.
+ security_groups = self.conn.network.security_groups(**args)
+
if name:
- result = [item for item in result if name in (item['id'], item['name'])]
- self.results.update({'security_groups': result})
+ # TODO: Upgrade name_or_id code to match openstacksdk [1]?
+ # [1] https://opendev.org/openstack/openstacksdk/src/commit/
+ # 0898398415ae7b0e2447d61226acf50f01567cdd/openstack/cloud/_utils.py#L89
+ security_groups = [item for item in security_groups
+ if name in (item['id'], item['name'])]
+
+ self.exit(changed=False,
+ security_groups=[sg.to_dict(computed=False)
+ for sg in security_groups])
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/security_group_rule.py b/ansible_collections/openstack/cloud/plugins/modules/security_group_rule.py
index 53fe6f590..a599b5c6c 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/security_group_rule.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/security_group_rule.py
@@ -1,84 +1,110 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: security_group_rule
-short_description: Add/Delete rule from an existing security group
+short_description: Manage security group rules in OpenStack network (Neutron)
author: OpenStack Ansible SIG
description:
- - Add or Remove rule from an existing security group
+ - Add or remove security group rule to/from OpenStack network (Neutron)
+ service.
+ - Use I(security_group_rules) in M(openstack.cloud.security_group) to define
+ a set of security group rules. It will be much faster than using this
+ module when creating or removing several security group rules because the
+ latter will do individual calls to OpenStack network (Neutron) API for each
+ security group rule.
options:
- security_group:
- description:
- - Name or ID of the security group
- required: true
- type: str
- protocol:
- description:
- - IP protocols ANY TCP UDP ICMP and others, also number in range 0-255
- type: str
- port_range_min:
- description:
- - Starting port
- type: int
- port_range_max:
- description:
- - Ending port
- type: int
- remote_ip_prefix:
- description:
- - Source IP address(es) in CIDR notation (exclusive with remote_group)
- type: str
- remote_group:
- description:
- - Name or ID of the Security group to link (exclusive with
- remote_ip_prefix)
- type: str
- ethertype:
- description:
- - Must be IPv4 or IPv6, and addresses represented in CIDR must
- match the ingress or egress rules. Not all providers support IPv6.
- choices: ['IPv4', 'IPv6']
- default: IPv4
- type: str
- direction:
- description:
- - The direction in which the security group rule is applied. Not
- all providers support egress.
- choices: ['egress', 'ingress']
- default: ingress
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- project:
- description:
- - Unique name or ID of the project.
- required: false
- type: str
- description:
- required: false
- description:
- - Description of the rule.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - Description of the security group rule.
+ type: str
+ direction:
+ description:
+ - The direction in which the security group rule is applied.
+ - Not all providers support C(egress).
+ choices: ['egress', 'ingress']
+ default: ingress
+ type: str
+ ether_type:
+ description:
+ - Must be IPv4 or IPv6, and addresses represented in CIDR must
+ match the ingress or egress rules. Not all providers support IPv6.
+ choices: ['IPv4', 'IPv6']
+ default: IPv4
+ type: str
+ aliases: ['ethertype']
+ port_range_max:
+ description:
+ - The maximum port number in the range that is matched by the security
+ group rule.
+ - If the protocol is TCP, UDP, DCCP, SCTP or UDP-Lite this value must be
+ greater than or equal to the I(port_range_min) attribute value.
+ - If the protocol is ICMP, this value must be an ICMP code.
+ type: int
+ port_range_min:
+ description:
+ - The minimum port number in the range that is matched by the security
+ group rule.
+ - If the protocol is TCP, UDP, DCCP, SCTP or UDP-Lite this value must be
+        less than or equal to the I(port_range_max) attribute value.
+ - If the protocol is ICMP, this value must be an ICMP type.
+ type: int
+ project:
+ description:
+ - Unique name or ID of the project.
+ type: str
+ protocol:
+ description:
+ - The IP protocol can be represented by a string, an integer, or null.
+ - Valid string or integer values are C(any) or C(0), C(ah) or C(51),
+ C(dccp) or C(33), C(egp) or C(8), C(esp) or C(50), C(gre) or C(47),
+ C(icmp) or C(1), C(icmpv6) or C(58), C(igmp) or C(2), C(ipip) or C(4),
+ C(ipv6-encap) or C(41), C(ipv6-frag) or C(44), C(ipv6-icmp) or C(58),
+ C(ipv6-nonxt) or C(59), C(ipv6-opts) or C(60), C(ipv6-route) or C(43),
+ C(ospf) or C(89), C(pgm) or C(113), C(rsvp) or C(46), C(sctp) or
+ C(132), C(tcp) or C(6), C(udp) or C(17), C(udplite) or C(136), C(vrrp)
+ or C(112).
+      - Additionally, any integer value in the range C(0-255) is also valid.
+      - The string C(any) (or integer C(0)) means all IP protocols.
+ - See the constants in neutron_lib.constants for the most up-to-date
+ list of supported strings.
+ type: str
+ remote_group:
+ description:
+ - Name or ID of the security group to link.
+ - Mutually exclusive with I(remote_ip_prefix).
+ type: str
+ remote_ip_prefix:
+ description:
+ - Source IP address(es) in CIDR notation.
+ - When a netmask such as C(/32) is missing from I(remote_ip_prefix), then
+ this module will fail on updates with OpenStack error message
+ C(Security group rule already exists.).
+ - Mutually exclusive with I(remote_group).
+ type: str
+ security_group:
+ description:
+ - Name or ID of the security group.
+ required: true
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
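
The I(protocol) option above accepts either a well-known name or the matching IP
protocol number. A minimal sketch of that equivalence in plain Python; the
mapping below only covers a few of the pairs listed above and is illustrative,
not the module's internal lookup table:

    # Illustrative subset of the name/number pairs documented above.
    PROTOCOL_NUMBERS = {'icmp': 1, 'tcp': 6, 'udp': 17, 'gre': 47, 'vrrp': 112}

    def normalize_protocol(value):
        # "vrrp" and 112 refer to the same protocol; unknown values pass through.
        return PROTOCOL_NUMBERS.get(str(value).lower(), value)

    assert normalize_protocol('vrrp') == normalize_protocol(112) == 112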
-EXAMPLES = '''
-# Create a security group rule
-- openstack.cloud.security_group_rule:
+EXAMPLES = r'''
+- name: Create a security group rule
+ openstack.cloud.security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
@@ -86,15 +112,15 @@ EXAMPLES = '''
port_range_max: 80
remote_ip_prefix: 0.0.0.0/0
-# Create a security group rule for ping
-- openstack.cloud.security_group_rule:
+- name: Create a security group rule for ping
+ openstack.cloud.security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
-# Another way to create the ping rule
-- openstack.cloud.security_group_rule:
+- name: Another way to create the ping rule
+ openstack.cloud.security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
@@ -102,8 +128,8 @@ EXAMPLES = '''
port_range_max: -1
remote_ip_prefix: 0.0.0.0/0
-# Create a TCP rule covering all ports
-- openstack.cloud.security_group_rule:
+- name: Create a TCP rule covering all ports
+ openstack.cloud.security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
@@ -111,273 +137,296 @@ EXAMPLES = '''
port_range_max: 65535
remote_ip_prefix: 0.0.0.0/0
-# Another way to create the TCP rule above (defaults to all ports)
-- openstack.cloud.security_group_rule:
+- name: Another way to create the TCP rule above (defaults to all ports)
+ openstack.cloud.security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
remote_ip_prefix: 0.0.0.0/0
-# Create a rule for VRRP with numbered protocol 112
-- openstack.cloud.security_group_rule:
+- name: Create a rule for VRRP with numbered protocol 112
+ openstack.cloud.security_group_rule:
security_group: loadbalancer_sg
protocol: 112
remote_group: loadbalancer-node_sg
-# Create a security group rule for a given project
-- openstack.cloud.security_group_rule:
+- name: Create a security group rule for a given project
+ openstack.cloud.security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
project: myproj
-# Remove the default created egress rule for IPv4
-- openstack.cloud.security_group_rule:
- cloud: mordred
- security_group: foo
- protocol: any
- remote_ip_prefix: 0.0.0.0/0
+- name: Remove the default created egress rule for IPv4
+ openstack.cloud.security_group_rule:
+ cloud: mordred
+ security_group: foo
+ protocol: any
+ remote_ip_prefix: 0.0.0.0/0
'''
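
As noted for I(remote_ip_prefix) above, passing an address without a netmask can
trigger C(Security group rule already exists.) errors on repeated runs. A
minimal sketch, using only the Python standard library C(ipaddress) module, of
normalizing an address to CIDR notation before handing it to this module; the
helper name C(to_cidr) is made up for illustration:

    import ipaddress

    def to_cidr(prefix):
        # A bare host address gains /32 (IPv4) or /128 (IPv6); prefixes that
        # already carry a netmask are returned unchanged.
        return str(ipaddress.ip_network(prefix, strict=False))

    assert to_cidr('192.0.2.1') == '192.0.2.1/32'
    assert to_cidr('0.0.0.0/0') == '0.0.0.0/0'
    assert to_cidr('2001:db8::1') == '2001:db8::1/128'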
-RETURN = '''
-id:
- description: Unique rule UUID.
- type: str
- returned: state == present
-direction:
- description: The direction in which the security group rule is applied.
- type: str
- sample: 'egress'
- returned: state == present
-ethertype:
- description: One of IPv4 or IPv6.
- type: str
- sample: 'IPv4'
- returned: state == present
-port_range_min:
- description: The minimum port number in the range that is matched by
- the security group rule.
- type: int
- sample: 8000
- returned: state == present
-port_range_max:
- description: The maximum port number in the range that is matched by
- the security group rule.
- type: int
- sample: 8000
- returned: state == present
-protocol:
- description: The protocol that is matched by the security group rule.
- type: str
- sample: 'tcp'
- returned: state == present
-remote_ip_prefix:
- description: The remote IP prefix to be associated with this security group rule.
- type: str
- sample: '0.0.0.0/0'
- returned: state == present
-security_group_id:
- description: The security group ID to associate with this security group rule.
- type: str
- returned: state == present
+RETURN = r'''
+rule:
+ description: Dictionary describing the security group rule
+ type: dict
+ returned: On success when I(state) is C(present).
+ contains:
+ created_at:
+ description: Timestamp when the resource was created
+ type: str
+ description:
+ description: Description of the resource
+ type: str
+ direction:
+ description: The direction in which the security group rule is applied.
+ type: str
+ sample: 'egress'
+ ether_type:
+ description: Either IPv4 or IPv6
+ type: str
+ id:
+ description: Unique rule UUID.
+ type: str
+ name:
+ description: Name of the resource.
+ type: str
+ port_range_max:
+ description: The maximum port number in the range that is matched by
+ the security group rule.
+ type: int
+ sample: 8000
+ port_range_min:
+ description: The minimum port number in the range that is matched by
+ the security group rule.
+ type: int
+ sample: 8000
+ project_id:
+ description: ID of the project the resource belongs to.
+ type: str
+ protocol:
+ description: The protocol that is matched by the security group rule.
+ type: str
+ sample: 'tcp'
+      remote_address_group_id:
+        description: The remote address group ID to be associated with this
+          security group rule.
+        type: str
+      remote_group_id:
+        description: The remote security group ID to be associated with this
+          security group rule.
+        type: str
+ remote_ip_prefix:
+ description: The remote IP prefix to be associated with this security
+ group rule.
+ type: str
+ sample: '0.0.0.0/0'
+ revision_number:
+ description: Revision number
+ type: int
+ sample: 0
+ security_group_id:
+ description: The security group ID to associate with this security group
+ rule.
+ type: str
+ tags:
+ description: Tags associated with resource.
+ type: list
+ elements: str
+ tenant_id:
+ description: ID of the project the resource belongs to. Deprecated.
+ type: str
+ updated_at:
+ description: Timestamp when the security group rule was last updated.
+ type: str
'''
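
For TCP and UDP the module treats the port range pairs (None, None), (1, 65535)
and (-1, -1) as the same thing, namely the full port range, when matching
existing rules (see the comment in _find_matches() below). A rough standalone
sketch of that comparison, illustrative only and not the module's code path:

    def is_full_port_range(port_min, port_max):
        # (None, None), (1, 65535) and (-1, -1) all denote "all ports" for
        # TCP/UDP; mixed forms such as (1, -1) are accepted as well.
        return port_min in (None, -1, 1) and port_max in (None, -1, 65535)

    assert is_full_port_range(None, None)
    assert is_full_port_range(1, 65535)
    assert is_full_port_range(-1, -1)
    assert not is_full_port_range(80, 80)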
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
OpenStackModule)
-def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
- """
- Capture the complex port matching logic.
-
- The port values coming in for the module might be -1 (for ICMP),
- which will work only for Nova, but this is handled by sdk. Likewise,
- they might be None, which works for Neutron, but not Nova. This too is
- handled by sdk. Since sdk will consistently return these port
- values as None, we need to convert any -1 values input to the module
- to None here for comparison.
-
- For TCP and UDP protocols, None values for both min and max are
- represented as the range 1-65535 for Nova, but remain None for
- Neutron. sdk returns the full range when Nova is the backend (since
- that is how Nova stores them), and None values for Neutron. If None
- values are input to the module for both values, then we need to adjust
- for comparison.
- """
-
- # Check if the user is supplying -1 for ICMP.
- if protocol in ['icmp', 'ipv6-icmp']:
- if module_min and int(module_min) == -1:
- module_min = None
- if module_max and int(module_max) == -1:
- module_max = None
-
- # Rules with 'any' protocol do not match ports
- if protocol == 'any':
- return True
-
- # Check if the user is supplying -1, 1 to 65535 or None values for full TPC/UDP port range.
- if protocol in ['tcp', 'udp'] or protocol is None:
- if (
- not module_min and not module_max
- or (int(module_min) in [-1, 1]
- and int(module_max) in [-1, 65535])
- ):
- if (
- not rule_min and not rule_max
- or (int(rule_min) in [-1, 1]
- and int(rule_max) in [-1, 65535])
- ):
- # (None, None) == (1, 65535) == (-1, -1)
- return True
-
- # Sanity check to make sure we don't have type comparison issues.
- if module_min:
- module_min = int(module_min)
- if module_max:
- module_max = int(module_max)
- if rule_min:
- rule_min = int(rule_min)
- if rule_max:
- rule_max = int(rule_max)
-
- return module_min == rule_min and module_max == rule_max
-
-
class SecurityGroupRuleModule(OpenStackModule):
- deprecated_names = ('os_security_group_rule', 'openstack.cloud.os_security_group_rule')
+ # NOTE: Keep handling of security group rules synchronized with
+ # security_group.py!
argument_spec = dict(
+ description=dict(),
+ direction=dict(default='ingress', choices=['egress', 'ingress']),
+ ether_type=dict(default='IPv4', choices=['IPv4', 'IPv6'],
+ aliases=['ethertype']),
+ port_range_max=dict(type='int'),
+ port_range_min=dict(type='int'),
+ project=dict(),
+ protocol=dict(),
+ remote_group=dict(),
+ remote_ip_prefix=dict(),
security_group=dict(required=True),
- protocol=dict(type='str'),
- port_range_min=dict(required=False, type='int'),
- port_range_max=dict(required=False, type='int'),
- remote_ip_prefix=dict(required=False),
- remote_group=dict(required=False),
- ethertype=dict(default='IPv4',
- choices=['IPv4', 'IPv6']),
- direction=dict(default='ingress',
- choices=['egress', 'ingress']),
- state=dict(default='present',
- choices=['absent', 'present']),
- description=dict(required=False, default=None),
- project=dict(default=None),
+ state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = dict(
mutually_exclusive=[
['remote_ip_prefix', 'remote_group'],
- ]
+ ],
+ supports_check_mode=True,
)
- def _find_matching_rule(self, secgroup, remotegroup):
- """
- Find a rule in the group that matches the module parameters.
- :returns: The matching rule dict, or None if no matches.
- """
- protocol = self.params['protocol']
- remote_ip_prefix = self.params['remote_ip_prefix']
- ethertype = self.params['ethertype']
- direction = self.params['direction']
- remote_group_id = remotegroup['id']
-
- for rule in secgroup['security_group_rules']:
- if (
- protocol == rule['protocol']
- and remote_ip_prefix == rule['remote_ip_prefix']
- and ethertype == rule['ethertype']
- and direction == rule['direction']
- and remote_group_id == rule['remote_group_id']
- and _ports_match(
- protocol,
- self.params['port_range_min'],
- self.params['port_range_max'],
- rule['port_range_min'],
- rule['port_range_max'])
- ):
- return rule
- return None
-
- def _system_state_change(self, secgroup, remotegroup):
- state = self.params['state']
- if secgroup:
- rule_exists = self._find_matching_rule(secgroup, remotegroup)
- else:
- return False
-
- if state == 'present' and not rule_exists:
- return True
- if state == 'absent' and rule_exists:
- return True
- return False
-
def run(self):
-
state = self.params['state']
- security_group = self.params['security_group']
- remote_group = self.params['remote_group']
- project = self.params['project']
- changed = False
-
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
- project_id = proj['id']
- else:
- project_id = self.conn.current_project_id
- if project_id and not remote_group:
- filters = {'tenant_id': project_id}
- else:
- filters = None
+ security_group_rule = self._find()
- secgroup = self.conn.get_security_group(security_group, filters=filters)
+ if self.ansible.check_mode:
+ self.exit_json(
+ changed=self._will_change(state, security_group_rule))
+
+ if state == 'present' and not security_group_rule:
+ # Create security_group_rule
+ security_group_rule = self._create()
+ self.exit_json(changed=True,
+ rule=security_group_rule.to_dict(computed=False))
+
+ elif state == 'present' and security_group_rule:
+ # Only exact matches will cause security_group_rule to be not None
+ self.exit_json(changed=False,
+ rule=security_group_rule.to_dict(computed=False))
+ elif state == 'absent' and security_group_rule:
+ # Delete security_group_rule
+ self._delete(security_group_rule)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not security_group_rule:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _create(self):
+ prototype = self._define_prototype()
+ return self.conn.network.create_security_group_rule(**prototype)
+
+ def _define_prototype(self):
+ filters = {}
+ prototype = dict((k, self.params[k])
+ for k in ['description', 'direction',
+ 'remote_ip_prefix']
+ if self.params[k] is not None)
+
+ # When remote_ip_prefix is missing a netmask, then Neutron will add
+ # a netmask using Python library netaddr [0] and its IPNetwork
+ # class [1]. We do not want to introduce additional Python
+ # dependencies to our code base and neither want to replicate
+ # netaddr's parse_ip_network code here. So we do not handle
+ # remote_ip_prefix without a netmask and instead let Neutron handle
+ # it.
+ # [0] https://opendev.org/openstack/neutron/src/commit/\
+ # 43d94640568828f5e98bbb1e9df985ec3f1bb2d2/neutron/db/securitygroups_db.py#L775
+ # [1] https://github.com/netaddr/netaddr/blob/\
+ # b1d8f016abee00c8a93e35b928acdc22797c800a/netaddr/ip/__init__.py#L841
+ # [2] https://github.com/netaddr/netaddr/blob/\
+ # b1d8f016abee00c8a93e35b928acdc22797c800a/netaddr/ip/__init__.py#L773
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id is not None:
+ project = self.conn.identity.find_project(project_name_or_id,
+ ignore_missing=False)
+ filters = {'project_id': project.id}
+ prototype['project_id'] = project.id
+
+ security_group_name_or_id = self.params['security_group']
+ security_group = self.conn.network.find_security_group(
+ security_group_name_or_id, ignore_missing=False, **filters)
+ prototype['security_group_id'] = security_group.id
+
+ remote_group = None
+ remote_group_name_or_id = self.params['remote_group']
+ if remote_group_name_or_id is not None:
+ remote_group = self.conn.network.find_security_group(
+ remote_group_name_or_id, ignore_missing=False)
+ prototype['remote_group_id'] = remote_group.id
+
+ ether_type = self.params['ether_type']
+ if ether_type is not None:
+ prototype['ether_type'] = ether_type
- if remote_group:
- remotegroup = self.conn.get_security_group(remote_group, filters=filters)
+ protocol = self.params['protocol']
+ if protocol is not None and protocol not in ['any', '0']:
+ prototype['protocol'] = protocol
+
+ port_range_max = self.params['port_range_max']
+ port_range_min = self.params['port_range_min']
+
+ if protocol in ['icmp', 'ipv6-icmp']:
+ # Check if the user is supplying -1 for ICMP.
+ if port_range_max is not None and int(port_range_max) != -1:
+ prototype['port_range_max'] = int(port_range_max)
+ if port_range_min is not None and int(port_range_min) != -1:
+ prototype['port_range_min'] = int(port_range_min)
+ elif protocol in ['tcp', 'udp']:
+ if port_range_max is not None and int(port_range_max) != -1:
+ prototype['port_range_max'] = int(port_range_max)
+ if port_range_min is not None and int(port_range_min) != -1:
+ prototype['port_range_min'] = int(port_range_min)
+ elif protocol in ['any', '0']:
+ # Rules with 'any' protocol do not match ports
+ pass
else:
- remotegroup = {'id': None}
-
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(secgroup, remotegroup))
-
- if state == 'present':
- if self.params['protocol'] == 'any':
- self.params['protocol'] = None
-
- if not secgroup:
- self.fail_json(msg='Could not find security group %s' % security_group)
-
- rule = self._find_matching_rule(secgroup, remotegroup)
- if not rule:
- kwargs = {}
- if project_id:
- kwargs['project_id'] = project_id
- if self.params["description"] is not None:
- kwargs["description"] = self.params['description']
- rule = self.conn.network.create_security_group_rule(
- security_group_id=secgroup['id'],
- port_range_min=None if self.params['port_range_min'] == -1 else self.params['port_range_min'],
- port_range_max=None if self.params['port_range_max'] == -1 else self.params['port_range_max'],
- protocol=self.params['protocol'],
- remote_ip_prefix=self.params['remote_ip_prefix'],
- remote_group_id=remotegroup['id'],
- direction=self.params['direction'],
- ethertype=self.params['ethertype'],
- **kwargs
- )
- changed = True
- self.exit_json(changed=changed, rule=rule, id=rule['id'])
-
- if state == 'absent' and secgroup:
- rule = self._find_matching_rule(secgroup, remotegroup)
- if rule:
- self.conn.delete_security_group_rule(rule['id'])
- changed = True
-
- self.exit_json(changed=changed)
+ if port_range_max is not None:
+ prototype['port_range_max'] = int(port_range_max)
+ if port_range_min is not None:
+ prototype['port_range_min'] = int(port_range_min)
+
+ return prototype
+
+ def _delete(self, security_group_rule):
+ self.conn.network.delete_security_group_rule(security_group_rule.id)
+
+ def _find(self):
+ # Replacing this code with self.conn.network.find_security_group_rule()
+ # is not possible because the latter requires an id or name.
+ matches = self._find_matches()
+ if len(matches) > 1:
+            self.fail_json(msg='Found more than a single matching security'
+                               ' group rule for the given parameters.')
+ elif len(matches) == 1:
+ return self.conn.network.get_security_group_rule(matches[0]['id'])
+ else: # len(matches) == 0
+ return None
+
+ def _find_matches(self):
+ prototype = self._define_prototype()
+
+ security_group = self.conn.network.\
+ get_security_group(prototype['security_group_id'])
+
+ if 'ether_type' in prototype:
+ prototype['ethertype'] = prototype.pop('ether_type')
+
+ if 'protocol' in prototype and prototype['protocol'] in ['tcp', 'udp']:
+ # Check if the user is supplying -1, 1 to 65535 or None values
+            # for full TCP or UDP port range.
+ # (None, None) == (1, 65535) == (-1, -1)
+ if 'port_range_max' in prototype \
+ and prototype['port_range_max'] in [-1, 65535]:
+ prototype.pop('port_range_max')
+ if 'port_range_min' in prototype \
+ and prototype['port_range_min'] in [-1, 1]:
+ prototype.pop('port_range_min')
+
+ return [r for r in security_group.security_group_rules
+ if all(r[k] == prototype[k] for k in prototype.keys())]
+
+ def _will_change(self, state, security_group_rule):
+ if state == 'present' and not security_group_rule:
+ return True
+ elif state == 'present' and security_group_rule:
+ # Only exact matches will cause security_group_rule to be not None
+ return False
+ elif state == 'absent' and security_group_rule:
+ return True
+ else:
+ # state == 'absent' and not security_group_rule:
+ return False
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/security_group_rule_info.py b/ansible_collections/openstack/cloud/plugins/modules/security_group_rule_info.py
index b00f71927..ca9fa2810 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/security_group_rule_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/security_group_rule_info.py
@@ -1,16 +1,16 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2020 by Tino Schreiber (Open Telekom Cloud), operated by T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: security_group_rule_info
-short_description: Querying security group rules
+short_description: Fetch OpenStack network (Neutron) security group rules
author: OpenStack Ansible SIG
description:
- - Querying security group rules
+ - Fetch security group rules from OpenStack network (Neutron) API.
options:
description:
description:
@@ -23,12 +23,18 @@ options:
which the security group rule is applied.
choices: ['egress', 'ingress']
type: str
- ethertype:
+ ether_type:
description:
- - Filter the security group rule list result by the ethertype of
+ - Filter the security group rule list result by the ether_type of
network traffic. The value must be IPv4 or IPv6.
choices: ['IPv4', 'IPv6']
type: str
+ aliases: ['ethertype']
+ id:
+ description:
+ - Filter the list result by the ID of the security group rule.
+ type: str
+ aliases: ['rule']
port_range_min:
description:
- Starting port
@@ -46,7 +52,6 @@ options:
description:
- Filter the security group rule list result by the IP protocol.
type: str
- choices: ['any', 'tcp', 'udp', 'icmp', '112', '132']
remote_group:
description:
- Filter the security group rule list result by the name or ID of the
@@ -60,52 +65,43 @@ options:
description:
- Filter the list result by the revision number of the resource.
type: int
- rule:
- description:
- - Filter the list result by the ID of the security group rule.
- type: str
security_group:
description:
- Name or ID of the security group
type: str
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Get all security group rules
-- openstack.cloud.security_group_rule_info:
- cloud: "{{ cloud }}"
- register: sg
+EXAMPLES = r'''
+- name: Fetch all security group rules
+ openstack.cloud.security_group_rule_info:
+ cloud: devstack
-# Filter security group rules for port 80 and name
-- openstack.cloud.security_group_rule_info:
- cloud: "{{ cloud }}"
- security_group: "{{ rule_name }}"
+- name: Filter security group rules for port 80 and name
+ openstack.cloud.security_group_rule_info:
+ cloud: devstack
+ security_group: foo
protocol: tcp
port_range_min: 80
port_range_max: 80
remote_ip_prefix: 0.0.0.0/0
-# Filter for ICMP rules
-- openstack.cloud.security_group_rule_info:
- cloud: "{{ cloud }}"
+- name: Filter for ICMP rules
+ openstack.cloud.security_group_rule_info:
+ cloud: devstack
protocol: icmp
'''
-RETURN = '''
+RETURN = r'''
security_group_rules:
description: List of dictionaries describing security group rules.
- type: complex
- returned: On Success.
+ type: list
+ elements: dict
+ returned: always
contains:
- id:
- description: Unique rule UUID.
+ created_at:
+ description: Timestamp when the security group rule was created.
type: str
description:
description: Human-readable description of the resource.
@@ -115,37 +111,70 @@ security_group_rules:
description: The direction in which the security group rule is applied.
type: str
sample: 'egress'
- ethertype:
+ ether_type:
description: One of IPv4 or IPv6.
type: str
sample: 'IPv4'
- port_range_min:
- description: The minimum port number in the range that is matched by
- the security group rule.
- type: int
- sample: 8000
+ id:
+ description: Unique rule UUID.
+ type: str
+ name:
+ description: Name of the resource.
+ type: str
port_range_max:
description: The maximum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
- project:
- description:
- - Unique ID of the project.
+ port_range_min:
+ description: The minimum port number in the range that is matched by
+ the security group rule.
+ type: int
+ sample: 8000
+ project_id:
+ description: The ID of the project.
type: str
- sample: '16d53a84a13b49529d2e2c3646691123'
+ sample: 'e4f50856753b4dc6afee5fa6b9b6c550'
protocol:
description: The protocol that is matched by the security group rule.
type: str
sample: 'tcp'
+ remote_address_group_id:
+ description: The remote address group ID to be associated with this
+ security group rule.
+ type: str
+ remote_group_id:
+ description: The remote security group ID to be associated with this
+ security group rule.
+ type: str
remote_ip_prefix:
- description: The remote IP prefix to be associated with this security group rule.
+ description: The remote IP prefix to be associated with this security
+ group rule.
+ type: str
+ revision_number:
+        description: The revision number of the resource.
type: str
sample: '0.0.0.0/0'
security_group_id:
- description: The security group ID to associate with this security group rule.
+ description: The security group ID to associate with this security
+ group rule.
type: str
sample: '729b9660-a20a-41fe-bae6-ed8fa7f69123'
+ tags:
+        description: Tags associated with the resource.
+        type: list
+        elements: str
+ tenant_id:
+ description: The ID of the project. Deprecated.
+ type: str
+ sample: 'e4f50856753b4dc6afee5fa6b9b6c550'
+ updated_at:
+ description: Time at which the resource has been updated
+ (in UTC ISO8601 format).
+ type: str
+ sample: '2018-03-19T19:16:56Z'
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
@@ -154,92 +183,56 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
class SecurityGroupRuleInfoModule(OpenStackModule):
argument_spec = dict(
- description=dict(required=False, type='str'),
- direction=dict(required=False,
- type='str',
- choices=['egress', 'ingress']),
- ethertype=dict(required=False,
- type='str',
- choices=['IPv4', 'IPv6']),
- port_range_min=dict(required=False, type='int', min_ver="0.32.0"),
- port_range_max=dict(required=False, type='int', min_ver="0.32.0"),
- project=dict(required=False, type='str'),
- protocol=dict(required=False,
- type='str',
- choices=['any', 'tcp', 'udp', 'icmp', '112', '132']),
- remote_group=dict(required=False, type='str'),
- remote_ip_prefix=dict(required=False, type='str', min_ver="0.32.0"),
- revision_number=dict(required=False, type='int'),
- rule=dict(required=False, type='str'),
- security_group=dict(required=False, type='str')
+ description=dict(),
+ direction=dict(choices=['egress', 'ingress']),
+ ether_type=dict(choices=['IPv4', 'IPv6'], aliases=['ethertype']),
+ id=dict(aliases=['rule']),
+ port_range_min=dict(type='int'),
+ port_range_max=dict(type='int'),
+ project=dict(),
+ protocol=dict(),
+ remote_group=dict(),
+ remote_ip_prefix=dict(),
+ revision_number=dict(type='int'),
+ security_group=dict()
)
module_kwargs = dict(
mutually_exclusive=[
- ['remote_ip_prefix', 'remote_group'],
+ ('remote_ip_prefix', 'remote_group'),
],
supports_check_mode=True
)
def run(self):
- description = self.params['description']
- direction = self.params['direction']
- ethertype = self.params['ethertype']
- project = self.params['project']
- protocol = self.params['protocol']
- remote_group = self.params['remote_group']
- revision_number = self.params['revision_number']
- rule = self.params['rule']
- security_group = self.params['security_group']
-
- changed = False
- filters = self.check_versioned(
- port_range_min=self.params['port_range_min'],
- port_range_max=self.params['port_range_max'],
- remote_ip_prefix=self.params['remote_ip_prefix']
- )
- data = []
-
- if rule:
- sec_rule = self.conn.network.get_security_group_rule(rule)
- if sec_rule is None:
- self.exit(changed=changed, security_group_rules=[])
- self.exit(changed=changed,
- security_group_rules=sec_rule.to_dict())
- # query parameter id is currently not supported
- # PR is open for that.
- # filters['id] = sec_rule.id
- if description:
- filters['description'] = description
- if direction:
- filters['direction'] = direction
- if ethertype:
- filters['ethertype'] = ethertype
- if project:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
- filters['project_id'] = proj.id
- if protocol:
- filters['protocol'] = protocol
- if remote_group:
- filters['remote_group_id'] = remote_group
- if revision_number:
- filters['revision_number'] = revision_number
- if security_group:
- sec_grp = self.conn.network.find_security_group(
- name_or_id=security_group,
- ignore_missing=True)
- if sec_grp is None:
- self.fail_json(msg='Security group %s could not be found' % sec_grp)
- filters['security_group_id'] = sec_grp.id
-
- for item in self.conn.network.security_group_rules(**filters):
- item = item.to_dict()
- data.append(item)
-
- self.exit_json(changed=changed,
- security_group_rules=data)
+ filters = dict((k, self.params[k])
+ for k in ['description', 'direction', 'ether_type',
+ 'id', 'port_range_min', 'port_range_max',
+ 'protocol', 'remote_group',
+ 'revision_number', 'remote_ip_prefix']
+ if self.params[k] is not None)
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id is not None:
+ project = self.conn.find_project(project_name_or_id)
+ if not project:
+ self.exit_json(changed=False, security_group_rules=[])
+ filters['project_id'] = project.id
+
+ security_group_name_or_id = self.params['security_group']
+ if security_group_name_or_id is not None:
+ security_group = self.conn.network.\
+ find_security_group(security_group_name_or_id)
+ if not security_group:
+ self.exit_json(changed=False, security_group_rules=[])
+ filters['security_group_id'] = security_group.id
+
+ security_group_rules = \
+ self.conn.network.security_group_rules(**filters)
+
+ self.exit_json(changed=False,
+ security_group_rules=[r.to_dict(computed=False)
+ for r in security_group_rules])
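
The run() method above boils down to a filtered security_group_rules() query
against the network service. Roughly the same query expressed directly with
openstacksdk, assuming a clouds.yaml entry named devstack and a security group
named foo exist; this is an illustrative sketch, not the module's code:

    import openstack

    # Assumes a clouds.yaml entry named "devstack" and a group named "foo".
    conn = openstack.connect(cloud='devstack')
    security_group = conn.network.find_security_group('foo')
    rules = conn.network.security_group_rules(
        security_group_id=security_group.id,
        protocol='tcp',
        port_range_min=80,
        port_range_max=80)
    for rule in rules:
        print(rule.to_dict(computed=False))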
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/server.py b/ansible_collections/openstack/cloud/plugins/modules/server.py
index a3ca7d051..d71799023 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/server.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/server.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat, Inc.
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
@@ -15,194 +15,238 @@ author: OpenStack Ansible SIG
description:
- Create or Remove compute instances from OpenStack.
options:
- name:
- description:
- - Name that has to be given to the instance. It is also possible to
- specify the ID of the instance instead of its name if I(state) is I(absent).
- required: true
- type: str
- image:
- description:
- - The name or id of the base image to boot.
- - Required when I(boot_from_volume=true)
- type: str
- image_exclude:
- description:
- - Text to use to filter image names, for the case, such as HP, where
- there are multiple image names matching the common identifying
- portions. image_exclude is a negative match filter - it is text that
- may not exist in the image name.
- type: str
- default: "(deprecated)"
- flavor:
- description:
+ auto_ip:
+ description:
+      - Ensure the instance has a public IP, however the cloud provides it.
+ - For example, the cloud could add a floating ip for the server or
+ attach the server to a public network.
+ - Requires I(wait) to be C(True) during server creation.
+ - Floating IP support is unstable in this module, use with caution.
+ - Options I(auto_ip), I(floating_ip_pools) and I(floating_ips) interact
+        in non-obvious ways and to an undocumentable depth. For explicit and
+        safe attaching and detaching of floating IP addresses, use module
+ I(openstack.cloud.resource) instead.
+ type: bool
+ default: 'true'
+ aliases: ['auto_floating_ip', 'public_ip']
+ availability_zone:
+ description:
+ - Availability zone in which to create the server.
+ - This server attribute cannot be updated.
+ type: str
+ boot_from_volume:
+ description:
+ - Should the instance boot from a persistent volume created based on
+ the image given. Mutually exclusive with boot_volume.
+ - This server attribute cannot be updated.
+ type: bool
+ default: 'false'
+ boot_volume:
+ description:
+ - Volume name or id to use as the volume to boot from. Implies
+ boot_from_volume. Mutually exclusive with image and boot_from_volume.
+ - This server attribute cannot be updated.
+ aliases: ['root_volume']
+ type: str
+ config_drive:
+ description:
+ - Whether to boot the server with config drive enabled.
+ - This server attribute cannot be updated.
+ type: bool
+ default: 'false'
+ delete_ips:
+ description:
+ - When I(state) is C(absent) and this option is true, any floating IP
+ address associated with this server will be deleted along with it.
+ - Floating IP support is unstable in this module, use with caution.
+ type: bool
+ aliases: ['delete_fip']
+ default: 'false'
+ description:
+ description:
+ - Description of the server.
+ type: str
+ flavor:
+ description:
- The name or id of the flavor in which the new instance has to be
created.
- Exactly one of I(flavor) and I(flavor_ram) must be defined when
I(state=present).
- type: str
- flavor_ram:
- description:
- - The minimum amount of ram in MB that the flavor in which the new
- instance has to be created must have.
- - Exactly one of I(flavor) and I(flavor_ram) must be defined when
- I(state=present).
- type: int
- flavor_include:
- description:
+ - This server attribute cannot be updated.
+ type: str
+ flavor_include:
+ description:
- Text to use to filter flavor names, for the case, such as Rackspace,
where there are multiple flavors that have the same ram count.
flavor_include is a positive match filter - it must exist in the
flavor name.
- type: str
- key_name:
- description:
- - The key pair name to be used when creating a instance
- type: str
- security_groups:
- description:
- - Names of the security groups to which the instance should be
- added. This may be a YAML list or a comma separated string.
- type: list
- default: ['default']
- elements: str
- network:
- description:
+ - This server attribute cannot be updated.
+ type: str
+ flavor_ram:
+ description:
+ - The minimum amount of ram in MB that the flavor in which the new
+ instance has to be created must have.
+ - Exactly one of I(flavor) and I(flavor_ram) must be defined when
+ I(state=present).
+ - This server attribute cannot be updated.
+ type: int
+ floating_ip_pools:
+ description:
+ - Name of floating IP pool from which to choose a floating IP.
+ - Requires I(wait) to be C(True) during server creation.
+ - Floating IP support is unstable in this module, use with caution.
+ - Options I(auto_ip), I(floating_ip_pools) and I(floating_ips) interact
+        in non-obvious ways and to an undocumentable depth. For explicit and
+        safe attaching and detaching of floating IP addresses, use module
+ I(openstack.cloud.resource) instead.
+ type: list
+ elements: str
+ floating_ips:
+ description:
+      - List of pre-existing floating IPs to assign to this server.
+ - Requires I(wait) to be C(True) during server creation.
+ - Floating IP support is unstable in this module, use with caution.
+ - Options I(auto_ip), I(floating_ip_pools) and I(floating_ips) interact
+        in non-obvious ways and to an undocumentable depth. For explicit and
+        safe attaching and detaching of floating IP addresses, use module
+ I(openstack.cloud.resource) instead.
+ type: list
+ elements: str
+ image:
+ description:
+ - The name or id of the base image to boot.
+ - Required when I(boot_from_volume=true).
+ - This server attribute cannot be updated.
+ type: str
+ image_exclude:
+ description:
+ - Text to use to filter image names, for the case, such as HP, where
+ there are multiple image names matching the common identifying
+ portions. image_exclude is a negative match filter - it is text that
+ may not exist in the image name.
+ - This server attribute cannot be updated.
+ type: str
+ default: "(deprecated)"
+ key_name:
+ description:
+      - The key pair name to be used when creating an instance.
+ - This server attribute cannot be updated.
+ type: str
+ metadata:
+ description:
+      - 'A dictionary of key-value pairs that should be provided as metadata
+        to the new instance, or a string containing a list of key-value
+        pairs. Example: metadata: "key1=value1,key2=value2"'
+ aliases: ['meta']
+ type: raw
+ name:
+ description:
+ - Name that has to be given to the instance. It is also possible to
+ specify the ID of the instance instead of its name if I(state) is
+ I(absent).
+ - This server attribute cannot be updated.
+ required: true
+ type: str
+ network:
+ description:
- Name or ID of a network to attach this instance to. A simpler
- version of the nics parameter, only one of network or nics should
- be supplied.
- type: str
- nics:
- description:
+ version of the I(nics) parameter, only one of I(network) or I(nics)
+ should be supplied.
+ - This server attribute cannot be updated.
+ type: str
+ nics:
+ description:
- A list of networks to which the instance's interface should
be attached. Networks may be referenced by net-id/net-name/port-id
or port-name.
- 'Also this accepts a string containing a list of (net/port)-(id/name)
- Eg: nics: "net-id=uuid-1,port-name=myport"
- Only one of network or nics should be supplied.'
- type: list
- elements: raw
- suboptions:
- tag:
- description:
- - 'A "tag" for the specific port to be passed via metadata.
- Eg: tag: test_tag'
- auto_ip:
- description:
- - Ensure instance has public ip however the cloud wants to do that
- type: bool
- default: 'yes'
- aliases: ['auto_floating_ip', 'public_ip']
- floating_ips:
- description:
- - list of valid floating IPs that pre-exist to assign to this node
- type: list
- elements: str
- floating_ip_pools:
- description:
- - Name of floating IP pool from which to choose a floating IP
- type: list
- elements: str
- meta:
- description:
- - 'A list of key value pairs that should be provided as a metadata to
- the new instance or a string containing a list of key-value pairs.
- Eg: meta: "key1=value1,key2=value2"'
- type: raw
- wait:
- description:
- - If the module should wait for the instance to be created.
- type: bool
- default: 'yes'
- timeout:
- description:
+ Example: C(nics: "net-id=uuid-1,port-name=myport")'
+ - Only one of I(network) or I(nics) should be supplied.
+ - This server attribute cannot be updated.
+ type: list
+ elements: raw
+ default: []
+ suboptions:
+ tag:
+ description:
+ - 'A I(tag) for the specific port to be passed via metadata.
+ Eg: C(tag: test_tag)'
+ reuse_ips:
+ description:
+ - When I(auto_ip) is true and this option is true, the I(auto_ip) code
+ will attempt to re-use unassigned floating ips in the project before
+ creating a new one. It is important to note that it is impossible
+ to safely do this concurrently, so if your use case involves
+ concurrent server creation, it is highly recommended to set this to
+ false and to delete the floating ip associated with a server when
+ the server is deleted using I(delete_ips).
+ - Floating IP support is unstable in this module, use with caution.
+ - This server attribute cannot be updated.
+ type: bool
+ default: 'true'
+ scheduler_hints:
+ description:
+ - Arbitrary key/value pairs to the scheduler for custom use.
+ - This server attribute cannot be updated.
+ type: dict
+ security_groups:
+ description:
+ - Names or IDs of the security groups to which the instance should be
+ added.
+ - On server creation, if I(security_groups) is omitted, the API creates
+ the server in the default security group.
+ - Requested security groups are not applied to pre-existing ports.
+ type: list
+ elements: str
+ default: []
+ state:
+ description:
+ - Should the resource be C(present) or C(absent).
+ choices: [present, absent]
+ default: present
+ type: str
+ terminate_volume:
+ description:
+      - If C(true), delete the volume when deleting the instance, provided it
+        has been booted from volume(s).
+ - This server attribute cannot be updated.
+ type: bool
+ default: 'false'
+ timeout:
+ description:
- The amount of time the module should wait for the instance to get
into active state.
- default: 180
- type: int
- config_drive:
- description:
- - Whether to boot the server with config drive enabled
- type: bool
- default: 'no'
- userdata:
- description:
- - Opaque blob of data which is made available to the instance
- type: str
- aliases: ['user_data']
- boot_from_volume:
- description:
- - Should the instance boot from a persistent volume created based on
- the image given. Mutually exclusive with boot_volume.
- type: bool
- default: 'no'
- volume_size:
- description:
+ default: 180
+ type: int
+ userdata:
+ description:
+ - Opaque blob of data which is made available to the instance.
+ - This server attribute cannot be updated.
+ type: str
+ volume_size:
+ description:
- The size of the volume to create in GB if booting from volume based
on an image.
- type: int
- boot_volume:
- description:
- - Volume name or id to use as the volume to boot from. Implies
- boot_from_volume. Mutually exclusive with image and boot_from_volume.
- aliases: ['root_volume']
- type: str
- terminate_volume:
- description:
- - If C(yes), delete volume when deleting instance (if booted from volume)
- type: bool
- default: 'no'
- volumes:
- description:
- - A list of preexisting volumes names or ids to attach to the instance
- default: []
- type: list
- elements: str
- scheduler_hints:
- description:
- - Arbitrary key/value pairs to the scheduler for custom use
- type: dict
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
- delete_fip:
- description:
- - When I(state) is absent and this option is true, any floating IP
- associated with the instance will be deleted along with the instance.
- type: bool
- default: 'no'
- reuse_ips:
- description:
- - When I(auto_ip) is true and this option is true, the I(auto_ip) code
- will attempt to re-use unassigned floating ips in the project before
- creating a new one. It is important to note that it is impossible
- to safely do this concurrently, so if your use case involves
- concurrent server creation, it is highly recommended to set this to
- false and to delete the floating ip associated with a server when
- the server is deleted using I(delete_fip).
- type: bool
- default: 'yes'
- availability_zone:
- description:
- - Availability zone in which to create the server.
- type: str
- description:
- description:
- - Description of the server.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ - This server attribute cannot be updated.
+ type: int
+ volumes:
+ description:
+ - A list of preexisting volumes names or ids to attach to the instance
+ - This server attribute cannot be updated.
+ default: []
+ type: list
+ elements: str
+ wait:
+ description:
+ - If the module should wait for the instance to be created.
+ type: bool
+ default: 'true'
extends_documentation_fragment:
- openstack.cloud.openstack
'''
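
The I(metadata) option documented above accepts either a dictionary or a
comma-separated key=value string. A minimal sketch of how such a string maps
onto the dictionary form, modelled on the _parse_meta() helper that this diff
removes further down; the helper name parse_metadata is made up for
illustration:

    def parse_metadata(value):
        # Mirrors the behaviour of the removed _parse_meta() helper: dicts
        # pass through, "key1=value1,key2=value2" strings become a dict.
        if isinstance(value, dict):
            return value
        if not value:
            return {}
        return dict(item.split('=', 1) for item in str(value).split(','))

    assert parse_metadata('key1=value1,key2=value2') == {'key1': 'value1',
                                                         'key2': 'value2'}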
EXAMPLES = '''
-- name: Create a new instance and attaches to a network and passes metadata to the instance
+- name: Create a new instance with metadata and attaches it to a network
openstack.cloud.server:
state: present
auth:
@@ -242,8 +286,9 @@ EXAMPLES = '''
key_name: test
timeout: 200
flavor: 101
- security_groups: default
- auto_ip: yes
+ security_groups:
+ - default
+ auto_ip: true
# Create a new instance in named cloud mordred availability zone az2
# and assigns a pre-known floating IP
@@ -307,9 +352,11 @@ EXAMPLES = '''
key_name: ansible_key
timeout: 200
flavor: 4
- nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."
+ nics: >-
+ net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,
+ net-id=542f0430-62fe-11e5-9d70-feff819cdc9f
-- name: Creates a new instance and attaches to a network and passes metadata to the instance
+- name: Creates a new instance with metadata and attaches it to a network
openstack.cloud.server:
state: present
auth:
@@ -402,15 +449,13 @@ EXAMPLES = '''
openstack.cloud.server:
name: vm1
state: present
- image: "Ubuntu Server 14.04"
+ image: "Ubuntu Server 22.04"
flavor: "P-1"
network: "Production"
userdata: |
- {%- raw -%}#!/bin/bash
- echo " up ip route add 10.0.0.0/8 via {% endraw -%}{{ intra_router }}{%- raw -%}" >> /etc/network/interfaces.d/eth0.conf
- echo " down ip route del 10.0.0.0/8" >> /etc/network/interfaces.d/eth0.conf
- ifdown eth0 && ifup eth0
- {% endraw %}
+ #!/bin/sh
+ apt update
+ apt -y full-upgrade
# Create a new instance with server group for (anti-)affinity
# server group ID is returned from openstack.cloud.server_group module.
@@ -455,67 +500,340 @@ EXAMPLES = '''
'''
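
The I(nics) option accepts either a list of dictionaries or a comma-separated
string such as C(net-id=uuid-1,port-name=myport). A small sketch of that
mapping, modelled on the _parse_nics() helper removed just below; illustrative
only, not part of the collection:

    def parse_nics(nics):
        # Strings are split on "," and each "key=value" part becomes its own
        # single-entry dict; dict entries are passed through untouched.
        result = []
        for net in nics:
            if isinstance(net, str):
                for nic in net.split(','):
                    key, _, value = nic.partition('=')
                    result.append({key: value})
            else:
                result.append(net)
        return result

    assert parse_nics(['net-id=uuid-1,port-name=myport']) == [
        {'net-id': 'uuid-1'}, {'port-name': 'myport'}]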
-from ansible_collections.openstack.cloud.plugins.module_utils.openstack import (
- openstack_find_nova_addresses, OpenStackModule)
-
-
-def _parse_nics(nics):
- for net in nics:
- if isinstance(net, str):
- for nic in net.split(','):
- yield dict((nic.split('='),))
- else:
- yield net
-
-
-def _parse_meta(meta):
- if isinstance(meta, str):
- metas = {}
- for kv_str in meta.split(","):
- k, v = kv_str.split("=")
- metas[k] = v
- return metas
- if not meta:
- return {}
- return meta
+RETURN = '''
+server:
+ description: Dictionary describing the server.
+ type: dict
+ returned: On success when I(state) is 'present'.
+ contains:
+ access_ipv4:
+ description: |
+ IPv4 address that should be used to access this server.
+ May be automatically set by the provider.
+ returned: success
+ type: str
+ access_ipv6:
+ description: |
+ IPv6 address that should be used to access this
+ server. May be automatically set by the provider.
+ returned: success
+ type: str
+ addresses:
+ description: |
+ A dictionary of addresses this server can be accessed through.
+ The dictionary contains keys such as 'private' and 'public',
+ each containing a list of dictionaries for addresses of that
+ type. The addresses are contained in a dictionary with keys
+ 'addr' and 'version', which is either 4 or 6 depending on the
+ protocol of the IP address.
+ returned: success
+ type: dict
+ admin_password:
+ description: |
+ When a server is first created, it provides the administrator
+ password.
+ returned: success
+ type: str
+ attached_volumes:
+ description: |
+        A list of attached volumes. Each item in the list contains
+        at least an 'id' key to identify the specific volume.
+ returned: success
+ type: list
+ availability_zone:
+ description: |
+ The name of the availability zone this server is a part of.
+ returned: success
+ type: str
+ block_device_mapping:
+ description: |
+ Enables fine grained control of the block device mapping for an
+ instance. This is typically used for booting servers from
+ volumes.
+ returned: success
+ type: str
+ compute_host:
+ description: |
+ The name of the compute host on which this instance is running.
+ Appears in the response for administrative users only.
+ returned: success
+ type: str
+ config_drive:
+ description: |
+ Indicates whether or not a config drive was used for this
+ server.
+ returned: success
+ type: str
+ created_at:
+ description: Timestamp of when the server was created.
+ returned: success
+ type: str
+ description:
+ description: |
+ The description of the server. Before microversion
+ 2.19 this was set to the server name.
+ returned: success
+ type: str
+ disk_config:
+ description: The disk configuration. Either AUTO or MANUAL.
+ returned: success
+ type: str
+ flavor:
+ description: The flavor property as returned from server.
+ returned: success
+ type: dict
+ flavor_id:
+ description: |
+ The flavor reference, as a ID or full URL, for the flavor to
+ use for this server.
+ returned: success
+ type: str
+ has_config_drive:
+ description: |
+ Indicates whether a configuration drive enables metadata
+ injection. Not all cloud providers enable this feature.
+ returned: success
+ type: str
+ host_id:
+ description: An ID representing the host of this server.
+ returned: success
+ type: str
+ host_status:
+ description: The host status.
+ returned: success
+ type: str
+ hostname:
+ description: |
+ The hostname set on the instance when it is booted.
+ By default, it appears in the response for administrative users
+ only.
+ returned: success
+ type: str
+ hypervisor_hostname:
+ description: |
+ The hypervisor host name. Appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ id:
+ description: ID of the server.
+ returned: success
+ type: str
+ image:
+ description: The image property as returned from server.
+ returned: success
+ type: dict
+ image_id:
+ description: |
+ The image reference, as a ID or full URL, for the image to use
+ for this server.
+ returned: success
+ type: str
+ instance_name:
+ description: |
+ The instance name. The Compute API generates the instance name
+ from the instance name template. Appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ is_locked:
+ description: The locked status of the server
+ returned: success
+ type: bool
+ kernel_id:
+ description: |
+ The UUID of the kernel image when using an AMI. Will be null if
+ not. By default, it appears in the response for administrative
+ users only.
+ returned: success
+ type: str
+ key_name:
+ description: The name of an associated keypair.
+ returned: success
+ type: str
+ launch_index:
+ description: |
+ When servers are launched via multiple create, this is the
+ sequence in which the servers were launched. By default, it
+ appears in the response for administrative users only.
+ returned: success
+ type: int
+ launched_at:
+ description: The timestamp when the server was launched.
+ returned: success
+ type: str
+ links:
+ description: |
+ A list of dictionaries holding links relevant to this server.
+ returned: success
+ type: str
+ max_count:
+ description: The maximum number of servers to create.
+ returned: success
+ type: str
+ metadata:
+      description: The metadata key and value pairs assigned to the server.
+ returned: success
+ type: dict
+ min_count:
+ description: The minimum number of servers to create.
+ returned: success
+ type: str
+ name:
+ description: Name of the server
+ returned: success
+ type: str
+ networks:
+ description: |
+ A networks object. Required parameter when there are multiple
+ networks defined for the tenant. When you do not specify the
+ networks parameter, the server attaches to the only network
+ created for the current tenant.
+ returned: success
+ type: str
+ power_state:
+ description: The power state of this server.
+ returned: success
+ type: str
+ progress:
+ description: |
+ While the server is building, this value represents the
+ percentage of completion. Once it is completed, it will be 100.
+ returned: success
+ type: int
+ project_id:
+ description: The ID of the project this server is associated with.
+ returned: success
+ type: str
+ ramdisk_id:
+ description: |
+ The UUID of the ramdisk image when using an AMI. Will be null
+ if not. By default, it appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ reservation_id:
+ description: |
+ The reservation id for the server. This is an id that can be
+ useful in tracking groups of servers created with multiple
+ create, that will all have the same reservation_id. By default,
+ it appears in the response for administrative users only.
+ returned: success
+ type: str
+ root_device_name:
+ description: |
+        The root device name for the instance. By default, it appears in
+ the response for administrative users only.
+ returned: success
+ type: str
+ scheduler_hints:
+ description: The dictionary of data to send to the scheduler.
+ returned: success
+ type: dict
+ security_groups:
+ description: |
+ A list of applicable security groups. Each group contains keys
+ for: description, name, id, and rules.
+ returned: success
+ type: list
+ elements: dict
+ server_groups:
+ description: |
+ The UUIDs of the server groups to which the server belongs.
+ Currently this can contain at most one entry.
+ returned: success
+ type: list
+ status:
+ description: |
+ The state this server is in. Valid values include 'ACTIVE',
+ 'BUILDING', 'DELETED', 'ERROR', 'HARD_REBOOT', 'PASSWORD',
+ 'PAUSED', 'REBOOT', 'REBUILD', 'RESCUED', 'RESIZED',
+ 'REVERT_RESIZE', 'SHUTOFF', 'SOFT_DELETED', 'STOPPED',
+ 'SUSPENDED', 'UNKNOWN', or 'VERIFY_RESIZE'.
+ returned: success
+ type: str
+ tags:
+ description: A list of associated tags.
+ returned: success
+ type: list
+ task_state:
+ description: The task state of this server.
+ returned: success
+ type: str
+ terminated_at:
+ description: |
+ The timestamp when the server was terminated (if it has been).
+ returned: success
+ type: str
+ trusted_image_certificates:
+ description: |
+ A list of trusted certificate IDs, that were used during image
+ signature verification to verify the signing certificate.
+ returned: success
+ type: list
+ updated_at:
+ description: Timestamp of when this server was last updated.
+ returned: success
+ type: str
+ user_data:
+ description: |
+ Configuration information or scripts to use upon launch.
+ Base64 encoded.
+ returned: success
+ type: str
+ user_id:
+      description: The ID of the owner of this server.
+ returned: success
+ type: str
+ vm_state:
+ description: The VM state of this server.
+ returned: success
+ type: str
+ volumes:
+ description: Same as attached_volumes.
+ returned: success
+ type: list
+'''
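
The C(addresses) value returned above maps each network name to a list of
address entries with C(addr) and C(version) keys. A short sketch of pulling all
IPv4 addresses out of that structure; the sample data is made up for
illustration:

    def ipv4_addresses(addresses):
        # addresses: {network name: [{'addr': ..., 'version': 4 or 6}, ...]}
        return [entry['addr']
                for entries in addresses.values()
                for entry in entries
                if entry.get('version') == 4]

    sample = {'private': [{'addr': '10.0.0.5', 'version': 4},
                          {'addr': 'fd00::5', 'version': 6}]}
    assert ipv4_addresses(sample) == ['10.0.0.5']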
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+import copy
class ServerModule(OpenStackModule):
- deprecated_names = ('os_server', 'openstack.cloud.os_server')
argument_spec = dict(
- name=dict(required=True),
- image=dict(default=None),
+ auto_ip=dict(default=True, type='bool',
+ aliases=['auto_floating_ip', 'public_ip']),
+ availability_zone=dict(),
+ boot_from_volume=dict(default=False, type='bool'),
+ boot_volume=dict(aliases=['root_volume']),
+ config_drive=dict(default=False, type='bool'),
+ delete_ips=dict(default=False, type='bool', aliases=['delete_fip']),
+ description=dict(),
+ flavor=dict(),
+ flavor_include=dict(),
+ flavor_ram=dict(type='int'),
+ floating_ip_pools=dict(type='list', elements='str'),
+ floating_ips=dict(type='list', elements='str'),
+ image=dict(),
image_exclude=dict(default='(deprecated)'),
- flavor=dict(default=None),
- flavor_ram=dict(default=None, type='int'),
- flavor_include=dict(default=None),
- key_name=dict(default=None),
- security_groups=dict(default=['default'], type='list', elements='str'),
- network=dict(default=None),
+ key_name=dict(),
+ metadata=dict(type='raw', aliases=['meta']),
+ name=dict(required=True),
+ network=dict(),
nics=dict(default=[], type='list', elements='raw'),
- meta=dict(default=None, type='raw'),
- userdata=dict(default=None, aliases=['user_data']),
- config_drive=dict(default=False, type='bool'),
- auto_ip=dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
- floating_ips=dict(default=None, type='list', elements='str'),
- floating_ip_pools=dict(default=None, type='list', elements='str'),
- volume_size=dict(default=None, type='int'),
- boot_from_volume=dict(default=False, type='bool'),
- boot_volume=dict(default=None, aliases=['root_volume']),
+ reuse_ips=dict(default=True, type='bool'),
+ scheduler_hints=dict(type='dict'),
+ security_groups=dict(default=[], type='list', elements='str'),
+ state=dict(default='present', choices=['absent', 'present']),
terminate_volume=dict(default=False, type='bool'),
+ userdata=dict(),
+ volume_size=dict(type='int'),
volumes=dict(default=[], type='list', elements='str'),
- scheduler_hints=dict(default=None, type='dict'),
- state=dict(default='present', choices=['absent', 'present']),
- delete_fip=dict(default=False, type='bool'),
- reuse_ips=dict(default=True, type='bool'),
- description=dict(default=None, type='str'),
)
+
module_kwargs = dict(
mutually_exclusive=[
- ['auto_ip', 'floating_ips'],
- ['auto_ip', 'floating_ip_pools'],
- ['floating_ips', 'floating_ip_pools'],
+ ['auto_ip', 'floating_ips', 'floating_ip_pools'],
['flavor', 'flavor_ram'],
['image', 'boot_volume'],
['boot_from_volume', 'boot_volume'],
@@ -523,277 +841,405 @@ class ServerModule(OpenStackModule):
],
required_if=[
('boot_from_volume', True, ['volume_size', 'image']),
+ ('state', 'present', ('image', 'boot_volume'), True),
+ ('state', 'present', ('flavor', 'flavor_ram'), True),
],
+ supports_check_mode=True,
)
def run(self):
-
- state = self.params['state']
- image = self.params['image']
- boot_volume = self.params['boot_volume']
- flavor = self.params['flavor']
- flavor_ram = self.params['flavor_ram']
-
- if state == 'present':
- if not (image or boot_volume):
- self.fail(
- msg="Parameter 'image' or 'boot_volume' is required "
- "if state == 'present'"
- )
- if not flavor and not flavor_ram:
- self.fail(
- msg="Parameter 'flavor' or 'flavor_ram' is required "
- "if state == 'present'"
- )
-
- if state == 'present':
- self._get_server_state()
- self._create_server()
- elif state == 'absent':
- self._get_server_state()
- self._delete_server()
-
- def _exit_hostvars(self, server, changed=True):
- hostvars = self.conn.get_openstack_vars(server)
- self.exit(
- changed=changed, server=server, id=server.id, openstack=hostvars)
-
- def _get_server_state(self):
state = self.params['state']
- server = self.conn.get_server(self.params['name'])
- if server and state == 'present':
- if server.status not in ('ACTIVE', 'SHUTOFF', 'PAUSED', 'SUSPENDED'):
- self.fail(
- msg="The instance is available but not Active state: " + server.status)
- (ip_changed, server) = self._check_ips(server)
- (sg_changed, server) = self._check_security_groups(server)
- (server_changed, server) = self._update_server(server)
- self._exit_hostvars(server, ip_changed or sg_changed or server_changed)
- if server and state == 'absent':
- return True
- if state == 'absent':
- self.exit(changed=False, result="not present")
- return True
- def _create_server(self):
- flavor = self.params['flavor']
- flavor_ram = self.params['flavor_ram']
- flavor_include = self.params['flavor_include']
+ server = self.conn.compute.find_server(self.params['name'])
+ if server:
+ # fetch server details such as server['addresses']
+ server = self.conn.compute.get_server(server)
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, server))
+
+ if state == 'present' and not server:
+ # Create server
+ server = self._create()
+ self.exit_json(changed=True,
+ server=server.to_dict(computed=False))
+
+ elif state == 'present' and server:
+ # Update server
+ update = self._build_update(server)
+ if update:
+ server = self._update(server, update)
+
+ self.exit_json(changed=bool(update),
+ server=server.to_dict(computed=False))
+
+ elif state == 'absent' and server:
+ # Delete server
+ self._delete(server)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not server:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, server):
+ if server.status not in ('ACTIVE', 'SHUTOFF', 'PAUSED', 'SUSPENDED'):
+ self.fail_json(msg="The instance is available but not "
+ "active state: {0}".format(server.status))
+
+ return {
+ **self._build_update_ips(server),
+ **self._build_update_security_groups(server),
+ **self._build_update_server(server)}
+
+ def _build_update_ips(self, server):
+ auto_ip = self.params['auto_ip']
+ floating_ips = self.params['floating_ips']
+ floating_ip_pools = self.params['floating_ip_pools']
+
+ if not (auto_ip or floating_ips or floating_ip_pools):
+ # No floating ip has been requested, so
+ # do not add or remove any floating ip.
+ return {}
+
+ # Get floating ip addresses attached to the server
+ ips = [interface_spec['addr']
+ for v in server['addresses'].values()
+ for interface_spec in v
+ if interface_spec.get('OS-EXT-IPS:type', None) == 'floating']
+
+ if (auto_ip and ips and not floating_ip_pools and not floating_ips):
+ # Server has a floating ip address attached and
+ # no specific floating ip has been requested,
+ # so nothing to change.
+ return {}
+
+ if not ips:
+ # One or multiple floating ips have been requested,
+ # but none have been attached, so attach them.
+ return dict(ips=dict(
+ auto_ip=auto_ip,
+ ips=floating_ips,
+ ip_pool=floating_ip_pools))
+
+ if auto_ip or not floating_ips:
+ # Nothing to do because either an arbitrary floating ip
+ # or no specific floating ips have been requested,
+ # and a floating ip is already attached.
+ return {}
+
+ # A specific set of floating ips has been requested
+ update = {}
+ add_ips = [ip for ip in floating_ips if ip not in ips]
+ if add_ips:
+ # add specific ips which have not been added
+ update['add_ips'] = add_ips
+
+ remove_ips = [ip for ip in ips if ip not in floating_ips]
+ if remove_ips:
+ # Detach ips which are not supposed to be attached
+ update['remove_ips'] = remove_ips
+
+ return update
+
+ def _build_update_security_groups(self, server):
+ update = {}
+
+ required_security_groups = dict(
+ (sg['id'], sg) for sg in [
+ self.conn.network.find_security_group(
+ security_group_name_or_id, ignore_missing=False)
+ for security_group_name_or_id in self.params['security_groups']
+ ])
+
+ # Retrieve IDs of security groups attached to the server
+ server = self.conn.compute.fetch_server_security_groups(server)
+ assigned_security_groups = dict(
+ (sg['id'], self.conn.network.get_security_group(sg['id']))
+ for sg in server.security_groups)
+
+ # openstacksdk adds security groups to server using resources
+ add_security_groups = [
+ sg for (sg_id, sg) in required_security_groups.items()
+ if sg_id not in assigned_security_groups]
+
+ if add_security_groups:
+ update['add_security_groups'] = add_security_groups
+
+ # openstacksdk removes security groups from servers using resources
+ remove_security_groups = [
+ sg for (sg_id, sg) in assigned_security_groups.items()
+ if sg_id not in required_security_groups]
+
+ if remove_security_groups:
+ update['remove_security_groups'] = remove_security_groups
+
+ return update
+
+ def _build_update_server(self, server):
+ update = {}
+
+ # Process metadata
+ required_metadata = self._parse_metadata(self.params['metadata'])
+ assigned_metadata = server.metadata
+
+ add_metadata = dict()
+ for (k, v) in required_metadata.items():
+ if k not in assigned_metadata or assigned_metadata[k] != v:
+ add_metadata[k] = v
+
+ if add_metadata:
+ update['add_metadata'] = add_metadata
+
+ remove_metadata = dict()
+ for (k, v) in assigned_metadata.items():
+ if k not in required_metadata or required_metadata[k] != v:
+ remove_metadata[k] = v
+
+ if remove_metadata:
+ update['remove_metadata'] = remove_metadata
+
+ # Process server attributes
+
+ # Updateable server attributes in openstacksdk
+ # (OpenStack API names in parentheses):
+ # - access_ipv4 (accessIPv4)
+ # - access_ipv6 (accessIPv6)
+ # - name (name)
+ # - hostname (hostname)
+ # - disk_config (OS-DCF:diskConfig)
+ # - description (description)
+ # Ref.: https://docs.openstack.org/api-ref/compute/#update-server
+
+ # A server's name cannot be updated by this module because
+ # it is used to find servers by name or id.
+ # If name is an id, then we do not have a name to update.
+ # If name is actually a name, then it was used to find a
+ # matching server, hence the name already equals the
+ # user-defined one.
+
+ # Update all known updateable attributes although
+ # our module might not support them yet
+ server_attributes = dict(
+ (k, self.params[k])
+ for k in ['access_ipv4', 'access_ipv6', 'hostname', 'disk_config',
+ 'description']
+ if k in self.params and self.params[k] is not None
+ and self.params[k] != server[k])
+
+ if server_attributes:
+ update['server_attributes'] = server_attributes
+
+ return update
+
+ def _create(self):
+ for k in ['auto_ip', 'floating_ips', 'floating_ip_pools']:
+ if self.params[k] is not None \
+ and self.params['wait'] is False:
+ # floating ip addresses will only be added if
+ # we wait until the server has been created
+ # Ref.: https://opendev.org/openstack/openstacksdk/src/commit/3f81d0001dd994cde990d38f6e2671ee0694d7d5/openstack/cloud/_compute.py#L945
+ self.fail_json(
+ msg="Option '{0}' requires 'wait: true'".format(k))
+
+ flavor_name_or_id = self.params['flavor']
image_id = None
if not self.params['boot_volume']:
image_id = self.conn.get_image_id(
self.params['image'], self.params['image_exclude'])
if not image_id:
- self.fail(
- msg="Could not find image %s" % self.params['image'])
+ self.fail_json(
+ msg="Could not find image {0} with exclude {1}".format(
+ self.params['image'], self.params['image_exclude']))
- if flavor:
- flavor_dict = self.conn.get_flavor(flavor)
- if not flavor_dict:
- self.fail(msg="Could not find flavor %s" % flavor)
+ if flavor_name_or_id:
+ flavor = self.conn.compute.find_flavor(flavor_name_or_id,
+ ignore_missing=False)
else:
- flavor_dict = self.conn.get_flavor_by_ram(flavor_ram, flavor_include)
- if not flavor_dict:
- self.fail(msg="Could not find any matching flavor")
+ flavor = self.conn.get_flavor_by_ram(self.params['flavor_ram'],
+ self.params['flavor_include'])
+ if not flavor:
+ self.fail_json(msg="Could not find any matching flavor")
- nics = self._network_args()
-
- self.params['meta'] = _parse_meta(self.params['meta'])
-
- bootkwargs = self.check_versioned(
- name=self.params['name'],
+ args = dict(
+ flavor=flavor.id,
image=image_id,
- flavor=flavor_dict['id'],
- nics=nics,
- meta=self.params['meta'],
- security_groups=self.params['security_groups'],
- userdata=self.params['userdata'],
- config_drive=self.params['config_drive'],
- )
- for optional_param in (
- 'key_name', 'availability_zone', 'network',
- 'scheduler_hints', 'volume_size', 'volumes',
- 'description'):
- if self.params[optional_param]:
- bootkwargs[optional_param] = self.params[optional_param]
-
- server = self.conn.create_server(
ip_pool=self.params['floating_ip_pools'],
ips=self.params['floating_ips'],
- auto_ip=self.params['auto_ip'],
- boot_volume=self.params['boot_volume'],
- boot_from_volume=self.params['boot_from_volume'],
- terminate_volume=self.params['terminate_volume'],
- reuse_ips=self.params['reuse_ips'],
- wait=self.params['wait'], timeout=self.params['timeout'],
- **bootkwargs
+ meta=self._parse_metadata(self.params['metadata']),
+ nics=self._parse_nics(),
)
- self._exit_hostvars(server)
-
- def _update_server(self, server):
- changed = False
-
- self.params['meta'] = _parse_meta(self.params['meta'])
-
- # self.conn.set_server_metadata only updates the key=value pairs, it doesn't
- # touch existing ones
- update_meta = {}
- for (k, v) in self.params['meta'].items():
- if k not in server.metadata or server.metadata[k] != v:
- update_meta[k] = v
-
- if update_meta:
- self.conn.set_server_metadata(server, update_meta)
- changed = True
- # Refresh server vars
- server = self.conn.get_server(self.params['name'])
-
- return (changed, server)
-
- def _delete_server(self):
- try:
- self.conn.delete_server(
- self.params['name'], wait=self.params['wait'],
- timeout=self.params['timeout'],
- delete_ips=self.params['delete_fip'])
- except Exception as e:
- self.fail(msg="Error in deleting vm: %s" % e)
- self.exit(changed=True, result='deleted')
-
- def _network_args(self):
- args = []
- nics = self.params['nics']
-
- if not isinstance(nics, list):
- self.fail(msg='The \'nics\' parameter must be a list.')
-
- for num, net in enumerate(_parse_nics(nics)):
+ for k in ['auto_ip', 'availability_zone', 'boot_from_volume',
+ 'boot_volume', 'config_drive', 'description', 'key_name',
+ 'name', 'network', 'reuse_ips', 'scheduler_hints',
+ 'security_groups', 'terminate_volume', 'timeout',
+ 'userdata', 'volume_size', 'volumes', 'wait']:
+ if self.params[k] is not None:
+ args[k] = self.params[k]
+
+ server = self.conn.create_server(**args)
+
+ # openstacksdk's create_server() might call meta.add_server_interfaces(
+ # ) which alters server attributes such as server['addresses']. So we
+ # do an extra call to compute.get_server() to return a clean server
+ # resource.
+ # Ref.: https://opendev.org/openstack/openstacksdk/src/commit/3f81d0001dd994cde990d38f6e2671ee0694d7d5/openstack/cloud/_compute.py#L942
+ return self.conn.compute.get_server(server)
+
+ def _delete(self, server):
+ self.conn.delete_server(
+ server.id,
+ **dict((k, self.params[k])
+ for k in ['wait', 'timeout', 'delete_ips']))
+
+ def _update(self, server, update):
+ server = self._update_ips(server, update)
+ server = self._update_security_groups(server, update)
+ server = self._update_server(server, update)
+ # Refresh server attributes after security groups etc. have changed
+ #
+ # Use compute.get_server() instead of compute.find_server()
+ # to include server details
+ return self.conn.compute.get_server(server)
+
+ def _update_ips(self, server, update):
+ args = dict((k, self.params[k]) for k in ['wait', 'timeout'])
+ ips = update.get('ips')
+ if ips:
+ server = self.conn.add_ips_to_server(server, **ips, **args)
+
+ add_ips = update.get('add_ips')
+ if add_ips:
+ # Add specific ips which have not been added
+ server = self.conn.add_ip_list(server, add_ips, **args)
+
+ remove_ips = update.get('remove_ips')
+ if remove_ips:
+ # Detach ips which are not supposed to be attached
+ for ip in remove_ips:
+ ip_id = self.conn.network.find_ip(name_or_id=ip,
+ ignore_missing=False).id
+ # self.conn.network.update_ip(ip_id, port_id=None) does not
+ # handle nova network but self.conn.detach_ip_from_server()
+ # does so
+ self.conn.detach_ip_from_server(server_id=server.id,
+ floating_ip_id=ip_id)
+ return server
+
+ def _update_security_groups(self, server, update):
+ add_security_groups = update.get('add_security_groups')
+ if add_security_groups:
+ for sg in add_security_groups:
+ self.conn.compute.add_security_group_to_server(server, sg)
+
+ remove_security_groups = update.get('remove_security_groups')
+ if remove_security_groups:
+ for sg in remove_security_groups:
+ self.conn.compute.remove_security_group_from_server(server, sg)
+
+ # Whenever security groups of a server have changed,
+ # the server object has to be refreshed. This will
+ # be postponed until all updates have been applied.
+ return server
+
+ def _update_server(self, server, update):
+ add_metadata = update.get('add_metadata')
+ if add_metadata:
+ self.conn.compute.set_server_metadata(server.id,
+ **add_metadata)
+
+ remove_metadata = update.get('remove_metadata')
+ if remove_metadata:
+ self.conn.compute.delete_server_metadata(server.id,
+ remove_metadata.keys())
+
+ server_attributes = update.get('server_attributes')
+ if server_attributes:
+ # The server object cannot be passed to
+ # self.conn.compute.update_server() as a whole because its
+ # security_groups attribute was expanded by
+ # self.conn.compute.fetch_server_security_groups() earlier and
+ # thus no longer has a valid value for the OpenStack API.
+ server = self.conn.compute.update_server(server['id'],
+ **server_attributes)
+
+ # Whenever server attributes such as metadata have changed,
+ # the server object has to be refreshed. This will
+ # be postponed until all updates have been applied.
+ return server
+
+ def _parse_metadata(self, metadata):
+ if not metadata:
+ return {}
+
+ if isinstance(metadata, str):
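+ # Illustrative note (not from the upstream module): a string such
+ # as "key1=value1,key2=value2" is parsed into
+ # {'key1': 'value1', 'key2': 'value2'}; values containing '=' or
+ # ',' are not handled by this simple parser.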
+ metas = {}
+ for kv_str in metadata.split(","):
+ k, v = kv_str.split("=")
+ metas[k] = v
+ return metas
+
+ return metadata
+
+ def _parse_nics(self):
+ nics = []
+ stringified_nets = self.params['nics']
+
+ if not isinstance(stringified_nets, list):
+ self.fail_json(msg="The 'nics' parameter must be a list.")
+
+ nets = [(dict((nested_net.split('='),))
+ for nested_net in net.split(','))
+ if isinstance(net, str) else net
+ for net in stringified_nets]
+
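+ # Illustrative note (not from the upstream module): each nic entry
+ # is expected to be a dict identifying a network or port, e.g.
+ # {'net-name': 'private'}, {'net-id': '...'}, {'port-id': '...'} or
+ # {'port-name': '...'}, optionally with a 'tag' key.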
+ for net in nets:
if not isinstance(net, dict):
- self.fail(
- msg='Each entry in the \'nics\' parameter must be a dict.')
+ self.fail_json(
+ msg="Each entry in the 'nics' parameter must be a dict.")
if net.get('net-id'):
- args.append(net)
+ nics.append(net)
elif net.get('net-name'):
- by_name = self.conn.get_network(net['net-name'])
- if not by_name:
- self.fail(
- msg='Could not find network by net-name: %s' %
- net['net-name'])
- resolved_net = net.copy()
- del resolved_net['net-name']
- resolved_net['net-id'] = by_name['id']
- args.append(resolved_net)
+ network_id = self.conn.network.find_network(
+ net['net-name'], ignore_missing=False).id
+ # Replace net-name with net-id and keep optional nic args
+ # Ref.: https://github.com/ansible/ansible/pull/20969
+ #
+ # Delete net-name from a copy else it will
+ # disappear from Ansible's debug output
+ net = copy.deepcopy(net)
+ del net['net-name']
+ net['net-id'] = network_id
+ nics.append(net)
elif net.get('port-id'):
- args.append(net)
+ nics.append(net)
elif net.get('port-name'):
- by_name = self.conn.get_port(net['port-name'])
- if not by_name:
- self.fail(
- msg='Could not find port by port-name: %s' %
- net['port-name'])
- resolved_net = net.copy()
- del resolved_net['port-name']
- resolved_net['port-id'] = by_name['id']
- args.append(resolved_net)
+ port_id = self.conn.network.find_port(
+ net['port-name'], ignore_missing=False).id
+ # Replace port-name with port-id and keep optional nic args
+ # Ref.: https://github.com/ansible/ansible/pull/20969
+ #
+ # Delete port-name from a copy else it will
+ # disappear from Ansible's debug output
+ net = copy.deepcopy(net)
+ del net['port-name']
+ net['port-id'] = port_id
+ nics.append(net)
if 'tag' in net:
- args[num]['tag'] = net['tag']
- return args
-
- def _detach_ip_list(self, server, extra_ips):
- for ip in extra_ips:
- ip_id = self.conn.get_floating_ip(
- id=None, filters={'floating_ip_address': ip})
- self.conn.detach_ip_from_server(
- server_id=server.id, floating_ip_id=ip_id)
-
- def _check_ips(self, server):
- changed = False
-
- auto_ip = self.params['auto_ip']
- floating_ips = self.params['floating_ips']
- floating_ip_pools = self.params['floating_ip_pools']
+ nics[-1]['tag'] = net['tag']
+ return nics
- if floating_ip_pools or floating_ips:
- ips = openstack_find_nova_addresses(server.addresses, 'floating')
- if not ips:
- # If we're configured to have a floating but we don't have one,
- # let's add one
- server = self.conn.add_ips_to_server(
- server,
- auto_ip=auto_ip,
- ips=floating_ips,
- ip_pool=floating_ip_pools,
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- )
- changed = True
- elif floating_ips:
- # we were configured to have specific ips, let's make sure we have
- # those
- missing_ips = []
- for ip in floating_ips:
- if ip not in ips:
- missing_ips.append(ip)
- if missing_ips:
- server = self.conn.add_ip_list(server, missing_ips,
- wait=self.params['wait'],
- timeout=self.params['timeout'])
- changed = True
- extra_ips = []
- for ip in ips:
- if ip not in floating_ips:
- extra_ips.append(ip)
- if extra_ips:
- self._detach_ip_list(server, extra_ips)
- changed = True
- elif auto_ip:
- if server['interface_ip']:
- changed = False
- else:
- # We're configured for auto_ip but we're not showing an
- # interface_ip. Maybe someone deleted an IP out from under us.
- server = self.conn.add_ips_to_server(
- server,
- auto_ip=auto_ip,
- ips=floating_ips,
- ip_pool=floating_ip_pools,
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- )
- changed = True
- return (changed, server)
-
- def _check_security_groups(self, server):
- changed = False
-
- # server security groups were added to shade in 1.19. Until then this
- # module simply ignored trying to update security groups and only set them
- # on newly created hosts.
- if not (
- hasattr(self.conn, 'add_server_security_groups')
- and hasattr(self.conn, 'remove_server_security_groups')
- ):
- return changed, server
-
- module_security_groups = set(self.params['security_groups'])
- server_security_groups = set(sg['name'] for sg in server.security_groups)
-
- add_sgs = module_security_groups - server_security_groups
- remove_sgs = server_security_groups - module_security_groups
-
- if add_sgs:
- self.conn.add_server_security_groups(server, list(add_sgs))
- changed = True
-
- if remove_sgs:
- self.conn.remove_server_security_groups(server, list(remove_sgs))
- changed = True
-
- return (changed, server)
+ def _will_change(self, state, server):
+ if state == 'present' and not server:
+ return True
+ elif state == 'present' and server:
+ return bool(self._build_update(server))
+ elif state == 'absent' and server:
+ return True
+ else:
+ # state == 'absent' and not server:
+ return False
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/server_action.py b/ansible_collections/openstack/cloud/plugins/modules/server_action.py
index 341ff3742..4b1a0fc43 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/server_action.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/server_action.py
@@ -1,230 +1,211 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright (c) 2015, Jesse Keating <jlk@derpops.bike>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: server_action
-short_description: Perform actions on Compute Instances from OpenStack
+short_description: Perform actions on OpenStack compute (Nova) instances
author: OpenStack Ansible SIG
description:
- - Perform server actions on an existing compute instance from OpenStack.
- This module does not return any data other than changed true/false.
- When I(action) is 'rebuild', then I(image) parameter is required.
+ - Perform actions on OpenStack compute (Nova) instances aka servers.
options:
- server:
- description:
- - Name or ID of the instance
- required: true
- type: str
- wait:
- description:
- - If the module should wait for the instance action to be performed.
- type: bool
- default: 'yes'
- timeout:
- description:
- - The amount of time the module should wait for the instance to perform
- the requested action.
- default: 180
- type: int
- action:
- description:
- - Perform the given action. The lock and unlock actions always return
- changed as the servers API does not provide lock status.
- choices: [stop, start, pause, unpause, lock, unlock, suspend, resume,
- rebuild, shelve, shelve_offload, unshelve]
- type: str
- required: true
- image:
- description:
- - Image the server should be rebuilt with
- type: str
- admin_password:
- description:
- - Admin password for server to rebuild
- type: str
- all_projects:
- description:
- - Whether to search for server in all projects or just the current
- auth scoped project.
- type: bool
- default: 'no'
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ action:
+ description:
+ - Action to perform.
+ - By default, only server owners and administrators are allowed to
+ perform actions C(pause), C(unpause), C(suspend), C(resume), C(lock),
+ C(unlock) and C(shelve_offload).
+ choices: [lock, pause, reboot_hard, reboot_soft, rebuild, resume, shelve,
+ shelve_offload, start, stop, suspend, unlock, unpause, unshelve]
+ type: str
+ required: true
+ admin_password:
+ description:
+ - Admin password for server to rebuild.
+ type: str
+ all_projects:
+ description:
+ - Whether to search for server in all projects or the current project
+ only.
+ type: bool
+ default: false
+ image:
+ description:
+ - Image name or ID the server should be rebuilt with.
+ type: str
+ name:
+ description:
+ - Server name or ID.
+ required: true
+ type: str
+ aliases: ['server']
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Pauses a compute instance
-- openstack.cloud.server_action:
- action: pause
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
- server: vm1
- timeout: 200
+EXAMPLES = r'''
+- name: Pauses a compute instance
+ openstack.cloud.server_action:
+ cloud: devstack-admin
+ action: pause
+ server: vm1
+ timeout: 200
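+
+# A minimal extra example (not from upstream; the image name is a
+# placeholder): the rebuild action requires the image option.
+- name: Rebuild a compute instance
+  openstack.cloud.server_action:
+    cloud: devstack-admin
+    action: rebuild
+    server: vm1
+    image: my-rebuild-image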
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
-# If I(action) is set to C(shelve) then according to OpenStack's Compute API, the shelved
-# server is in one of two possible states:
-#
-# SHELVED: The server is in shelved state. Depends on the shelve offload time,
-# the server will be automatically shelved off loaded.
-# SHELVED_OFFLOADED: The shelved server is offloaded (removed from the compute host) and
-# it needs unshelved action to be used again.
-#
-# But wait_for_server can only wait for a single server state. If a shelved server is offloaded
-# immediately, then a exceptions.ResourceTimeout will be raised if I(action) is set to C(shelve).
-# This is likely to happen because shelved_offload_time in Nova's config is set to 0 by default.
-# This also applies if you boot the server from volumes.
-#
-# Calling C(shelve_offload) instead of C(shelve) will also fail most likely because the default
-# policy does not allow C(shelve_offload) for non-admin users while C(shelve) is allowed for
-# admin users and server owners.
-#
-# As we cannot retrieve shelved_offload_time from Nova's config, we fall back to waiting for
-# one state and if that fails then we fetch the server's state and match it against the other
-# valid states from _action_map.
-#
-# Ref.: https://docs.openstack.org/api-guide/compute/server_concepts.html
-
-_action_map = {'stop': ['SHUTOFF'],
- 'start': ['ACTIVE'],
- 'pause': ['PAUSED'],
- 'unpause': ['ACTIVE'],
- 'lock': ['ACTIVE'], # API doesn't show lock/unlock status
- 'unlock': ['ACTIVE'],
- 'suspend': ['SUSPENDED'],
- 'resume': ['ACTIVE'],
- 'rebuild': ['ACTIVE'],
- 'shelve': ['SHELVED_OFFLOADED', 'SHELVED'],
- 'shelve_offload': ['SHELVED_OFFLOADED'],
- 'unshelve': ['ACTIVE']}
-
-_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock', 'shelve_offload']
-
class ServerActionModule(OpenStackModule):
- deprecated_names = ('os_server_action', 'openstack.cloud.os_server_action')
-
argument_spec = dict(
- server=dict(required=True, type='str'),
- action=dict(required=True, type='str',
+ action=dict(required=True,
choices=['stop', 'start', 'pause', 'unpause',
- 'lock', 'unlock', 'suspend', 'resume',
- 'rebuild', 'shelve', 'shelve_offload', 'unshelve']),
- image=dict(required=False, type='str'),
- admin_password=dict(required=False, type='str', no_log=True),
- all_projects=dict(required=False, type='bool', default=False),
+ 'lock', 'unlock', 'suspend', 'reboot_soft',
+ 'reboot_hard', 'resume', 'rebuild', 'shelve',
+ 'shelve_offload', 'unshelve']),
+ admin_password=dict(no_log=True),
+ all_projects=dict(type='bool', default=False),
+ image=dict(),
+ name=dict(required=True, aliases=['server']),
)
+
module_kwargs = dict(
required_if=[('action', 'rebuild', ['image'])],
supports_check_mode=True,
)
+ # If I(action) is set to C(shelve) then according to OpenStack's Compute
+ # API, the shelved server is in one of two possible states:
+ #
+ # SHELVED: The server is in shelved state. Depending on the
+ # shelve offload time, the server will automatically
+ # be shelve offloaded.
+ # SHELVED_OFFLOADED: The shelved server is offloaded (removed from the
+ # compute host) and it needs unshelved action to be
+ # used again.
+ #
+ # But wait_for_server can only wait for a single server state. If a shelved
+ # server is offloaded immediately, then an exceptions.ResourceTimeout will
+ # be raised if I(action) is set to C(shelve). This is likely to happen
+ # because shelved_offload_time in Nova's config is set to 0 by default.
+ # This also applies if you boot the server from volumes.
+ #
+ # Calling C(shelve_offload) instead of C(shelve) will also fail most likely
+ # because the default policy does not allow C(shelve_offload) for non-admin
+ # users while C(shelve) is allowed for admin users and server owners.
+ #
+ # As we cannot retrieve shelved_offload_time from Nova's config, we fall
+ # back to waiting for one state and if that fails then we fetch the
+ # server's state and match it against the other valid states from
+ # _action_map.
+ #
+ # Ref.: https://docs.openstack.org/api-guide/compute/server_concepts.html
+
+ _action_map = {'stop': ['SHUTOFF'],
+ 'start': ['ACTIVE'],
+ 'pause': ['PAUSED'],
+ 'unpause': ['ACTIVE'],
+ 'lock': ['ACTIVE'],
+ 'unlock': ['ACTIVE'],
+ 'suspend': ['SUSPENDED'],
+ 'reboot_soft': ['ACTIVE'],
+ 'reboot_hard': ['ACTIVE'],
+ 'resume': ['ACTIVE'],
+ 'rebuild': ['ACTIVE'],
+ 'shelve': ['SHELVED_OFFLOADED', 'SHELVED'],
+ 'shelve_offload': ['SHELVED_OFFLOADED'],
+ 'unshelve': ['ACTIVE']}
+
def run(self):
- os_server = self._preliminary_checks()
- self._execute_server_action(os_server)
- # for some reason we don't wait for lock and unlock before exit
- if self.params['action'] not in ('lock', 'unlock'):
- if self.params['wait']:
- self._wait(os_server)
- self.exit_json(changed=True)
+ # TODO: Replace with self.conn.compute.find_server(
+ # self.params['name'], all_projects=self.params['all_projects'],
+ # ignore_missing=False) when [0] has been merged.
+ # [0] https://review.opendev.org/c/openstack/openstacksdk/+/857936/
+ server = self.conn.get_server(
+ name_or_id=self.params['name'],
+ detailed=True,
+ all_projects=self.params['all_projects'])
+ if not server:
+ self.fail_json(msg='No Server found for {0}'
+ .format(self.params['name']))
+
+ action = self.params['action']
+
+ # rebuild does not depend on state
+ will_change = (
+ (action == 'rebuild')
+ or (action == 'lock' and not server['is_locked'])
+ or (action == 'unlock' and server['is_locked'])
+ or server.status.lower() not in [a.lower()
+ for a
+ in self._action_map[action]])
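+ # Illustrative note (not from the upstream module): rebuild always
+ # reports a change, lock/unlock are judged by the is_locked flag
+ # because the server status does not reflect the lock state, and
+ # all other actions are compared against the states in _action_map.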
+
+ if not will_change:
+ self.exit_json(changed=False)
+ elif self.ansible.check_mode:
+ self.exit_json(changed=True)
+ # else perform action
+
+ if action == 'rebuild':
+ # rebuild should ensure the image exists
+ image = self.conn.image.find_image(self.params['image'],
+ ignore_missing=False)
+ kwargs = dict(server=server,
+ name=server['name'],
+ image=image['id'])
+
+ admin_password = self.params['admin_password']
+ if admin_password is not None:
+ kwargs['admin_password'] = admin_password
+
+ self.conn.compute.rebuild_server(**kwargs)
+ elif action == 'shelve_offload':
+ # TODO: Replace with shelve_offload function call when [0] has been
+ # merged.
+ # [0] https://review.opendev.org/c/openstack/openstacksdk/+/857947
+
+ # shelve_offload is not supported in openstacksdk <= 1.0.0
+ response = self.conn.compute.post(
+ '/servers/{server_id}/action'.format(server_id=server['id']),
+ json={'shelveOffload': None})
+ self.sdk.exceptions.raise_from_response(response)
+ else: # action != 'rebuild' and action != 'shelve_offload'
+ action_name = action + "_server"
+
+ # reboot_* actions use the reboot_server method with an
+ # additional argument
+ if action in ['reboot_soft', 'reboot_hard']:
+ action_name = 'reboot_server'
- def _preliminary_checks(self):
- # Using Munch object for getting information about a server
- os_server = self.conn.get_server(
- self.params['server'],
- all_projects=self.params['all_projects'],
- )
- if not os_server:
- self.fail_json(msg='Could not find server %s' % self.params['server'])
- # check mode
- if self.ansible.check_mode:
- self.exit_json(changed=self.__system_state_change(os_server))
- # examine special cases
- # lock, unlock and rebuild don't depend on state, just do it
- if self.params['action'] not in ('lock', 'unlock', 'rebuild'):
- if not self.__system_state_change(os_server):
- self.exit_json(changed=False)
- return os_server
-
- def _execute_server_action(self, os_server):
- if self.params['action'] == 'rebuild':
- return self._rebuild_server(os_server)
- if self.params['action'] == 'shelve_offload':
- # shelve_offload is not supported in OpenstackSDK
- return self._action(os_server, json={'shelveOffload': None})
- action_name = self.params['action'] + "_server"
- try:
func_name = getattr(self.conn.compute, action_name)
- except AttributeError:
- self.fail_json(
- msg="Method %s wasn't found in OpenstackSDK compute" % action_name)
- func_name(os_server)
-
- def _rebuild_server(self, os_server):
- # rebuild should ensure images exists
- try:
- image = self.conn.get_image(self.params['image'])
- except Exception as e:
- self.fail_json(
- msg="Can't find the image %s: %s" % (self.params['image'], e))
- if not image:
- self.fail_json(msg="Image %s was not found!" % self.params['image'])
- # admin_password is required by SDK, but not required by Nova API
- if self.params['admin_password']:
- self.conn.compute.rebuild_server(
- server=os_server,
- name=os_server['name'],
- image=image['id'],
- admin_password=self.params['admin_password']
- )
- else:
- self._action(os_server, json={'rebuild': {'imageRef': image['id']}})
-
- def _action(self, os_server, json):
- response = self.conn.compute.post(
- '/servers/{server_id}/action'.format(server_id=os_server['id']),
- json=json)
- self.sdk.exceptions.raise_from_response(response)
- return response
-
- def _wait(self, os_server):
- """Wait for the server to reach the desired state for the given action."""
- # The wait_for_server function needs a Server object instead of the
- # Munch object returned by self.conn.get_server
- server = self.conn.compute.get_server(os_server['id'])
- states = _action_map[self.params['action']]
-
- try:
- self.conn.compute.wait_for_server(
- server,
- status=states[0],
- wait=self.params['timeout'])
- except self.sdk.exceptions.ResourceTimeout:
- # raise if there is only one valid state
- if len(states) < 2:
- raise
- # fetch current server status and compare to other valid states
- server = self.conn.compute.get_server(os_server['id'])
- if server.status not in states:
- raise
-
- def __system_state_change(self, os_server):
- """Check if system state would change."""
- return os_server.status not in _action_map[self.params['action']]
+
+ # Do the action
+ if action == 'reboot_soft':
+ func_name(server, 'SOFT')
+ elif action == 'reboot_hard':
+ func_name(server, 'HARD')
+ else:
+ func_name(server)
+
+ if self.params['wait']:
+ for count in self.sdk.utils.iterate_timeout(
+ timeout=self.params['timeout'],
+ message='Timeout waiting for action {0} to be completed.'
+ .format(action)
+ ):
+ server = self.conn.compute.get_server(server['id'])
+
+ if (action == 'lock' and server['is_locked']) \
+ or (action == 'unlock' and not server['is_locked']):
+ break
+
+ states = [s.lower() for s in self._action_map[action]]
+ if server.status.lower() in states:
+ break
+
+ self.exit_json(changed=True)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/server_group.py b/ansible_collections/openstack/cloud/plugins/modules/server_group.py
index 84f59e6cb..58b54416a 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/server_group.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/server_group.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2016 Catalyst IT Limited
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -13,10 +14,9 @@ description:
options:
state:
description:
- - Indicate desired state of the resource. When I(state) is 'present',
- then I(policies) is required.
+ - Indicate desired state of the resource. When I(state) is C(present),
+ then I(policy) is required.
choices: ['present', 'absent']
- required: false
default: present
type: str
name:
@@ -24,76 +24,84 @@ options:
- Server group name.
required: true
type: str
- policies:
+ policy:
description:
- - A list of one or more policy names to associate with the server
- group. The list must contain at least one policy name. The current
- valid policy names are anti-affinity, affinity, soft-anti-affinity
- and soft-affinity.
- required: false
- type: list
- elements: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ - Represents the current name of the policy.
+ choices: ['anti-affinity', 'affinity', 'soft-anti-affinity', 'soft-affinity']
+ type: str
+ rules:
+ description:
+ - Rules to be applied to the policy. Currently, only the
+ C(max_server_per_host) rule is supported for the C(anti-affinity)
+ policy.
+ type: dict
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
-# Create a server group with 'affinity' policy.
-- openstack.cloud.server_group:
+- name: Create a server group with 'affinity' policy.
+ openstack.cloud.server_group:
+ cloud: "{{ cloud }}"
state: present
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
name: my_server_group
- policies:
- - affinity
+ policy: affinity
-# Delete 'my_server_group' server group.
-- openstack.cloud.server_group:
+- name: Delete 'my_server_group' server group.
+ openstack.cloud.server_group:
+ cloud: "{{ cloud }}"
state: absent
- auth:
- auth_url: https://identity.example.com
- username: admin
- password: admin
- project_name: admin
name: my_server_group
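+
+# A minimal extra example (not from upstream): rules apply to the
+# anti-affinity policy and require compute API microversion 2.64 or later.
+- name: Create a server group with 'anti-affinity' policy and a rule.
+  openstack.cloud.server_group:
+    cloud: "{{ cloud }}"
+    state: present
+    name: my_server_group
+    policy: anti-affinity
+    rules:
+      max_server_per_host: 2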
'''
RETURN = '''
-id:
- description: Unique UUID.
- returned: success
- type: str
-name:
- description: The name of the server group.
- returned: success
- type: str
-policies:
- description: A list of one or more policy names of the server group.
- returned: success
- type: list
-members:
- description: A list of members in the server group.
- returned: success
- type: list
-metadata:
- description: Metadata key and value pairs.
- returned: success
+server_group:
+ description: Object representing the server group
+ returned: On success when I(state) is present
type: dict
-project_id:
- description: The project ID who owns the server group.
- returned: success
- type: str
-user_id:
- description: The user ID who owns the server group.
- returned: success
- type: str
+ contains:
+ id:
+ description: Unique UUID.
+ returned: always
+ type: str
+ name:
+ description: The name of the server group.
+ returned: always
+ type: str
+ policies:
+ description: |
+ A list of exactly one policy name to associate with the group.
+ Available until microversion 2.63.
+ returned: always
+ type: list
+ policy:
+ description: |
+ Represents the name of the policy. Available from version 2.64 on.
+ returned: always
+ type: str
+ member_ids:
+ description: The list of members in the server group
+ returned: always
+ type: list
+ metadata:
+ description: Metadata key and value pairs.
+ returned: always
+ type: dict
+ project_id:
+ description: The project ID who owns the server group.
+ returned: always
+ type: str
+ rules:
+ description: |
+ The rules field, applied to the policy. Currently, only the
+ C(max_server_per_host) rule is supported for the
+ C(anti-affinity) policy.
+ returned: always
+ type: dict
+ user_id:
+ description: The user ID who owns the server group.
+ returned: always
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -102,12 +110,17 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ServerGroupModule(OpenStackModule):
argument_spec = dict(
name=dict(required=True),
- policies=dict(required=False, type='list', elements='str'),
+ policy=dict(choices=['anti-affinity', 'affinity', 'soft-anti-affinity',
+ 'soft-affinity']),
state=dict(default='present', choices=['absent', 'present']),
+ rules=dict(type='dict')
)
module_kwargs = dict(
supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['policy'])
+ ],
)
def _system_state_change(self, state, server_group):
@@ -120,10 +133,9 @@ class ServerGroupModule(OpenStackModule):
def run(self):
name = self.params['name']
- policies = self.params['policies']
state = self.params['state']
- server_group = self.conn.get_server_group(name)
+ server_group = self.conn.compute.find_server_group(name)
if self.ansible.check_mode:
self.exit_json(
@@ -133,22 +145,19 @@ class ServerGroupModule(OpenStackModule):
changed = False
if state == 'present':
if not server_group:
- if not policies:
- self.fail_json(
- msg="Parameter 'policies' is required in Server Group "
- "Create"
- )
- server_group = self.conn.create_server_group(name, policies)
+ kwargs = {k: self.params[k]
+ for k in ['name', 'policy', 'rules']
+ if self.params[k] is not None}
+ server_group = self.conn.compute.create_server_group(**kwargs)
changed = True
self.exit_json(
changed=changed,
- id=server_group['id'],
- server_group=server_group
+ server_group=server_group.to_dict(computed=False)
)
if state == 'absent':
if server_group:
- self.conn.delete_server_group(server_group['id'])
+ self.conn.compute.delete_server_group(server_group)
changed = True
self.exit_json(changed=changed)
diff --git a/ansible_collections/openstack/cloud/plugins/modules/server_info.py b/ansible_collections/openstack/cloud/plugins/modules/server_info.py
index bac1d2114..6ab87952a 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/server_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/server_info.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -10,51 +11,348 @@ short_description: Retrieve information about one or more compute instances
author: OpenStack Ansible SIG
description:
- Retrieve information about server instances from OpenStack.
- - This module was called C(os_server_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.server_info) module no longer returns C(ansible_facts)!
notes:
- The result contains a list of servers.
options:
- server:
+ name:
description:
- restrict results to servers with names or UUID matching
- this glob expression (e.g., <web*>).
+ this glob expression such as web*.
+ aliases: ['server']
type: str
detailed:
description:
- when true, return additional detail about servers at the expense
of additional API calls.
type: bool
- default: 'no'
+ default: 'false'
filters:
- description:
- - restrict results to servers matching a dictionary of
- filters
+ description: |
+ Used for further filtering of results. Either a string containing a
+ JMESPath expression or a dictionary of metadata. Elements of the latter
+ may themselves be dictionaries.
type: dict
all_projects:
description:
- Whether to list servers from all projects or just the current auth
scoped project.
type: bool
- default: 'no'
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ default: 'false'
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
-# Gather information about all servers named <web*> that are in an active state:
-- openstack.cloud.server_info:
- cloud: rax-dfw
- server: web*
+- name: Gather information about all 'web*' servers in active state
+ openstack.cloud.server_info:
+ cloud: devstack
+ name: web*
filters:
vm_state: active
- register: result
-- debug:
- msg: "{{ result.openstack_servers }}"
+
+- name: Filter servers with nested dictionaries
+ openstack.cloud.server_info:
+ cloud: devstack
+ filters:
+ metadata:
+ key1: value1
+ key2: value2
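+
+# A minimal extra example (not from upstream): list servers from all
+# projects with additional details (may require administrative credentials).
+- name: Gather detailed information about servers in all projects
+  openstack.cloud.server_info:
+    cloud: devstack-admin
+    all_projects: true
+    detailed: true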
+'''
+
+RETURN = '''
+servers:
+ description: List of servers matching the filters
+ elements: dict
+ type: list
+ returned: always
+ contains:
+ access_ipv4:
+ description: |
+ IPv4 address that should be used to access this server.
+ May be automatically set by the provider.
+ returned: success
+ type: str
+ access_ipv6:
+ description: |
+ IPv6 address that should be used to access this
+ server. May be automatically set by the provider.
+ returned: success
+ type: str
+ addresses:
+ description: |
+ A dictionary of addresses this server can be accessed through.
+ The dictionary contains keys such as 'private' and 'public',
+ each containing a list of dictionaries for addresses of that
+ type. The addresses are contained in a dictionary with keys
+ 'addr' and 'version', which is either 4 or 6 depending on the
+ protocol of the IP address.
+ returned: success
+ type: dict
+ admin_password:
+ description: |
+ When a server is first created, it provides the administrator
+ password.
+ returned: success
+ type: str
+ attached_volumes:
+ description: |
+ A list of attached volumes. Each item in the list contains
+ at least an 'id' key to identify the specific volume.
+ returned: success
+ type: list
+ availability_zone:
+ description: |
+ The name of the availability zone this server is a part of.
+ returned: success
+ type: str
+ block_device_mapping:
+ description: |
+ Enables fine grained control of the block device mapping for an
+ instance. This is typically used for booting servers from
+ volumes.
+ returned: success
+ type: str
+ compute_host:
+ description: |
+ The name of the compute host on which this instance is running.
+ Appears in the response for administrative users only.
+ returned: success
+ type: str
+ config_drive:
+ description: |
+ Indicates whether or not a config drive was used for this
+ server.
+ returned: success
+ type: str
+ created_at:
+ description: Timestamp of when the server was created.
+ returned: success
+ type: str
+ description:
+ description: |
+ The description of the server. Before microversion
+ 2.19 this was set to the server name.
+ returned: success
+ type: str
+ disk_config:
+ description: The disk configuration. Either AUTO or MANUAL.
+ returned: success
+ type: str
+ flavor:
+ description: The flavor property as returned from server.
+ returned: success
+ type: dict
+ flavor_id:
+ description: |
+ The flavor reference, as an ID or full URL, for the flavor to
+ use for this server.
+ returned: success
+ type: str
+ has_config_drive:
+ description: |
+ Indicates whether a configuration drive enables metadata
+ injection. Not all cloud providers enable this feature.
+ returned: success
+ type: str
+ host_id:
+ description: An ID representing the host of this server.
+ returned: success
+ type: str
+ host_status:
+ description: The host status.
+ returned: success
+ type: str
+ hostname:
+ description: |
+ The hostname set on the instance when it is booted.
+ By default, it appears in the response for administrative users
+ only.
+ returned: success
+ type: str
+ hypervisor_hostname:
+ description: |
+ The hypervisor host name. Appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ id:
+ description: ID of the server.
+ returned: success
+ type: str
+ image:
+ description: The image property as returned from server.
+ returned: success
+ type: dict
+ image_id:
+ description: |
+ The image reference, as an ID or full URL, for the image to use
+ for this server.
+ returned: success
+ type: str
+ instance_name:
+ description: |
+ The instance name. The Compute API generates the instance name
+ from the instance name template. Appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ is_locked:
+ description: The locked status of the server
+ returned: success
+ type: bool
+ kernel_id:
+ description: |
+ The UUID of the kernel image when using an AMI. Will be null if
+ not. By default, it appears in the response for administrative
+ users only.
+ returned: success
+ type: str
+ key_name:
+ description: The name of an associated keypair.
+ returned: success
+ type: str
+ launch_index:
+ description: |
+ When servers are launched via multiple create, this is the
+ sequence in which the servers were launched. By default, it
+ appears in the response for administrative users only.
+ returned: success
+ type: int
+ launched_at:
+ description: The timestamp when the server was launched.
+ returned: success
+ type: str
+ links:
+ description: |
+ A list of dictionaries holding links relevant to this server.
+ returned: success
+ type: str
+ max_count:
+ description: The maximum number of servers to create.
+ returned: success
+ type: str
+ metadata:
+ description: Metadata key and value pairs assigned to this server.
+ returned: success
+ type: dict
+ min_count:
+ description: The minimum number of servers to create.
+ returned: success
+ type: str
+ name:
+ description: Name of the server
+ returned: success
+ type: str
+ networks:
+ description: |
+ A networks object. Required parameter when there are multiple
+ networks defined for the tenant. When you do not specify the
+ networks parameter, the server attaches to the only network
+ created for the current tenant.
+ returned: success
+ type: str
+ power_state:
+ description: The power state of this server.
+ returned: success
+ type: str
+ progress:
+ description: |
+ While the server is building, this value represents the
+ percentage of completion. Once it is completed, it will be 100.
+ returned: success
+ type: int
+ project_id:
+ description: The ID of the project this server is associated with.
+ returned: success
+ type: str
+ ramdisk_id:
+ description: |
+ The UUID of the ramdisk image when using an AMI. Will be null
+ if not. By default, it appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ reservation_id:
+ description: |
+ The reservation id for the server. This is an id that can be
+ useful in tracking groups of servers created with multiple
+ create, that will all have the same reservation_id. By default,
+ it appears in the response for administrative users only.
+ returned: success
+ type: str
+ root_device_name:
+ description: |
+ The root device name for the instance. By default, it appears in
+ the response for administrative users only.
+ returned: success
+ type: str
+ scheduler_hints:
+ description: The dictionary of data to send to the scheduler.
+ returned: success
+ type: dict
+ security_groups:
+ description: |
+ A list of applicable security groups. Each group contains keys
+ for: description, name, id, and rules.
+ returned: success
+ type: list
+ elements: dict
+ server_groups:
+ description: |
+ The UUIDs of the server groups to which the server belongs.
+ Currently this can contain at most one entry.
+ returned: success
+ type: list
+ status:
+ description: |
+ The state this server is in. Valid values include 'ACTIVE',
+ 'BUILDING', 'DELETED', 'ERROR', 'HARD_REBOOT', 'PASSWORD',
+ 'PAUSED', 'REBOOT', 'REBUILD', 'RESCUED', 'RESIZED',
+ 'REVERT_RESIZE', 'SHUTOFF', 'SOFT_DELETED', 'STOPPED',
+ 'SUSPENDED', 'UNKNOWN', or 'VERIFY_RESIZE'.
+ returned: success
+ type: str
+ tags:
+ description: A list of associated tags.
+ returned: success
+ type: list
+ task_state:
+ description: The task state of this server.
+ returned: success
+ type: str
+ terminated_at:
+ description: |
+ The timestamp when the server was terminated (if it has been).
+ returned: success
+ type: str
+ trusted_image_certificates:
+ description: |
+ A list of trusted certificate IDs that were used during image
+ signature verification to verify the signing certificate.
+ returned: success
+ type: list
+ updated_at:
+ description: Timestamp of when this server was last updated.
+ returned: success
+ type: str
+ user_data:
+ description: |
+ Configuration information or scripts to use upon launch.
+ Base64 encoded.
+ returned: success
+ type: str
+ user_id:
+ description: The ID of the owner of this server.
+ returned: success
+ type: str
+ vm_state:
+ description: The VM state of this server.
+ returned: success
+ type: str
+ volumes:
+ description: Same as attached_volumes.
+ returned: success
+ type: list
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -62,29 +360,25 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ServerInfoModule(OpenStackModule):
- deprecated_names = ('os_server_info', 'openstack.cloud.os_server_info')
-
argument_spec = dict(
- server=dict(required=False),
- detailed=dict(required=False, type='bool', default=False),
- filters=dict(required=False, type='dict', default=None),
- all_projects=dict(required=False, type='bool', default=False),
+ name=dict(aliases=['server']),
+ detailed=dict(type='bool', default=False),
+ filters=dict(type='dict'),
+ all_projects=dict(type='bool', default=False),
)
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['detailed', 'filters', 'all_projects']
+ if self.params[k] is not None)
+ kwargs['name_or_id'] = self.params['name']
- kwargs = self.check_versioned(
- detailed=self.params['detailed'],
- filters=self.params['filters'],
- all_projects=self.params['all_projects']
- )
- if self.params['server']:
- kwargs['name_or_id'] = self.params['server']
- openstack_servers = self.conn.search_servers(**kwargs)
- self.exit(changed=False, openstack_servers=openstack_servers)
+ self.exit(changed=False,
+ servers=[server.to_dict(computed=False) for server in
+ self.conn.search_servers(**kwargs)])
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/server_metadata.py b/ansible_collections/openstack/cloud/plugins/modules/server_metadata.py
index a1207e3b3..5764c5c68 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/server_metadata.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/server_metadata.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright (c) 2016, Mario Santos <mario.rf.santos@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -12,18 +12,21 @@ author: OpenStack Ansible SIG
description:
- Add, Update or Remove metadata in compute instances from OpenStack.
options:
- server:
+ name:
description:
- Name of the instance to update the metadata
required: true
- aliases: ['name']
+ aliases: ['server']
type: str
- meta:
+ metadata:
description:
- 'A list of key value pairs that should be provided as a metadata to
the instance or a string containing a list of key-value pairs.
Eg: meta: "key1=value1,key2=value2"'
+ - Note that when I(state) is C(present), metadata already existing on the
+ server will not be cleared.
required: true
+ aliases: [meta]
type: dict
state:
description:
@@ -31,66 +34,327 @@ options:
choices: [present, absent]
default: present
type: str
- availability_zone:
- description:
- - Availability zone in which to create the snapshot.
- required: false
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
# Creates or updates hostname=test1 as metadata of the server instance vm1
-- name: add metadata to compute instance
- hosts: localhost
- tasks:
- - name: add metadata to instance
- openstack.cloud.server_metadata:
- state: present
- auth:
- auth_url: https://openstack-api.example.com:35357/v2.0/
- username: admin
- password: admin
- project_name: admin
- name: vm1
- meta:
- hostname: test1
- group: group1
+# Note that existing keys will not be cleared
+- name: add metadata to instance
+ openstack.cloud.server_metadata:
+ state: present
+ cloud: "{{ cloud }}"
+ name: vm1
+ metadata:
+ hostname: test1
+ group: group1
# Removes the keys under meta from the instance named vm1
-- name: delete metadata from compute instance
- hosts: localhost
- tasks:
- - name: delete metadata from instance
- openstack.cloud.server_metadata:
+- name: delete metadata from instance
+ openstack.cloud.server_metadata:
state: absent
- auth:
- auth_url: https://openstack-api.example.com:35357/v2.0/
- username: admin
- password: admin
- project_name: admin
+ cloud: "{{ cloud }}"
name: vm1
meta:
hostname:
group:
+ public_keys:
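+
+# A minimal extra example (not from upstream): metadata may also be given
+# as a comma-separated key=value string, as described in the option docs.
+- name: add metadata to instance using the string form
+  openstack.cloud.server_metadata:
+    state: present
+    cloud: "{{ cloud }}"
+    name: vm1
+    metadata: "hostname=test1,group=group1"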
'''
RETURN = '''
-server_id:
- description: The compute instance id where the change was made
- returned: success
- type: str
- sample: "324c4e91-3e03-4f62-9a4d-06119a8a8d16"
-metadata:
- description: The metadata of compute instance after the change
- returned: success
+server:
+ description: Dictionary describing the server that was updated.
type: dict
- sample: {'key1': 'value1', 'key2': 'value2'}
+ returned: On success when I(state) is 'present'.
+ contains:
+ access_ipv4:
+ description: |
+ IPv4 address that should be used to access this server.
+ May be automatically set by the provider.
+ returned: success
+ type: str
+ access_ipv6:
+ description: |
+ IPv6 address that should be used to access this
+ server. May be automatically set by the provider.
+ returned: success
+ type: str
+ addresses:
+ description: |
+ A dictionary of addresses this server can be accessed through.
+ The dictionary contains keys such as 'private' and 'public',
+ each containing a list of dictionaries for addresses of that
+ type. The addresses are contained in a dictionary with keys
+ 'addr' and 'version', which is either 4 or 6 depending on the
+ protocol of the IP address.
+ returned: success
+ type: dict
+ admin_password:
+ description: |
+ When a server is first created, it provides the administrator
+ password.
+ returned: success
+ type: str
+ attached_volumes:
+ description: |
+ A list of attached volumes. Each item in the list contains
+ at least an 'id' key to identify the specific volume.
+ returned: success
+ type: list
+ availability_zone:
+ description: |
+ The name of the availability zone this server is a part of.
+ returned: success
+ type: str
+ block_device_mapping:
+ description: |
+ Enables fine grained control of the block device mapping for an
+ instance. This is typically used for booting servers from
+ volumes.
+ returned: success
+ type: str
+ compute_host:
+ description: |
+ The name of the compute host on which this instance is running.
+ Appears in the response for administrative users only.
+ returned: success
+ type: str
+ config_drive:
+ description: |
+ Indicates whether or not a config drive was used for this
+ server.
+ returned: success
+ type: str
+ created_at:
+ description: Timestamp of when the server was created.
+ returned: success
+ type: str
+ description:
+ description: |
+ The description of the server. Before microversion
+ 2.19 this was set to the server name.
+ returned: success
+ type: str
+ disk_config:
+ description: The disk configuration. Either AUTO or MANUAL.
+ returned: success
+ type: str
+ flavor:
+ description: The flavor property as returned from server.
+ returned: success
+ type: dict
+ flavor_id:
+ description: |
+ The flavor reference, as a ID or full URL, for the flavor to
+ use for this server.
+ returned: success
+ type: str
+ has_config_drive:
+ description: |
+ Indicates whether a configuration drive enables metadata
+ injection. Not all cloud providers enable this feature.
+ returned: success
+ type: str
+ host_id:
+ description: An ID representing the host of this server.
+ returned: success
+ type: str
+ host_status:
+ description: The host status.
+ returned: success
+ type: str
+ hostname:
+ description: |
+ The hostname set on the instance when it is booted.
+ By default, it appears in the response for administrative users
+ only.
+ returned: success
+ type: str
+ hypervisor_hostname:
+ description: |
+ The hypervisor host name. Appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ id:
+ description: ID of the server.
+ returned: success
+ type: str
+ image:
+ description: The image property as returned from server.
+ returned: success
+ type: dict
+ image_id:
+ description: |
+ The image reference, as a ID or full URL, for the image to use
+ for this server.
+ returned: success
+ type: str
+ instance_name:
+ description: |
+ The instance name. The Compute API generates the instance name
+ from the instance name template. Appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ is_locked:
+ description: The locked status of the server
+ returned: success
+ type: bool
+ kernel_id:
+ description: |
+ The UUID of the kernel image when using an AMI. Will be null if
+ not. By default, it appears in the response for administrative
+ users only.
+ returned: success
+ type: str
+ key_name:
+ description: The name of an associated keypair.
+ returned: success
+ type: str
+ launch_index:
+ description: |
+ When servers are launched via multiple create, this is the
+ sequence in which the servers were launched. By default, it
+ appears in the response for administrative users only.
+ returned: success
+ type: int
+ launched_at:
+ description: The timestamp when the server was launched.
+ returned: success
+ type: str
+ links:
+ description: |
+ A list of dictionaries holding links relevant to this server.
+ returned: success
+ type: str
+ max_count:
+ description: The maximum number of servers to create.
+ returned: success
+ type: str
+ metadata:
+      description: The metadata key and value pairs associated with the server.
+ returned: success
+ type: dict
+ min_count:
+ description: The minimum number of servers to create.
+ returned: success
+ type: str
+ name:
+ description: Name of the server
+ returned: success
+ type: str
+ networks:
+ description: |
+ A networks object. Required parameter when there are multiple
+ networks defined for the tenant. When you do not specify the
+ networks parameter, the server attaches to the only network
+ created for the current tenant.
+ returned: success
+ type: str
+ power_state:
+ description: The power state of this server.
+ returned: success
+ type: str
+ progress:
+ description: |
+ While the server is building, this value represents the
+ percentage of completion. Once it is completed, it will be 100.
+ returned: success
+ type: int
+ project_id:
+ description: The ID of the project this server is associated with.
+ returned: success
+ type: str
+ ramdisk_id:
+ description: |
+ The UUID of the ramdisk image when using an AMI. Will be null
+ if not. By default, it appears in the response for
+ administrative users only.
+ returned: success
+ type: str
+ reservation_id:
+ description: |
+ The reservation id for the server. This is an id that can be
+ useful in tracking groups of servers created with multiple
+ create, that will all have the same reservation_id. By default,
+ it appears in the response for administrative users only.
+ returned: success
+ type: str
+ root_device_name:
+ description: |
+        The root device name for the instance. By default, it appears in
+ the response for administrative users only.
+ returned: success
+ type: str
+ scheduler_hints:
+ description: The dictionary of data to send to the scheduler.
+ returned: success
+ type: dict
+ security_groups:
+ description: |
+ A list of applicable security groups. Each group contains keys
+ for: description, name, id, and rules.
+ returned: success
+ type: list
+ elements: dict
+ server_groups:
+ description: |
+ The UUIDs of the server groups to which the server belongs.
+ Currently this can contain at most one entry.
+ returned: success
+ type: list
+ status:
+ description: |
+ The state this server is in. Valid values include 'ACTIVE',
+ 'BUILDING', 'DELETED', 'ERROR', 'HARD_REBOOT', 'PASSWORD',
+ 'PAUSED', 'REBOOT', 'REBUILD', 'RESCUED', 'RESIZED',
+ 'REVERT_RESIZE', 'SHUTOFF', 'SOFT_DELETED', 'STOPPED',
+ 'SUSPENDED', 'UNKNOWN', or 'VERIFY_RESIZE'.
+ returned: success
+ type: str
+ tags:
+ description: A list of associated tags.
+ returned: success
+ type: list
+ task_state:
+ description: The task state of this server.
+ returned: success
+ type: str
+ terminated_at:
+ description: |
+ The timestamp when the server was terminated (if it has been).
+ returned: success
+ type: str
+ trusted_image_certificates:
+ description: |
+ A list of trusted certificate IDs, that were used during image
+ signature verification to verify the signing certificate.
+ returned: success
+ type: list
+ updated_at:
+ description: Timestamp of when this server was last updated.
+ returned: success
+ type: str
+ user_data:
+ description: |
+ Configuration information or scripts to use upon launch.
+ Base64 encoded.
+ returned: success
+ type: str
+ user_id:
+      description: The ID of the owner of this server.
+ returned: success
+ type: str
+ vm_state:
+ description: The VM state of this server.
+ returned: success
+ type: str
+ volumes:
+ description: Same as attached_volumes.
+ returned: success
+ type: list
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -98,62 +362,79 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class ServerMetadataModule(OpenStackModule):
argument_spec = dict(
- server=dict(required=True, aliases=['name']),
- meta=dict(required=True, type='dict'),
+ name=dict(required=True, aliases=['server']),
+ metadata=dict(required=True, type='dict', aliases=['meta']),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = dict(
supports_check_mode=True
)
- def _needs_update(self, server_metadata=None, metadata=None):
- if server_metadata is None:
- server_metadata = {}
- if metadata is None:
- metadata = {}
- return len(set(metadata.items()) - set(server_metadata.items())) != 0
-
- def _get_keys_to_delete(self, server_metadata_keys=None, metadata_keys=None):
- if server_metadata_keys is None:
- server_metadata_keys = []
- if metadata_keys is None:
- metadata_keys = []
- return set(server_metadata_keys) & set(metadata_keys)
-
def run(self):
state = self.params['state']
- server_param = self.params['server']
- meta_param = self.params['meta']
- changed = False
+ server_name_or_id = self.params['name']
+ metadata = self.params['metadata']
- server = self.conn.get_server(server_param)
- if not server:
- self.fail_json(
- msg='Could not find server {0}'.format(server_param))
+ server = self.conn.compute.find_server(server_name_or_id,
+ ignore_missing=False)
+ # openstacksdk will not return details when looking up by name, so we
+ # need to refresh the server to get the metadata when updating.
+ # Can remove when
+ # https://review.opendev.org/c/openstack/openstacksdk/+/857987 merges
+ server = self.conn.compute.get_server(server.id)
+ if self.ansible.check_mode:
+ self.exit_json(**self._check_mode_values(state, server, metadata))
+
+ changed = False
if state == 'present':
- # check if it needs update
- if self._needs_update(
- server_metadata=server.metadata, metadata=meta_param
- ):
- if not self.ansible.check_mode:
- self.conn.set_server_metadata(server_param, meta_param)
+ update = self._build_update(server.metadata, metadata)
+ if update:
+ # Pass in all metadata keys to set_server_metadata so server
+ # object keeps all the keys
+ new_metadata = (server.metadata or {})
+ new_metadata.update(update)
+ self.conn.compute.set_server_metadata(server,
+ **new_metadata)
changed = True
elif state == 'absent':
- # remove from params the keys that do not exist in the server
- keys_to_delete = self._get_keys_to_delete(
- server.metadata.keys(), meta_param.keys())
- if len(keys_to_delete) > 0:
- if not self.ansible.check_mode:
- self.conn.delete_server_metadata(
- server_param, keys_to_delete)
+ # Only remove keys that exist on the server
+ keys_to_delete = self._get_keys_to_delete(server.metadata,
+ metadata)
+ if keys_to_delete:
+ self.conn.compute.delete_server_metadata(server,
+ keys_to_delete)
changed = True
- if changed:
- server = self.conn.get_server(server_param)
+ self.exit_json(changed=changed,
+ server=server.to_dict(computed=False))
+
+ def _build_update(self, current=None, requested=None):
+ current = current or {}
+ requested = requested or {}
+ update = dict(requested.items() - current.items())
+ return update
+
+ def _get_keys_to_delete(self, current=None, requested=None):
+ current = current or {}
+ requested = requested or {}
+ return set(current.keys() & requested.keys())
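+
+    # Illustration with assumed values: given current={'a': '1', 'b': '2'}
+    # and requested={'b': '3', 'c': '4'}, _build_update() returns
+    # {'b': '3', 'c': '4'} while _get_keys_to_delete() returns {'b'}.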
- self.exit_json(
- changed=changed, server_id=server.id, metadata=server.metadata)
+ def _check_mode_values(self, state, server, meta):
+ "Builds return values for check mode"
+ changed = False
+ if state == 'present':
+ update = self._build_update(server.metadata, meta)
+ if update:
+ changed = True
+ new_metadata = (server.metadata or {})
+ new_metadata.update(update)
+ server.metadata = new_metadata
+ else:
+ keys = self._get_keys_to_delete(server.metadata, meta)
+ for k in keys:
+                server.metadata.pop(k)
+ return dict(changed=changed, server=server.to_dict(computed=False))
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/server_volume.py b/ansible_collections/openstack/cloud/plugins/modules/server_volume.py
index 1deb8fa6e..a7385c724 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/server_volume.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/server_volume.py
@@ -1,10 +1,10 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: server_volume
short_description: Attach/Detach Volumes from OpenStack VMs
@@ -12,47 +12,144 @@ author: OpenStack Ansible SIG
description:
    - Attach or Detach volumes from OpenStack VMs
options:
- state:
+ device:
description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- required: false
+ - Device you want to attach. Defaults to auto finding a device name.
type: str
server:
description:
- Name or ID of server you want to attach a volume to
required: true
type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
volume:
description:
- Name or id of volume you want to attach to a server
required: true
type: str
- device:
- description:
- - Device you want to attach. Defaults to auto finding a device name.
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Attaches a volume to a compute host
-- name: attach a volume
- hosts: localhost
- tasks:
- - name: attach volume to host
- openstack.cloud.server_volume:
- state: present
- cloud: mordred
- server: Mysql-server
- volume: mysql-data
- device: /dev/vdb
+RETURN = r'''
+volume:
+ type: dict
+ description: Volume that was just attached
+ returned: On success when I(state) is present
+ contains:
+ attachments:
+ description: Instance attachment information. If this volume is attached
+ to a server instance, the attachments list includes the UUID
+ of the attached server, an attachment UUID, the name of the
+ attached host, if any, the volume UUID, the device, and the
+ device UUID. Otherwise, this list is empty.
+ type: list
+ availability_zone:
+ description: The name of the availability zone.
+ type: str
+ consistency_group_id:
+ description: The UUID of the consistency group.
+ type: str
+ created_at:
+ description: The date and time when the resource was created.
+ type: str
+ description:
+ description: The volume description.
+ type: str
+ extended_replication_status:
+ description: Extended replication status on this volume.
+ type: str
+ group_id:
+ description: The ID of the group.
+ type: str
+ host:
+ description: The volume's current back-end.
+ type: str
+ id:
+ description: The UUID of the volume.
+ type: str
+ image_id:
+ description: Image on which the volume was based
+ type: str
+ is_bootable:
+ description: Enables or disables the bootable attribute. You can boot an
+ instance from a bootable volume.
+ type: str
+ is_encrypted:
+ description: If true, this volume is encrypted.
+ type: bool
+ metadata:
+ description: A metadata object. Contains one or more metadata key and
+ value pairs that are associated with the volume.
+ type: dict
+ migration_id:
+ description: The volume ID that this volume name on the backend is
+ based on.
+ type: str
+ migration_status:
+ description: The status of this volume migration (None means that a
+ migration is not currently in progress).
+ type: str
+ name:
+ description: The volume name.
+ type: str
+ project_id:
+ description: The project ID which the volume belongs to.
+ type: str
+ replication_driver_data:
+ description: Data set by the replication driver
+ type: str
+ replication_status:
+ description: The volume replication status.
+ type: str
+ scheduler_hints:
+ description: Scheduler hints for the volume
+ type: dict
+ size:
+ description: The size of the volume, in gibibytes (GiB).
+ type: int
+ snapshot_id:
+ description: To create a volume from an existing snapshot, specify the
+                   UUID of the volume snapshot. The volume is created in the
+                   same availability zone and with the same size as the
+                   snapshot.
+ type: str
+ source_volume_id:
+ description: The UUID of the source volume. The API creates a new volume
+ with the same size as the source volume unless a larger size
+ is requested.
+ type: str
+ status:
+ description: The volume status.
+ type: str
+ updated_at:
+ description: The date and time when the resource was updated.
+ type: str
+ user_id:
+ description: The UUID of the user.
+ type: str
+ volume_image_metadata:
+ description: List of image metadata entries. Only included for volumes
+ that were created from an image, or from a snapshot of a
+ volume originally created from an image.
+ type: dict
+ volume_type:
+ description: The associated volume type name for the volume.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Attaches a volume to a compute host
+ openstack.cloud.server_volume:
+ state: present
+ cloud: mordred
+ server: Mysql-server
+ volume: mysql-data
+ device: /dev/vdb
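+
+# A minimal detach counterpart, assuming the same cloud, server and volume
+# names as in the attach example above.
+- name: Detach the volume from the compute host
+  openstack.cloud.server_volume:
+    state: absent
+    cloud: mordred
+    server: Mysql-server
+    volume: mysql-data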
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -60,15 +157,8 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
def _system_state_change(state, device):
"""Check if system state would change."""
- if state == 'present':
- if device:
- return False
- return True
- if state == 'absent':
- if device:
- return True
- return False
- return False
+ return (state == 'present' and not device) \
+ or (state == 'absent' and device)
class ServerVolumeModule(OpenStackModule):
@@ -76,58 +166,45 @@ class ServerVolumeModule(OpenStackModule):
argument_spec = dict(
server=dict(required=True),
volume=dict(required=True),
- device=dict(default=None), # None == auto choose device name
+ device=dict(), # None == auto choose device name
state=dict(default='present', choices=['absent', 'present']),
)
def run(self):
-
state = self.params['state']
wait = self.params['wait']
timeout = self.params['timeout']
- server = self.conn.get_server(self.params['server'])
- volume = self.conn.get_volume(self.params['volume'])
-
- if not server:
- self.fail(msg='server %s is not found' % self.params['server'])
-
- if not volume:
- self.fail(msg='volume %s is not found' % self.params['volume'])
+ server = self.conn.compute.find_server(self.params['server'],
+ ignore_missing=False)
+ volume = self.conn.block_storage.find_volume(self.params['volume'],
+ ignore_missing=False)
dev = self.conn.get_volume_attach_device(volume, server.id)
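+        # dev holds the device name (e.g. /dev/vdb) if the volume is already
+        # attached to this server, and None otherwise.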
if self.ansible.check_mode:
- self.exit(changed=_system_state_change(state, dev))
+ self.exit_json(changed=_system_state_change(state, dev))
if state == 'present':
changed = False
if not dev:
changed = True
- self.conn.attach_volume(server, volume, self.params['device'],
+ self.conn.attach_volume(server, volume,
+ device=self.params['device'],
wait=wait, timeout=timeout)
+ # refresh volume object
+ volume = self.conn.block_storage.get_volume(volume.id)
- server = self.conn.get_server(self.params['server']) # refresh
- volume = self.conn.get_volume(self.params['volume']) # refresh
- hostvars = self.conn.get_openstack_vars(server)
-
- self.exit(
- changed=changed,
- id=volume['id'],
- attachments=volume['attachments'],
- openstack=hostvars
- )
+ self.exit_json(changed=changed,
+ volume=volume.to_dict(computed=False))
elif state == 'absent':
if not dev:
# Volume is not attached to this server
- self.exit(changed=False)
+ self.exit_json(changed=False)
self.conn.detach_volume(server, volume, wait=wait, timeout=timeout)
- self.exit(
- changed=True,
- result='Detached volume from server'
- )
+ self.exit_json(changed=True)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/stack.py b/ansible_collections/openstack/cloud/plugins/modules/stack.py
index 95b7bef5e..4c317fe78 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/stack.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/stack.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>
@@ -13,59 +13,64 @@ author: OpenStack Ansible SIG
description:
- Add or Remove a Stack to an OpenStack Heat
options:
- state:
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- name:
- description:
- - Name of the stack that should be created, name could be char and digit, no space
- required: true
- type: str
- tag:
- description:
- - Tag for the stack that should be created, name could be char and digit, no space
- type: str
- template:
- description:
- - Path of the template file to use for the stack creation
- type: str
environment:
description:
- List of environment files that should be used for the stack creation
type: list
elements: str
+ name:
+ description:
+ - A name for the stack.
+ - The value must be unique within a project.
+ - The name must start with an ASCII letter and can contain ASCII
+ letters, digits, underscores, periods, and hyphens. Specifically,
+ the name must match the C(^[a-zA-Z][a-zA-Z0-9_.-]{0,254}$) regular
+ expression.
+ - When you delete or abandon a stack, its name will not become
+ available for reuse until the deletion completes successfully.
+ required: true
+ type: str
parameters:
description:
- Dictionary of parameters for the stack creation
type: dict
+ default: {}
rollback:
description:
- Rollback stack creation
type: bool
default: false
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ tags:
+ description:
+ - One or more simple string tags to associate with the stack.
+ - To associate multiple tags with a stack, separate the tags with
+ commas. For example, C(tag1,tag2).
+ type: str
+ aliases: ['tag']
+ template:
+ description:
+ - Path of the template file to use for the stack creation
+ type: str
timeout:
description:
- Maximum number of seconds to wait for the stack creation
default: 3600
type: int
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
+
EXAMPLES = '''
----
- name: create stack
- ignore_errors: True
- register: stack_create
openstack.cloud.stack:
- name: "{{ stack_name }}"
- tag: "{{ tag_name }}"
+ name: "teststack"
+ tag: "tag1,tag2"
state: present
template: "/path/to/my_stack.yaml"
environment:
@@ -75,62 +80,90 @@ EXAMPLES = '''
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
- private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
- external_net: "{{ external_net_param }}"
'''
RETURN = '''
-id:
- description: Stack ID.
- type: str
- sample: "97a3f543-8136-4570-920e-fd7605c989d6"
- returned: always
-
stack:
description: stack info
- type: complex
+ type: dict
returned: always
contains:
- action:
- description: Action, could be Create or Update.
+ added:
+ description: List of resource objects that will be added.
+ type: list
+ capabilities:
+ description: AWS compatible template listing capabilities.
+ type: list
+ created_at:
+      description: Time when the stack was created.
type: str
- sample: "CREATE"
- creation_time:
- description: Time when the action has been made.
+ sample: "2016-07-05T17:38:12Z"
+ deleted:
+ description: A list of resource objects that will be deleted.
+ type: list
+ deleted_at:
+ description: Time when the deleted.
type: str
sample: "2016-07-05T17:38:12Z"
description:
- description: Description of the Stack provided in the heat template.
+ description: >
+ Description of the Stack provided in the heat
+ template.
type: str
sample: "HOT template to create a new instance and networks"
+ environment:
+ description: A JSON environment for the stack.
+ type: dict
+ environment_files:
+ description: >
+ An ordered list of names for environment files found
+ in the files dict.
+ type: list
+ files:
+ description: >
+ Additional files referenced in the template or
+ the environment
+ type: dict
+ files_container:
+ description: >
+ Name of swift container with child templates and
+ files.
+ type: str
id:
description: Stack ID.
type: str
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
+ is_rollback_disabled:
+ description: Whether the stack will support a rollback.
+ type: bool
+ links:
+ description: Links to the current Stack.
+ type: list
+ elements: dict
+ sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/
+                 97a3f543-8136-4570-920e-fd7605c989d6'}]"
name:
description: Name of the Stack
type: str
sample: "test-stack"
- identifier:
- description: Identifier of the current Stack action.
+ notification_topics:
+ description: Stack related events.
type: str
- sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
- links:
- description: Links to the current Stack.
- type: list
- elements: dict
- sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
+ sample: "HOT template to create a new instance and networks"
outputs:
description: Output returned by the Stack.
type: list
elements: dict
- sample: "{'description': 'IP address of server1 in private network',
+ sample: "[{'description': 'IP of server1 in private network',
'output_key': 'server1_private_ip',
- 'output_value': '10.1.10.103'}"
+ 'output_value': '10.1.10.103'}]"
+ owner_id:
+ description: The ID of the owner stack if any.
+ type: str
parameters:
description: Parameters of the current Stack
type: dict
@@ -138,70 +171,87 @@ stack:
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
- 'stack_status_reason': 'Stack CREATE completed successfully',
+ 'stack_status_reason':
+ 'Stack CREATE completed successfully',
'status': 'COMPLETE',
- 'template_description': 'HOT template to create a new instance and networks',
+ 'template_description':
+ 'HOT template to create a new instance and nets',
'timeout_mins': 60,
'updated_time': null}"
+ parent_id:
+ description: The ID of the parent stack if any.
+ type: str
+ replaced:
+ description: A list of resource objects that will be replaced.
+ type: str
+ status:
+ description: stack status.
+ type: str
+ status_reason:
+ description: >
+        Explains how the stack transitioned to its current
+        status.
+ type: str
+ tags:
+ description: A list of strings used as tags on the stack
+ type: list
+ template:
+      description: A dict containing the template used for stack creation.
+ type: dict
+ template_description:
+ description: Stack template description text.
+ type: str
+ template_url:
+ description: The URL where a stack template can be found.
+ type: str
+ timeout_mins:
+ description: Stack operation timeout in minutes.
+ type: str
+ unchanged:
+ description: >
+        A list of resource objects that will remain unchanged
+        if a stack update is performed.
+ type: list
+ updated:
+ description: >
+ A list of resource objects that will have their
+ properties updated.
+ type: list
+ updated_at:
+ description: Timestamp of last update on the stack.
+ type: str
+ user_project_id:
+ description: The ID of the user project created for this stack.
+ type: str
'''
-
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class StackModule(OpenStackModule):
argument_spec = dict(
+ environment=dict(type='list', elements='str'),
name=dict(required=True),
- tag=dict(required=False, default=None, min_ver='0.28.0'),
- template=dict(default=None),
- environment=dict(default=None, type='list', elements='str'),
parameters=dict(default={}, type='dict'),
rollback=dict(default=False, type='bool'),
- timeout=dict(default=3600, type='int'),
state=dict(default='present', choices=['absent', 'present']),
+ tags=dict(aliases=['tag']),
+ template=dict(),
+ timeout=dict(default=3600, type='int'),
)
module_kwargs = dict(
- supports_check_mode=True
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ('template',), True)]
)
- def _create_stack(self, stack, parameters):
- stack = self.conn.create_stack(
- self.params['name'],
- template_file=self.params['template'],
- environment_files=self.params['environment'],
- timeout=self.params['timeout'],
- wait=True,
- rollback=self.params['rollback'],
- **parameters)
-
- stack = self.conn.get_stack(stack.id, None)
- if stack.stack_status == 'CREATE_COMPLETE':
- return stack
- else:
- self.fail_json(msg="Failure in creating stack: {0}".format(stack))
-
- def _update_stack(self, stack, parameters):
- stack = self.conn.update_stack(
- self.params['name'],
- template_file=self.params['template'],
- environment_files=self.params['environment'],
- timeout=self.params['timeout'],
- rollback=self.params['rollback'],
- wait=self.params['wait'],
- **parameters)
-
- if stack['stack_status'] == 'UPDATE_COMPLETE':
- return stack
- else:
- self.fail_json(msg="Failure in updating stack: %s" %
- stack['stack_status_reason'])
-
- def _system_state_change(self, stack):
- state = self.params['state']
+ def _system_state_change(self, stack, state):
if state == 'present':
- if not stack:
- return True
+ # This method will always return True if state is present to
+ # include the case of stack update as there is no simple way
+ # to check if the stack will indeed be updated
+ return True
if state == 'absent' and stack:
return True
return False
@@ -209,34 +259,57 @@ class StackModule(OpenStackModule):
def run(self):
state = self.params['state']
name = self.params['name']
- # Check for required parameters when state == 'present'
- if state == 'present':
- for p in ['template']:
- if not self.params[p]:
- self.fail_json(msg='%s required with present state' % p)
+ # self.conn.get_stack() will not return stacks with status ==
+ # DELETE_COMPLETE while self.conn.orchestration.find_stack() will
+ # do so. A name of a stack which has been deleted completely can be
+ # reused to create a new stack, hence we want self.conn.get_stack()'s
+ # behaviour here.
stack = self.conn.get_stack(name)
if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(stack))
+ self.exit_json(changed=self._system_state_change(stack, state))
if state == 'present':
- parameters = self.params['parameters']
- if not stack:
- stack = self._create_stack(stack, parameters)
+ # assume an existing stack always requires updates because there is
+ # no simple way to check if stack will indeed have to be updated
+ is_update = bool(stack)
+ kwargs = dict(
+ template_file=self.params['template'],
+ environment_files=self.params['environment'],
+ timeout=self.params['timeout'],
+ rollback=self.params['rollback'],
+ #
+ # Always wait because we expect status to be
+ # CREATE_COMPLETE or UPDATE_COMPLETE
+ wait=True,
+ )
+
+ tags = self.params['tags']
+ if tags is not None:
+ kwargs['tags'] = tags
+
+ extra_kwargs = self.params['parameters']
+ dup_kwargs = set(kwargs.keys()) & set(extra_kwargs.keys())
+ if dup_kwargs:
+ raise ValueError('Duplicate key(s) {0} in parameters'
+ .format(list(dup_kwargs)))
+ kwargs = dict(kwargs, **extra_kwargs)
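+            # Illustration with an assumed value: parameters={'key_name': 'default'}
+            # merges 'key_name' into kwargs, which create_stack() or
+            # update_stack() then receives as an extra keyword argument; a key
+            # such as 'rollback' would be rejected above because it collides
+            # with the fixed kwargs.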
+
+ if not is_update:
+ stack = self.conn.create_stack(name, **kwargs)
else:
- stack = self._update_stack(stack, parameters)
- self.exit_json(changed=True,
- stack=stack,
- id=stack.id)
+ stack = self.conn.update_stack(name, **kwargs)
+
+ stack = self.conn.orchestration.get_stack(stack['id'])
+ self.exit_json(changed=True, stack=stack.to_dict(computed=False))
elif state == 'absent':
if not stack:
- changed = False
+ self.exit_json(changed=False)
else:
- changed = True
- if not self.conn.delete_stack(name, wait=self.params['wait']):
- self.fail_json(msg='delete stack failed for stack: %s' % name)
- self.exit_json(changed=changed)
+ self.conn.delete_stack(name_or_id=stack['id'],
+ wait=self.params['wait'])
+ self.exit_json(changed=True)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/stack_info.py b/ansible_collections/openstack/cloud/plugins/modules/stack_info.py
index ce56995a4..9c1232121 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/stack_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/stack_info.py
@@ -1,106 +1,238 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright (c) 2020, Sagi Shnaidman <sshnaidm@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: stack_info
-short_description: Retrive information about Heat stacks
+short_description: Retrieve information about Heat stacks
author: OpenStack Ansible SIG
description:
- - Get information about Heat stack in openstack
+ - Get information about Heat stack in OpenStack
options:
name:
description:
- - Name of the stack as a string.
+ - Name of the stack.
type: str
- required: false
- status:
+ owner:
description:
- - Value of the status of the stack so that you can filter on "available" for example
+ - Name or ID of the parent stack.
type: str
- required: false
- project_id:
+ aliases: ['owner_id']
+ project:
description:
- - Project ID to be used as filter
+ - Name or ID of the project.
type: str
- required: false
- owner_id:
+ aliases: ['project_id']
+ status:
description:
- - Owner (parent) of the stack to be used as a filter
+        - Status of the stack such as C(available).
type: str
- required: false
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-RETURN = '''
+EXAMPLES = r'''
+- name: Fetch all Heat stacks
+ openstack.cloud.stack_info:
+ cloud: devstack
+
+- name: Fetch a single Heat stack
+ openstack.cloud.stack_info:
+ cloud: devstack
+ name: my_stack
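+
+# Illustrative filter; the status value shown is an assumed example.
+- name: Fetch Heat stacks filtered by status
+  openstack.cloud.stack_info:
+    cloud: devstack
+    status: CREATE_COMPLETE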
+'''
+
+RETURN = r'''
stacks:
description: List of dictionaries describing stacks.
type: list
elements: dict
returned: always.
contains:
+ added:
+ description: List of resource objects that will be added.
+ type: list
+ capabilities:
+ description: AWS compatible template listing capabilities.
+ type: list
+ created_at:
+      description: Time when the stack was created.
+ type: str
+ sample: "2016-07-05T17:38:12Z"
+ deleted:
+ description: A list of resource objects that will be deleted.
+ type: list
+ deleted_at:
+      description: Time when the stack was deleted.
+ type: str
+ sample: "2016-07-05T17:38:12Z"
+ description:
+ description: >
+ Description of the Stack provided in the heat
+ template.
+ type: str
+ sample: "HOT template to create a new instance and networks"
+ environment:
+ description: A JSON environment for the stack.
+ type: dict
+ environment_files:
+ description: >
+ An ordered list of names for environment files found
+ in the files dict.
+ type: list
+ files:
+ description: >
+ Additional files referenced in the template or
+ the environment
+ type: dict
+ files_container:
+ description: >
+ Name of swift container with child templates and
+ files.
+ type: str
id:
- description: Unique UUID.
+ description: Stack ID.
+ type: str
+ sample: "97a3f543-8136-4570-920e-fd7605c989d6"
+ is_rollback_disabled:
+ description: Whether the stack will support a rollback.
+ type: bool
+ links:
+ description: Links to the current Stack.
+ type: list
+ elements: dict
+ sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/
+                 97a3f543-8136-4570-920e-fd7605c989d6'}]"
+ name:
+ description: Name of the Stack
+ type: str
+ sample: "test-stack"
+ notification_topics:
+ description: Stack related events.
+ type: str
+ sample: "HOT template to create a new instance and networks"
+ outputs:
+ description: Output returned by the Stack.
+ type: list
+ elements: dict
+ sample: "[{'description': 'IP of server1 in private network',
+ 'output_key': 'server1_private_ip',
+ 'output_value': '10.1.10.103'}]"
+ owner_id:
+ description: The ID of the owner stack if any.
+ type: str
+ parameters:
+ description: Parameters of the current Stack
+ type: dict
+ sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
+ 'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
+ 'OS::stack_name': 'test-stack',
+ 'stack_status': 'CREATE_COMPLETE',
+ 'stack_status_reason':
+ 'Stack CREATE completed successfully',
+ 'status': 'COMPLETE',
+ 'template_description':
+ 'HOT template to create a new instance and nets',
+ 'timeout_mins': 60,
+ 'updated_time': null}"
+ parent_id:
+ description: The ID of the parent stack if any.
+ type: str
+ replaced:
+ description: A list of resource objects that will be replaced.
type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
status:
- description: Stack status.
+ description: stack status.
+ type: str
+ status_reason:
+ description: >
+        Explains how the stack transitioned to its current
+        status.
+ type: str
+ tags:
+ description: A list of strings used as tags on the stack
+ type: list
+ template:
+      description: A dict containing the template used for stack creation.
+ type: dict
+ template_description:
+ description: Stack template description text.
+ type: str
+ template_url:
+ description: The URL where a stack template can be found.
+ type: str
+ timeout_mins:
+ description: Stack operation timeout in minutes.
+ type: str
+ unchanged:
+ description: >
+        A list of resource objects that will remain unchanged
+        if a stack update is performed.
+ type: list
+ updated:
+ description: >
+ A list of resource objects that will have their
+ properties updated.
+ type: list
+ updated_at:
+ description: Timestamp of last update on the stack.
+ type: str
+ user_project_id:
+ description: The ID of the user project created for this stack.
type: str
-
-'''
-
-EXAMPLES = '''
-# Get backups.
-- openstack.cloud.stack_info:
- register: stack
-
-- openstack.cloud.stack_info:
- name: my_stack
- register: stack
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class StackInfoModule(OpenStackModule):
- module_min_sdk_version = '0.53.0'
-
argument_spec = dict(
- name=dict(required=False, type='str'),
- status=dict(required=False, type='str'),
- project_id=dict(required=False, type='str'),
- owner_id=dict(required=False, type='str')
+ name=dict(),
+ owner=dict(aliases=['owner_id']),
+ project=dict(aliases=['project_id']),
+ status=dict(),
)
+
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
- data = []
- attrs = {}
-
- for param in ['name', 'status', 'project_id', 'owner_id']:
- if self.params[param]:
- attrs[param] = self.params[param]
-
- for raw in self.conn.orchestration.stacks(**attrs):
- dt = raw.to_dict()
- dt.pop('location')
- data.append(dt)
-
- self.exit_json(
- changed=False,
- stacks=data
- )
+ kwargs = {}
+
+ owner_name_or_id = self.params['owner']
+ if owner_name_or_id:
+ owner = self.conn.orchestration.find_stack(owner_name_or_id)
+ if owner:
+ kwargs['owner_id'] = owner['id']
+ else:
+ # Owner could not be found so return empty list of stacks
+ # because *_info modules never raise errors on missing
+ # resources
+ self.exit_json(changed=False, stacks=[])
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id:
+ project = self.conn.identity.find_project(project_name_or_id)
+ if project:
+ kwargs['project_id'] = project['id']
+ else:
+ # Project could not be found so return empty list of stacks
+ # because *_info modules never raise errors on missing
+ # resources
+ self.exit_json(changed=False, stacks=[])
+
+ for k in ['name', 'status']:
+ if self.params[k] is not None:
+ kwargs[k] = self.params[k]
+
+ stacks = [stack.to_dict(computed=False)
+ for stack in self.conn.orchestration.stacks(**kwargs)]
+
+ self.exit_json(changed=False, stacks=stacks)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/subnet.py b/ansible_collections/openstack/cloud/plugins/modules/subnet.py
index dfe1eaca3..d8da4b5db 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/subnet.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/subnet.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -12,108 +12,121 @@ author: OpenStack Ansible SIG
description:
- Add or Remove a subnet to an OpenStack network
options:
- state:
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- type: str
- network_name:
- description:
- - Name of the network to which the subnet should be attached
- - Required when I(state) is 'present'
- type: str
- name:
- description:
- - The name of the subnet that should be created. Although Neutron
- allows for non-unique subnet names, this module enforces subnet
- name uniqueness.
- required: true
- type: str
- cidr:
- description:
- - The CIDR representation of the subnet that should be assigned to
- the subnet. Required when I(state) is 'present' and a subnetpool
- is not specified.
- type: str
- ip_version:
- description:
- - The IP version of the subnet 4 or 6
- default: '4'
- type: str
- choices: ['4', '6']
- enable_dhcp:
- description:
- - Whether DHCP should be enabled for this subnet.
- type: bool
- default: 'yes'
- gateway_ip:
- description:
- - The ip that would be assigned to the gateway for this subnet
- type: str
- no_gateway_ip:
- description:
- - The gateway IP would not be assigned for this subnet
- type: bool
- default: 'no'
- dns_nameservers:
- description:
- - List of DNS nameservers for this subnet.
- type: list
- elements: str
- allocation_pool_start:
- description:
- - From the subnet pool the starting address from which the IP should
- be allocated.
- type: str
- allocation_pool_end:
- description:
- - From the subnet pool the last IP that should be assigned to the
- virtual machines.
- type: str
- host_routes:
- description:
- - A list of host route dictionaries for the subnet.
- type: list
- elements: dict
- suboptions:
- destination:
- description: The destination network (CIDR).
- type: str
- required: true
- nexthop:
- description: The next hop (aka gateway) for the I(destination).
- type: str
- required: true
- ipv6_ra_mode:
- description:
- - IPv6 router advertisement mode
- choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
- type: str
- ipv6_address_mode:
- description:
- - IPv6 address mode
- choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
- type: str
- use_default_subnetpool:
- description:
- - Use the default subnetpool for I(ip_version) to obtain a CIDR.
- type: bool
- default: 'no'
- project:
- description:
- - Project name or ID containing the subnet (name admin-only)
- type: str
- extra_specs:
- description:
- - Dictionary with extra key/value pairs passed to the API
- required: false
- default: {}
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ allocation_pool_start:
+ description:
+            - The first address of the subnet's allocation pool from which
+              IPs should be allocated.
+ type: str
+ allocation_pool_end:
+ description:
+            - The last address of the subnet's allocation pool that should be
+              assigned to virtual machines.
+ type: str
+ cidr:
+ description:
+ - The CIDR representation of the subnet that should be assigned to
+ the subnet. Required when I(state) is 'present' and a subnetpool
+ is not specified.
+ type: str
+ description:
+ description:
+ - Description of the subnet
+ type: str
+ disable_gateway_ip:
+ description:
+ - The gateway IP would not be assigned for this subnet
+ type: bool
+ aliases: ['no_gateway_ip']
+ default: 'false'
+ dns_nameservers:
+ description:
+ - List of DNS nameservers for this subnet.
+ type: list
+ elements: str
+ extra_attrs:
+ description:
+ - Dictionary with extra key/value pairs passed to the API
+ required: false
+ aliases: ['extra_specs']
+ default: {}
+ type: dict
+ host_routes:
+ description:
+ - A list of host route dictionaries for the subnet.
+ type: list
+ elements: dict
+ suboptions:
+ destination:
+ description: The destination network (CIDR).
+ type: str
+ required: true
+ nexthop:
+ description: The next hop (aka gateway) for the I(destination).
+ type: str
+ required: true
+ gateway_ip:
+ description:
+ - The ip that would be assigned to the gateway for this subnet
+ type: str
+ ip_version:
+ description:
+            - The IP version of the subnet, either 4 or 6
+ default: 4
+ type: int
+ choices: [4, 6]
+ is_dhcp_enabled:
+ description:
+ - Whether DHCP should be enabled for this subnet.
+ type: bool
+ aliases: ['enable_dhcp']
+ default: 'true'
+ ipv6_ra_mode:
+ description:
+ - IPv6 router advertisement mode
+ choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
+ type: str
+ ipv6_address_mode:
+ description:
+ - IPv6 address mode
+ choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
+ type: str
+ name:
+ description:
+ - The name of the subnet that should be created. Although Neutron
+ allows for non-unique subnet names, this module enforces subnet
+ name uniqueness.
+ required: true
+ type: str
+ network:
+ description:
+ - Name or id of the network to which the subnet should be attached
+ - Required when I(state) is 'present'
+ aliases: ['network_name']
+ type: str
+ project:
+ description:
+ - Project name or ID containing the subnet (name admin-only)
+ type: str
+ prefix_length:
+ description:
+ - The prefix length to use for subnet allocation from a subnet pool
+ type: str
+ use_default_subnet_pool:
+ description:
+ - Use the default subnetpool for I(ip_version) to obtain a CIDR.
+ type: bool
+ aliases: ['use_default_subnetpool']
+ subnet_pool:
+ description:
+ - The subnet pool name or ID from which to obtain a CIDR
+ type: str
+ required: false
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -153,206 +166,297 @@ EXAMPLES = '''
ipv6_address_mode: dhcpv6-stateless
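+
+# Illustrative use of the new subnet_pool option; the network, subnet and
+# subnet pool names are assumptions.
+- openstack.cloud.subnet:
+    state: present
+    network: ext_network
+    name: pool_subnet
+    subnet_pool: my_subnet_pool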
'''
+RETURN = '''
+id:
+ description: Id of subnet
+ returned: On success when subnet exists.
+ type: str
+subnet:
+ description: Dictionary describing the subnet.
+ returned: On success when subnet exists.
+ type: dict
+ contains:
+ allocation_pools:
+ description: Allocation pools associated with this subnet.
+ returned: success
+ type: list
+ elements: dict
+ cidr:
+ description: Subnet's CIDR.
+ returned: success
+ type: str
+ created_at:
+ description: Created at timestamp
+ type: str
+ description:
+ description: Description
+ type: str
+ dns_nameservers:
+ description: DNS name servers for this subnet.
+ returned: success
+ type: list
+ elements: str
+ dns_publish_fixed_ip:
+ description: Whether to publish DNS records for fixed IPs.
+ returned: success
+ type: bool
+ gateway_ip:
+ description: Subnet's gateway ip.
+ returned: success
+ type: str
+ host_routes:
+ description: A list of host routes.
+ returned: success
+ type: str
+ id:
+ description: Unique UUID.
+ returned: success
+ type: str
+ ip_version:
+ description: IP version for this subnet.
+ returned: success
+ type: int
+ ipv6_address_mode:
+ description: |
+ The IPv6 address modes which are 'dhcpv6-stateful',
+ 'dhcpv6-stateless' or 'slaac'.
+ returned: success
+ type: str
+ ipv6_ra_mode:
+ description: |
+ The IPv6 router advertisements modes which can be 'slaac',
+ 'dhcpv6-stateful', 'dhcpv6-stateless'.
+ returned: success
+ type: str
+ is_dhcp_enabled:
+ description: DHCP enable flag for this subnet.
+ returned: success
+ type: bool
+ name:
+ description: Name given to the subnet.
+ returned: success
+ type: str
+ network_id:
+ description: Network ID this subnet belongs in.
+ returned: success
+ type: str
+ prefix_length:
+ description: |
+ The prefix length to use for subnet allocation from a subnet
+ pool.
+ returned: success
+ type: str
+ project_id:
+ description: Project id associated with this subnet.
+ returned: success
+ type: str
+ revision_number:
+ description: Revision number of the resource
+ returned: success
+ type: int
+ segment_id:
+ description: The ID of the segment this subnet is associated with.
+ returned: success
+ type: str
+ service_types:
+ description: Service types for this subnet
+ returned: success
+ type: list
+ subnet_pool_id:
+ description: The subnet pool ID from which to obtain a CIDR.
+ returned: success
+ type: str
+ tags:
+ description: Tags
+ type: str
+ updated_at:
+ description: Timestamp when the subnet was last updated.
+ returned: success
+ type: str
+ use_default_subnet_pool:
+ description: |
+ Whether to use the default subnet pool to obtain a CIDR.
+ returned: success
+ type: bool
+'''
+
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class SubnetModule(OpenStackModule):
ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
argument_spec = dict(
- name=dict(type='str', required=True),
- network_name=dict(type='str'),
- cidr=dict(type='str'),
- ip_version=dict(type='str', default='4', choices=['4', '6']),
- enable_dhcp=dict(type='bool', default=True),
- gateway_ip=dict(type='str'),
- no_gateway_ip=dict(type='bool', default=False),
- dns_nameservers=dict(type='list', default=None, elements='str'),
- allocation_pool_start=dict(type='str'),
- allocation_pool_end=dict(type='str'),
- host_routes=dict(type='list', default=None, elements='dict'),
- ipv6_ra_mode=dict(type='str', choices=ipv6_mode_choices),
- ipv6_address_mode=dict(type='str', choices=ipv6_mode_choices),
- use_default_subnetpool=dict(type='bool', default=False),
- extra_specs=dict(type='dict', default=dict()),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- project=dict(type='str'),
+ name=dict(required=True),
+ network=dict(aliases=['network_name']),
+ cidr=dict(),
+ description=dict(),
+ ip_version=dict(type='int', default=4, choices=[4, 6]),
+ is_dhcp_enabled=dict(type='bool', default=True,
+ aliases=['enable_dhcp']),
+ gateway_ip=dict(),
+ disable_gateway_ip=dict(
+ type='bool', default=False, aliases=['no_gateway_ip']),
+ dns_nameservers=dict(type='list', elements='str'),
+ allocation_pool_start=dict(),
+ allocation_pool_end=dict(),
+ host_routes=dict(type='list', elements='dict'),
+ ipv6_ra_mode=dict(choices=ipv6_mode_choices),
+ ipv6_address_mode=dict(choices=ipv6_mode_choices),
+ subnet_pool=dict(),
+ prefix_length=dict(),
+ use_default_subnet_pool=dict(
+ type='bool', aliases=['use_default_subnetpool']),
+ extra_attrs=dict(type='dict', default=dict(), aliases=['extra_specs']),
+ state=dict(default='present',
+ choices=['absent', 'present']),
+ project=dict(),
)
module_kwargs = dict(
supports_check_mode=True,
- required_together=[['allocation_pool_end', 'allocation_pool_start']]
+ required_together=[['allocation_pool_end', 'allocation_pool_start']],
+ required_if=[
+ ('state', 'present', ('network',)),
+ ('state', 'present',
+ ('cidr', 'use_default_subnet_pool', 'subnet_pool'), True),
+ ],
+ mutually_exclusive=[
+ ('cidr', 'use_default_subnet_pool', 'subnet_pool')
+ ]
)
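+    # Taken together, the required_if and mutually_exclusive rules above mean
+    # that exactly one of I(cidr), I(use_default_subnet_pool) and
+    # I(subnet_pool) must be given when I(state) is 'present', and that
+    # I(network) is mandatory in that case as well.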
- def _can_update(self, subnet, filters=None):
- """Check for differences in non-updatable values"""
- network_name = self.params['network_name']
- ip_version = int(self.params['ip_version'])
- ipv6_ra_mode = self.params['ipv6_ra_mode']
- ipv6_a_mode = self.params['ipv6_address_mode']
-
- if network_name:
- network = self.conn.get_network(network_name, filters)
- if network:
- netid = network['id']
- if netid != subnet['network_id']:
- self.fail_json(msg='Cannot update network_name in existing subnet')
- else:
- self.fail_json(msg='No network found for %s' % network_name)
-
- if ip_version and subnet['ip_version'] != ip_version:
- self.fail_json(msg='Cannot update ip_version in existing subnet')
- if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ipv6_ra_mode:
- self.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet')
- if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode:
- self.fail_json(msg='Cannot update ipv6_address_mode in existing subnet')
-
- def _needs_update(self, subnet, filters=None):
- """Check for differences in the updatable values."""
-
- # First check if we are trying to update something we're not allowed to
- self._can_update(subnet, filters)
+ # resource attributes obtainable directly from params
+ attr_params = ('cidr', 'description',
+ 'dns_nameservers', 'gateway_ip', 'host_routes',
+ 'ip_version', 'ipv6_address_mode', 'ipv6_ra_mode',
+ 'is_dhcp_enabled', 'name', 'prefix_length',
+ 'use_default_subnet_pool',)
+
+ def _validate_update(self, subnet, update):
+ """ Check for differences in non-updatable values """
+ # Ref.: https://docs.openstack.org/api-ref/network/v2/index.html#update-subnet
+ for attr in ('cidr', 'ip_version', 'ipv6_ra_mode', 'ipv6_address_mode',
+ 'prefix_length', 'use_default_subnet_pool'):
+ if attr in update and update[attr] != subnet[attr]:
+ self.fail_json(
+ msg='Cannot update {0} in existing subnet'.format(attr))
+
+ def _system_state_change(self, subnet, network, project, subnet_pool):
+ state = self.params['state']
+ if state == 'absent':
+ return subnet is not None
+ # else state is present
+ if not subnet:
+ return True
+ params = self._build_params(network, project, subnet_pool)
+ updates = self._build_updates(subnet, params)
+ self._validate_update(subnet, updates)
+ return bool(updates)
- # now check for the things we are allowed to update
- enable_dhcp = self.params['enable_dhcp']
- subnet_name = self.params['name']
+ def _build_pool(self):
pool_start = self.params['allocation_pool_start']
pool_end = self.params['allocation_pool_end']
- gateway_ip = self.params['gateway_ip']
- no_gateway_ip = self.params['no_gateway_ip']
- dns = self.params['dns_nameservers']
- host_routes = self.params['host_routes']
- if pool_start and pool_end:
- pool = dict(start=pool_start, end=pool_end)
- else:
- pool = None
-
- changes = dict()
- if subnet['enable_dhcp'] != enable_dhcp:
- changes['enable_dhcp'] = enable_dhcp
- if subnet_name and subnet['name'] != subnet_name:
- changes['subnet_name'] = subnet_name
- if pool and (not subnet['allocation_pools'] or subnet['allocation_pools'] != [pool]):
- changes['allocation_pools'] = [pool]
- if gateway_ip and subnet['gateway_ip'] != gateway_ip:
- changes['gateway_ip'] = gateway_ip
- if dns and sorted(subnet['dns_nameservers']) != sorted(dns):
- changes['dns_nameservers'] = dns
- if host_routes:
- curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys())
- new_hr = sorted(host_routes, key=lambda t: t.keys())
- if curr_hr != new_hr:
- changes['host_routes'] = host_routes
- if no_gateway_ip and subnet['gateway_ip']:
- changes['disable_gateway_ip'] = no_gateway_ip
- return changes
-
- def _system_state_change(self, subnet, filters=None):
- state = self.params['state']
- if state == 'present':
- if not subnet:
- return True
- return bool(self._needs_update(subnet, filters))
- if state == 'absent' and subnet:
- return True
- return False
+ if pool_start:
+ return [dict(start=pool_start, end=pool_end)]
+ return None
+
+ def _build_params(self, network, project, subnet_pool):
+ params = {attr: self.params[attr] for attr in self.attr_params}
+ params['network_id'] = network.id
+ if project:
+ params['project_id'] = project.id
+ if subnet_pool:
+ params['subnet_pool_id'] = subnet_pool.id
+ params['allocation_pools'] = self._build_pool()
+ params = self._add_extra_attrs(params)
+ params = {k: v for k, v in params.items() if v is not None}
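+        # Dropping None values means module options left unset are neither
+        # sent on create nor reported as differences by _build_updates().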
+ return params
+
+ def _build_updates(self, subnet, params):
+        # Sort lists before doing comparisons
+ if 'dns_nameservers' in params:
+ params['dns_nameservers'].sort()
+ subnet['dns_nameservers'].sort()
+
+ if 'host_routes' in params:
+ params['host_routes'].sort(key=lambda r: sorted(r.items()))
+ subnet['host_routes'].sort(key=lambda r: sorted(r.items()))
+
+ updates = {k: params[k] for k in params if params[k] != subnet[k]}
+ if self.params['disable_gateway_ip'] and subnet.gateway_ip:
+ updates['gateway_ip'] = None
+ return updates
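+
+    # Illustration with assumed values: if the existing subnet has
+    # dns_nameservers=['8.8.8.8'] and the requested params contain
+    # dns_nameservers=['8.8.8.8', '8.8.4.4'], _build_updates() returns
+    # {'dns_nameservers': ['8.8.4.4', '8.8.8.8']}, assuming every other
+    # requested attribute already matches the subnet.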
+
+ def _add_extra_attrs(self, params):
+ duplicates = set(self.params['extra_attrs']) & set(params)
+ if duplicates:
+ self.fail_json(msg='Duplicate key(s) {0} in extra_specs'
+ .format(list(duplicates)))
+ params.update(self.params['extra_attrs'])
+ return params
def run(self):
-
state = self.params['state']
- network_name = self.params['network_name']
- cidr = self.params['cidr']
- ip_version = self.params['ip_version']
- enable_dhcp = self.params['enable_dhcp']
+ network_name_or_id = self.params['network']
+ project_name_or_id = self.params['project']
+ subnet_pool_name_or_id = self.params['subnet_pool']
subnet_name = self.params['name']
gateway_ip = self.params['gateway_ip']
- no_gateway_ip = self.params['no_gateway_ip']
- dns = self.params['dns_nameservers']
- pool_start = self.params['allocation_pool_start']
- pool_end = self.params['allocation_pool_end']
- host_routes = self.params['host_routes']
- ipv6_ra_mode = self.params['ipv6_ra_mode']
- ipv6_a_mode = self.params['ipv6_address_mode']
- use_default_subnetpool = self.params['use_default_subnetpool']
- project = self.params.pop('project')
- extra_specs = self.params['extra_specs']
-
- # Check for required parameters when state == 'present'
- if state == 'present':
- if not self.params['network_name']:
- self.fail(msg='network_name required with present state')
- if (
- not self.params['cidr']
- and not use_default_subnetpool
- and not extra_specs.get('subnetpool_id', False)
- ):
- self.fail(msg='cidr or use_default_subnetpool or '
- 'subnetpool_id required with present state')
-
- if pool_start and pool_end:
- pool = [dict(start=pool_start, end=pool_end)]
- else:
- pool = None
-
- if no_gateway_ip and gateway_ip:
- self.fail_json(msg='no_gateway_ip is not allowed with gateway_ip')
+ disable_gateway_ip = self.params['disable_gateway_ip']
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail_json(msg='Project %s could not be found' % project)
- project_id = proj['id']
- filters = {'tenant_id': project_id}
- else:
- project_id = None
- filters = None
+ # fail early if incompatible options have been specified
+ if disable_gateway_ip and gateway_ip:
+ self.fail_json(msg='no_gateway_ip is not allowed with gateway_ip')
- subnet = self.conn.get_subnet(subnet_name, filters=filters)
+ subnet_pool_filters = {}
+ filters = {}
+
+ project = None
+ if project_name_or_id:
+ project = self.conn.identity.find_project(project_name_or_id,
+ ignore_missing=False)
+ subnet_pool_filters['project_id'] = project.id
+ filters['project_id'] = project.id
+
+ network = None
+ if network_name_or_id:
+ # At this point filters can only contain project_id
+ network = self.conn.network.find_network(network_name_or_id,
+ ignore_missing=False,
+ **filters)
+ filters['network_id'] = network.id
+
+ subnet_pool = None
+ if subnet_pool_name_or_id:
+ subnet_pool = self.conn.network.find_subnet_pool(
+ subnet_pool_name_or_id,
+ ignore_missing=False,
+ **subnet_pool_filters)
+ filters['subnet_pool_id'] = subnet_pool.id
+
+ subnet = self.conn.network.find_subnet(subnet_name, **filters)
if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change(subnet, filters))
+ self.exit_json(changed=self._system_state_change(
+ subnet, network, project, subnet_pool))
+ changed = False
if state == 'present':
- if not subnet:
- kwargs = dict(
- cidr=cidr,
- ip_version=ip_version,
- enable_dhcp=enable_dhcp,
- subnet_name=subnet_name,
- gateway_ip=gateway_ip,
- disable_gateway_ip=no_gateway_ip,
- dns_nameservers=dns,
- allocation_pools=pool,
- host_routes=host_routes,
- ipv6_ra_mode=ipv6_ra_mode,
- ipv6_address_mode=ipv6_a_mode,
- tenant_id=project_id)
- dup_args = set(kwargs.keys()) & set(extra_specs.keys())
- if dup_args:
- raise ValueError('Duplicate key(s) {0} in extra_specs'
- .format(list(dup_args)))
- if use_default_subnetpool:
- kwargs['use_default_subnetpool'] = use_default_subnetpool
- kwargs = dict(kwargs, **extra_specs)
- subnet = self.conn.create_subnet(network_name, **kwargs)
+ params = self._build_params(network, project, subnet_pool)
+ if subnet is None:
+ subnet = self.conn.network.create_subnet(**params)
changed = True
else:
- changes = self._needs_update(subnet, filters)
- if changes:
- subnet = self.conn.update_subnet(subnet['id'], **changes)
+ updates = self._build_updates(subnet, params)
+ if updates:
+ self._validate_update(subnet, updates)
+ subnet = self.conn.network.update_subnet(subnet, **updates)
changed = True
- else:
- changed = False
- self.exit_json(changed=changed,
- subnet=subnet,
- id=subnet['id'])
-
- elif state == 'absent':
- if not subnet:
- changed = False
- else:
- changed = True
- self.conn.delete_subnet(subnet_name)
- self.exit_json(changed=changed)
+ self.exit_json(changed=changed, subnet=subnet, id=subnet.id)
+ elif state == 'absent' and subnet is not None:
+ self.conn.network.delete_subnet(subnet)
+ changed = True
+ self.exit_json(changed=changed)
def main():
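The reworked run() above resolves I(network), I(project) and I(subnet_pool) names to IDs before building the request, and only sends attributes that differ on update. A minimal playbook sketch, assuming a clouds.yaml entry named C(devstack) and using only the option names visible in the code above; all values are illustrative:

- name: Create subnet from a named subnet pool (sketch)
  openstack.cloud.subnet:
    cloud: devstack           # assumed clouds.yaml entry
    state: present
    name: example-subnet
    network: example-net      # resolved via conn.network.find_network()
    project: example-project  # resolved via conn.identity.find_project()
    subnet_pool: example-pool # resolved via conn.network.find_subnet_pool()
    disable_gateway_ip: false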
diff --git a/ansible_collections/openstack/cloud/plugins/modules/subnet_pool.py b/ansible_collections/openstack/cloud/plugins/modules/subnet_pool.py
index 4272438fd..a46f4145a 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/subnet_pool.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/subnet_pool.py
@@ -1,103 +1,92 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2021 by Uemit Seren <uemit.seren@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: subnet_pool
-short_description: Create or delete subnet pools from OpenStack
+short_description: Create, update or delete a subnet pool from OpenStack
author: OpenStack Ansible SIG
description:
- - Create or Delete subnet pools from OpenStack.
+ - Create, update or delete a subnet pool from OpenStack.
options:
- state:
+ address_scope:
description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
+ - ID or name of the address scope associated with this subnet pool.
type: str
- name:
+ default_prefix_length:
description:
- - Name to be give to the subnet pool
- required: true
- type: str
- project:
+ - The prefix length to allocate when the cidr or prefixlen attributes
+ are omitted when creating a subnet.
+ type: int
+ default_quota:
description:
- - Unique name or ID of the project.
+ - A per-project quota on the prefix space that can be allocated
+ from the subnet pool for project subnets.
+ type: int
+ description:
+ description: The subnet pool description.
type: str
- prefixes:
+ extra_specs:
description:
- - Set subnet pool prefixes (in CIDR notation)
- type: list
- elements: str
- minimum_prefix_length:
+ - Dictionary with extra key/value pairs passed to the API.
+ type: dict
+ is_default:
description:
- - The minimum prefix length that can be allocated from the subnet pool.
- required: False
- type: int
+ - Whether this subnet pool is the default.
+ type: bool
+ is_shared:
+ description:
+ - Whether this subnet pool is shared or not.
+ - This attribute cannot be updated.
+ type: bool
+ aliases: ['shared']
maximum_prefix_length:
description:
- The maximum prefix length that can be allocated from the subnet pool.
- required: False
type: int
- default_prefix_length:
+ minimum_prefix_length:
description:
- - The length of the prefix to allocate when the cidr or prefixlen attributes
- are omitted when creating a subnet
+ - The minimum prefix length that can be allocated from the subnet pool.
type: int
- required: False
- address_scope:
- description:
- - Set address scope (ID or name) associated with the subnet pool
- type: str
- required: False
- is_default:
+ name:
description:
- - Whether this subnet pool is by default
- type: bool
- default: 'no'
- description:
- description: The subnet pool description
+ - Name to be given to the subnet pool.
+ - This attribute cannot be updated.
+ required: true
type: str
- required: False
- default_quota:
+ prefixes:
description:
- - A per-project quota on the prefix space that can be allocated
- from the subnet pool for project subnets
- required: False
- type: int
- shared:
+ - Subnet pool prefixes in CIDR notation.
+ type: list
+ elements: str
+ project:
description:
- - Whether this subnet pool is shared or not.
- type: bool
- default: 'no'
- extra_specs:
+ - Name or ID of the project.
+ type: str
+ state:
description:
- - Dictionary with extra key/value pairs passed to the API
- required: false
- default: {}
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ - Whether the subnet pool should be C(present) or C(absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Create an subnet pool.
-- openstack.cloud.subnet_pool:
+EXAMPLES = r'''
+- name: Create a subnet pool.
+ openstack.cloud.subnet_pool:
cloud: mycloud
state: present
name: my_subnet_pool
prefixes:
- 10.10.10.0/24
-# Create a subnet pool for a given project.
-- openstack.cloud.subnet_pool:
+- name: Create a subnet pool for a given project.
+ openstack.cloud.subnet_pool:
cloud: mycloud
state: present
name: my_subnet_pool
@@ -105,8 +94,8 @@ EXAMPLES = '''
prefixes:
- 10.10.10.0/24
-# Create a shared and default subnet pool in existing address scope
-- openstack.cloud.subnet_pool:
+- name: Create a shared and default subnet pool in existing address scope
+ openstack.cloud.subnet_pool:
cloud: mycloud
state: present
name: my_subnet_pool
@@ -116,47 +105,23 @@ EXAMPLES = '''
maximum_prefix_length: 32
minimum_prefix_length: 8
default_prefix_length: 24
- shared: True
+ is_shared: True
prefixes:
- 10.10.10.0/8
-# Delete subnet poool.
-- openstack.cloud.subnet_pool:
+- name: Delete subnet pool.
+ openstack.cloud.subnet_pool:
cloud: mycloud
state: absent
name: my_subnet_pool
'''
-RETURN = '''
+RETURN = r'''
subnet_pool:
description: Dictionary describing the subnet pool.
- returned: On success when I(state) is 'present'
- type: complex
+ returned: On success when I(state) is C(present).
+ type: dict
contains:
- id:
- description: Subnet Pool ID.
- type: str
- sample: "474acfe5-be34-494c-b339-50f06aa143e4"
- name:
- description: Subnet Pool name.
- type: str
- sample: "my_subnet_pool"
- project_id:
- description: The ID of the project.
- type: str
- sample: "861174b82b43463c9edc5202aadc60ef"
- ip_version:
- description: The IP version of the subnet pool 4 or 6.
- type: int
- sample: 4
- is_shared:
- description: Indicates whether this subnet pool is shared across all projects.
- type: bool
- sample: false
- is_default:
- description: Indicates whether this is the default subnet pool.
- type: bool
- sample: false
address_scope_id:
description: The address scope ID.
type: str
@@ -166,37 +131,72 @@ subnet_pool:
type: str
sample: ""
default_prefix_length:
- description:
- - The length of the prefix to allocate when the cidr or prefixlen
- attributes are omitted when creating a subnet
+ description: The length of the prefix to allocate when the cidr or
+ prefixlen attributes are omitted when creating a
+ subnet.
type: int
sample: 32
default_quota:
- description:
- - The per-project quota on the prefix space that can be allocated
- from the subnet pool for project subnets.
+ description: The per-project quota on the prefix space that can be
+ allocated from the subnet pool for project subnets.
type: int
sample: 22
description:
description: The subnet pool description.
type: str
sample: "My test subnet pool."
+ id:
+ description: Subnet Pool ID.
+ type: str
+ sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+ ip_version:
+ description: The IP version of the subnet pool 4 or 6.
+ type: int
+ sample: 4
+ is_default:
+ description: Indicates whether this is the default subnet pool.
+ type: bool
+ sample: false
+ is_shared:
+ description: Indicates whether this subnet pool is shared across
+ all projects.
+ type: bool
+ sample: false
maximum_prefix_length:
- description: The maximum prefix length that can be allocated from the subnet pool.
+ description: The maximum prefix length that can be allocated from
+ the subnet pool.
type: int
sample: 22
minimum_prefix_length:
- description: The minimum prefix length that can be allocated from the subnet pool.
+ description: The minimum prefix length that can be allocated from
+ the subnet pool.
type: int
sample: 8
+ name:
+ description: Subnet Pool name.
+ type: str
+ sample: "my_subnet_pool"
prefixes:
- description: A list of subnet prefixes that are assigned to the subnet pool.
+ description: A list of subnet prefixes that are assigned to the
+ subnet pool.
type: list
sample: ['10.10.20.0/24', '10.20.10.0/24']
+ project_id:
+ description: The ID of the project.
+ type: str
+ sample: "861174b82b43463c9edc5202aadc60ef"
revision_number:
description: Revision number of the subnet pool.
type: int
sample: 5
+ tags:
+ description: A list of associated tags.
+ returned: success
+ type: list
+ tenant_id:
+ description: The ID of the project. Deprecated.
+ type: str
+ sample: "861174b82b43463c9edc5202aadc60ef"
updated_at:
description: Timestamp when the subnet pool was last updated.
type: str
@@ -208,132 +208,150 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class SubnetPoolModule(OpenStackModule):
argument_spec = dict(
- state=dict(default='present', choices=['absent', 'present']),
+ address_scope=dict(),
+ default_prefix_length=dict(type='int'),
+ default_quota=dict(type='int'),
+ description=dict(),
+ extra_specs=dict(type='dict'),
+ is_default=dict(type='bool'),
+ is_shared=dict(type='bool', aliases=['shared']),
+ maximum_prefix_length=dict(type='int'),
+ minimum_prefix_length=dict(type='int'),
name=dict(required=True),
- shared=dict(default=False, type='bool'),
- minimum_prefix_length=dict(default=None, type='int'),
- maximum_prefix_length=dict(default=None, type='int'),
- default_prefix_length=dict(default=None, type='int'),
- description=dict(default=None, type='str'),
- default_quota=dict(default=None, type='int'),
prefixes=dict(type='list', elements='str'),
- is_default=dict(default=False, type='bool'),
- address_scope=dict(default=None),
- project=dict(default=None),
- extra_specs=dict(type='dict', default=dict())
+ project=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
)
- def _needs_update(self, subnet_pool):
- """Check for differences in the updatable values.
-
- NOTE: We don't currently allow name updates.
- """
- compare_simple = ['is_default',
- 'minimum_prefix_length',
- 'maximum_prefix_length',
- 'default_prefix_length',
- 'description',
- 'default_quota']
- compare_list = ['prefixes']
-
- for key in compare_simple:
- if self.params[key] is not None and self.params[key] != subnet_pool[key]:
- return True
- for key in compare_list:
- if (
- self.params[key] is not None
- and set(self.params[key]) != set(subnet_pool[key])
- ):
- return True
-
- return False
-
- def _system_state_change(self, subnet_pool, filters=None):
- """Check if the system state would be changed."""
- state = self.params['state']
- if state == 'absent' and subnet_pool:
- return True
- if state == 'present':
- if not subnet_pool:
- return True
- return self._needs_update(subnet_pool, filters)
- return False
-
- def _compose_subnet_pool_args(self):
- subnet_pool_kwargs = {}
- optional_parameters = ['name',
- 'minimum_prefix_length',
- 'maximum_prefix_length',
- 'default_prefix_length',
- 'description',
- 'is_default',
- 'default_quota',
- 'prefixes']
-
- for optional_param in optional_parameters:
- if self.params[optional_param] is not None:
- subnet_pool_kwargs[optional_param] = self.params[optional_param]
-
- return subnet_pool_kwargs
-
def run(self):
-
state = self.params['state']
+
name = self.params['name']
- project = self.params['project']
- address_scope = self.params['address_scope']
+ subnet_pool = self.conn.network.find_subnet_pool(name)
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, subnet_pool))
+
+ if state == 'present' and not subnet_pool:
+ # Create subnet_pool
+ subnet_pool = self._create()
+ self.exit_json(changed=True,
+ subnet_pool=subnet_pool.to_dict(computed=False))
+
+ elif state == 'present' and subnet_pool:
+ # Update subnet_pool
+ update = self._build_update(subnet_pool)
+ if update:
+ subnet_pool = self._update(subnet_pool, update)
+
+ self.exit_json(changed=bool(update),
+ subnet_pool=subnet_pool.to_dict(computed=False))
+
+ elif state == 'absent' and subnet_pool:
+ # Delete subnet_pool
+ self._delete(subnet_pool)
+ self.exit_json(changed=True)
+
+ elif state == 'absent' and not subnet_pool:
+ # Do nothing
+ self.exit_json(changed=False)
+
+ def _build_update(self, subnet_pool):
+ update = {}
+
+ attributes = dict((k, self.params[k])
+ for k in ['default_prefix_length', 'default_quota',
+ 'description', 'is_default',
+ 'maximum_prefix_length',
+ 'minimum_prefix_length']
+ if self.params[k] is not None
+ and self.params[k] != subnet_pool[k])
+
+ for k in ['prefixes']:
+ if self.params[k] is not None \
+ and set(self.params[k]) != set(subnet_pool[k]):
+ attributes[k] = self.params[k]
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id is not None:
+ project = self.conn.identity.find_project(project_name_or_id,
+ ignore_missing=False)
+ if subnet_pool['project_id'] != project.id:
+ attributes['project_id'] = project.id
+
+ address_scope_name_or_id = self.params['address_scope']
+ if address_scope_name_or_id is not None:
+ address_scope = self.conn.network.find_address_scope(
+ address_scope_name_or_id, ignore_missing=False)
+ if subnet_pool['address_scope_id'] != address_scope.id:
+ attributes['address_scope_id'] = address_scope.id
+
extra_specs = self.params['extra_specs']
+ if extra_specs:
+ duplicate_keys = set(attributes.keys()) & set(extra_specs.keys())
+ if duplicate_keys:
+ raise ValueError('Duplicate key(s) in extra_specs: {0}'
+ .format(', '.join(list(duplicate_keys))))
+ for k, v in extra_specs.items():
+ if v != subnet_pool[k]:
+ attributes[k] = v
+
+ if attributes:
+ update['attributes'] = attributes
+
+ return update
- if project is not None:
- proj = self.conn.get_project(project)
- if proj is None:
- self.fail(msg='Project %s could not be found' % project)
- project_id = proj['id']
+ def _create(self):
+ kwargs = dict((k, self.params[k])
+ for k in ['default_prefix_length', 'default_quota',
+ 'description', 'is_default', 'is_shared',
+ 'maximum_prefix_length',
+ 'minimum_prefix_length', 'name', 'prefixes']
+ if self.params[k] is not None)
+
+ project_name_or_id = self.params['project']
+ if project_name_or_id is not None:
+ project = self.conn.identity.find_project(project_name_or_id,
+ ignore_missing=False)
+ kwargs['project_id'] = project.id
+
+ address_scope_name_or_id = self.params['address_scope']
+ if address_scope_name_or_id is not None:
+ address_scope = self.conn.network.find_address_scope(
+ address_scope_name_or_id, ignore_missing=False)
+ kwargs['address_scope_id'] = address_scope.id
+
+ extra_specs = self.params['extra_specs']
+ if extra_specs:
+ duplicate_keys = set(kwargs.keys()) & set(extra_specs.keys())
+ if duplicate_keys:
+ raise ValueError('Duplicate key(s) in extra_specs: {0}'
+ .format(', '.join(list(duplicate_keys))))
+ kwargs = dict(kwargs, **extra_specs)
+
+ return self.conn.network.create_subnet_pool(**kwargs)
+
+ def _delete(self, subnet_pool):
+ self.conn.network.delete_subnet_pool(subnet_pool.id)
+
+ def _update(self, subnet_pool, update):
+ attributes = update.get('attributes')
+ if attributes:
+ subnet_pool = self.conn.network.update_subnet_pool(subnet_pool.id,
+ **attributes)
+
+ return subnet_pool
+
+ def _will_change(self, state, subnet_pool):
+ if state == 'present' and not subnet_pool:
+ return True
+ elif state == 'present' and subnet_pool:
+ return bool(self._build_update(subnet_pool))
+ elif state == 'absent' and subnet_pool:
+ return True
else:
- project_id = self.conn.current_project_id
-
- address_scope_id = None
- if address_scope is not None:
- address_scope = self.conn.network.find_address_scope(name_or_id=address_scope)
- if address_scope is None:
- self.fail(msg='AddressScope %s could not be found' % address_scope)
- address_scope_id = address_scope['id']
- subnet_pool = self.conn.network.find_subnet_pool(name_or_id=name)
- if self.ansible.check_mode:
- self.exit_json(
- changed=self._system_state_change(subnet_pool)
- )
-
- if state == 'present':
- changed = False
-
- if not subnet_pool:
- kwargs = self._compose_subnet_pool_args()
- kwargs['address_scope_id'] = address_scope_id
- kwargs['project_id'] = project_id
- kwargs['is_shared'] = self.params['shared']
- dup_args = set(kwargs.keys()) & set(extra_specs.keys())
- if dup_args:
- raise ValueError('Duplicate key(s) {0} in extra_specs'
- .format(list(dup_args)))
- kwargs = dict(kwargs, **extra_specs)
- subnet_pool = self.conn.network.create_subnet_pool(**kwargs)
- changed = True
- else:
- if self._needs_update(subnet_pool):
- kwargs = self._compose_subnet_pool_args()
- subnet_pool = self.conn.network.update_subnet_pool(subnet_pool['id'], **kwargs)
- changed = True
- else:
- changed = False
- self.exit_json(changed=changed, subnet_pool=subnet_pool, id=subnet_pool['id'])
-
- elif state == 'absent':
- if not subnet_pool:
- self.exit(changed=False)
- else:
- self.conn.network.delete_subnet_pool(subnet_pool['id'])
- self.exit_json(changed=True)
+ # state == 'absent' and not subnet_pool:
+ return False
def main():
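Because _build_update() only sends attributes that differ from the current pool, re-running the same task is a no-op. A usage sketch of an idempotent update, assuming a clouds.yaml entry named C(devstack) and using only options declared in the argument_spec above:

- name: Ensure subnet pool exists and adjust its per-project quota (sketch)
  openstack.cloud.subnet_pool:
    cloud: devstack      # assumed clouds.yaml entry
    state: present
    name: my_subnet_pool
    is_shared: true      # 'shared' remains available as an alias
    prefixes:
      - 10.10.10.0/24
    default_quota: 24    # only sent to the API if it differs from the current value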
diff --git a/ansible_collections/openstack/cloud/plugins/modules/subnets_info.py b/ansible_collections/openstack/cloud/plugins/modules/subnets_info.py
index 7a771b53a..a35a0c4be 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/subnets_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/subnets_info.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -10,8 +11,6 @@ short_description: Retrieve information about one or more OpenStack subnets.
author: OpenStack Ansible SIG
description:
- Retrieve information about one or more subnets from OpenStack.
- - This module was called C(openstack.cloud.subnets_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(openstack.cloud.subnets_info) module no longer returns C(ansible_facts)!
options:
name:
description:
@@ -26,10 +25,6 @@ options:
this dictionary may be additional dictionaries.
required: false
type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
@@ -46,7 +41,7 @@ EXAMPLES = '''
- name: Show openstack subnets
debug:
- msg: "{{ result.openstack_subnets }}"
+ msg: "{{ result.subnets }}"
- name: Gather information about a previously created subnet by name
openstack.cloud.subnets_info:
@@ -60,7 +55,7 @@ EXAMPLES = '''
- name: Show openstack subnets
debug:
- msg: "{{ result.openstack_subnets }}"
+ msg: "{{ result.subnets }}"
- name: Gather information about a previously created subnet with filter
# Note: name and filters parameters are not mutually exclusive
@@ -71,88 +66,130 @@ EXAMPLES = '''
password: password
project_name: someproject
filters:
- tenant_id: 55e2ce24b2a245b09f181bf025724cbe
+ project_id: 55e2ce24b2a245b09f181bf025724cbe
register: result
- name: Show openstack subnets
debug:
- msg: "{{ result.openstack_subnets }}"
+ msg: "{{ result.subnets }}"
'''
RETURN = '''
-openstack_subnets:
+subnets:
description: has all the openstack information about the subnets
- returned: always, but can be null
- type: complex
+ returned: always, but can be an empty list
+ type: list
+ elements: dict
contains:
id:
- description: Unique UUID.
- returned: success
+ description: The ID of the subnet.
type: str
name:
description: Name given to the subnet.
- returned: success
+ type: str
+ description:
+ description: Description of the subnet.
type: str
network_id:
description: Network ID this subnet belongs in.
- returned: success
type: str
cidr:
description: Subnet's CIDR.
- returned: success
type: str
gateway_ip:
description: Subnet's gateway ip.
- returned: success
type: str
- enable_dhcp:
- description: DHCP enable flag for this subnet.
- returned: success
+ is_dhcp_enabled:
+ description: Is DHCP enabled.
type: bool
ip_version:
description: IP version for this subnet.
- returned: success
type: int
- tenant_id:
- description: Tenant id associated with this subnet.
- returned: success
- type: str
dns_nameservers:
description: DNS name servers for this subnet.
- returned: success
type: list
elements: str
allocation_pools:
description: Allocation pools associated with this subnet.
- returned: success
type: list
elements: dict
+ created_at:
+ description: Date and time when the resource was created.
+ type: str
+ updated_at:
+ description: Date and time when the resource was updated.
+ type: str
+ dns_publish_fixed_ip:
+ description: Whether to publish DNS records for IPs from this subnet.
+ type: str
+ host_routes:
+ description: Additional routes for the subnet.
+ type: list
+ elements: dict
+ ipv6_address_mode:
+ description: The IPv6 address mode, which specifies the mechanism for assigning IP addresses.
+ type: str
+ ipv6_ra_mode:
+ description: The IPv6 router advertisement mode, which specifies whether the networking service should transmit ICMPv6 packets for this subnet.
+ type: str
+ project_id:
+ description: The ID of the project.
+ type: str
+ revision_number:
+ description: The revision number of the resource.
+ type: str
+ segment_id:
+ description: The ID of a network segment the subnet is associated with.
+ type: str
+ service_types:
+ description: The service types associated with the subnet.
+ type: list
+ elements: str
+ subnet_pool_id:
+ description: The ID of the subnet pool associated with the subnet.
+ type: str
+ tags:
+ description: The list of tags on the resource.
+ type: list
+ elements: str
+ prefix_length:
+ description: The prefix length to use for subnet allocation from a subnet pool.
+ type: str
+ use_default_subnet_pool:
+ description: Whether to use the default subnet pool to obtain a CIDR.
+ type: bool
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class SubnetInfoModule(OpenStackModule):
-
- deprecated_names = ('subnets_facts', 'openstack.cloud.subnets_facts')
-
argument_spec = dict(
- name=dict(required=False, default=None, aliases=['subnet']),
- filters=dict(required=False, type='dict', default=None)
+ name=dict(aliases=['subnet']),
+ filters=dict(type='dict')
)
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
- kwargs = self.check_versioned(
- filters=self.params['filters']
- )
+ kwargs = {}
+ subnets = []
if self.params['name']:
- kwargs['name_or_id'] = self.params['name']
- subnets = self.conn.search_subnets(**kwargs)
-
- self.exit(changed=False, openstack_subnets=subnets)
+ kwargs['name'] = self.params['name']
+ # Try to get subnet by ID
+ try:
+ raw = self.conn.network.get_subnet(self.params['name'])
+ raw = raw.to_dict(computed=False)
+ subnets.append(raw)
+ self.exit(changed=False, subnets=subnets)
+ except self.sdk.exceptions.ResourceNotFound:
+ pass
+ if self.params['filters']:
+ kwargs.update(self.params['filters'])
+ subnets = self.conn.network.subnets(**kwargs)
+ subnets = [i.to_dict(computed=False) for i in subnets]
+ self.exit(changed=False, subnets=subnets)
def main():
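Note that results are now returned under C(subnets) instead of C(openstack_subnets). A usage sketch, assuming a clouds.yaml entry named C(devstack); the project ID is illustrative:

- name: List subnets of one project (sketch)
  openstack.cloud.subnets_info:
    cloud: devstack
    filters:
      project_id: 55e2ce24b2a245b09f181bf025724cbe  # illustrative ID
  register: result

- name: Show subnet names
  debug:
    msg: "{{ result.subnets | map(attribute='name') | list }}"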
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume.py b/ansible_collections/openstack/cloud/plugins/modules/volume.py
index 3a50c05a8..735b736fe 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/volume.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -11,247 +12,335 @@ author: OpenStack Ansible SIG
description:
- Create or Remove cinder block storage volumes
options:
- size:
- description:
- - Size of volume in GB. This parameter is required when the
- I(state) parameter is 'present'.
- type: int
- display_name:
+ availability_zone:
description:
- - Name of volume
- required: true
+ - The availability zone.
type: str
- aliases: [name]
- display_description:
+ description:
description:
- String describing the volume
type: str
- aliases: [description]
- volume_type:
- description:
- - Volume type for volume
- type: str
+ aliases: [display_description]
image:
description:
- Image name or id for boot from volume
+ - Mutually exclusive with I(snapshot) and I(volume)
type: str
- snapshot_id:
- description:
- - Volume snapshot id to create from
- type: str
- volume:
- description:
- - Volume name or id to create from
- type: str
- bootable:
+ is_bootable:
description:
- Bootable flag for volume.
type: bool
default: False
+ aliases: [bootable]
+ is_multiattach:
+ description:
+ - Whether volume will be sharable or not.
+ - To enable this volume to attach to more than one server, set
+ I(is_multiattach) to C(true).
+ - Note that support for multiattach volumes depends on the volume
+ type being used.
+ - "Cinder's default for I(is_multiattach) is C(false)."
+ type: bool
+ metadata:
+ description:
+ - Metadata for the volume
+ type: dict
+ name:
+ description:
+ - Name of volume
+ required: true
+ type: str
+ aliases: [display_name]
+ scheduler_hints:
+ description:
+ - Scheduler hints passed to volume API in form of dict
+ type: dict
+ size:
+ description:
+ - Size of volume in GB. This parameter is required when the
+ I(state) parameter is 'present'.
+ type: int
+ snapshot:
+ description:
+ - Volume snapshot name or id to create from
+ - Mutually exclusive with I(image) and I(volume)
+ type: str
+ aliases: [snapshot_id]
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
- scheduler_hints:
+ volume:
description:
- - Scheduler hints passed to volume API in form of dict
- type: dict
- metadata:
+ - Volume name or id to create from
+ - Mutually exclusive with I(image) and I(snapshot)
+ type: str
+ volume_type:
description:
- - Metadata for the volume
- type: dict
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ - Volume type for volume
+ type: str
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
# Creates a new volume
-- name: create a volume
- hosts: localhost
- tasks:
- - name: create 40g test volume
- openstack.cloud.volume:
- state: present
- cloud: mordred
- availability_zone: az2
- size: 40
- display_name: test_volume
- scheduler_hints:
- same_host: 243e8d3c-8f47-4a61-93d6-7215c344b0c0
+- name: create 40g test volume
+ openstack.cloud.volume:
+ state: present
+ cloud: mordred
+ availability_zone: az2
+ size: 40
+ name: test_volume
+ scheduler_hints:
+ same_host: 243e8d3c-8f47-4a61-93d6-7215c344b0c0
'''
-RETURNS = '''
-id:
- description: Cinder's unique ID for this volume
- returned: always
- type: str
- sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
-
+RETURN = '''
volume:
description: Cinder's representation of the volume object
returned: always
type: dict
- sample: {'...'}
+ contains:
+ attachments:
+ description: Instance attachment information. If this volume is attached
+ to a server instance, the attachments list includes the UUID
+ of the attached server, an attachment UUID, the name of the
+ attached host, if any, the volume UUID, the device, and the
+ device UUID. Otherwise, this list is empty.
+ type: list
+ availability_zone:
+ description: The name of the availability zone.
+ type: str
+ consistency_group_id:
+ description: The UUID of the consistency group.
+ type: str
+ created_at:
+ description: The date and time when the resource was created.
+ type: str
+ description:
+ description: The volume description.
+ type: str
+ extended_replication_status:
+ description: Extended replication status on this volume.
+ type: str
+ group_id:
+ description: The ID of the group.
+ type: str
+ host:
+ description: The volume's current back-end.
+ type: str
+ id:
+ description: The UUID of the volume.
+ type: str
+ image_id:
+ description: Image on which the volume was based
+ type: str
+ is_bootable:
+ description: Enables or disables the bootable attribute. You can boot an
+ instance from a bootable volume.
+ type: str
+ is_encrypted:
+ description: If true, this volume is encrypted.
+ type: bool
+ is_multiattach:
+ description: Whether this volume can be attached to more than one
+ server.
+ type: bool
+ metadata:
+ description: A metadata object. Contains one or more metadata key and
+ value pairs that are associated with the volume.
+ type: dict
+ migration_id:
+ description: The volume ID that this volume name on the backend is
+ based on.
+ type: str
+ migration_status:
+ description: The status of this volume migration (None means that a
+ migration is not currently in progress).
+ type: str
+ name:
+ description: The volume name.
+ type: str
+ project_id:
+ description: The project ID which the volume belongs to.
+ type: str
+ replication_driver_data:
+ description: Data set by the replication driver
+ type: str
+ replication_status:
+ description: The volume replication status.
+ type: str
+ scheduler_hints:
+ description: Scheduler hints for the volume
+ type: dict
+ size:
+ description: The size of the volume, in gibibytes (GiB).
+ type: int
+ snapshot_id:
+ description: To create a volume from an existing snapshot, specify the
+ UUID of the volume snapshot. The volume is created in same
+ availability zone and with same size as the snapshot.
+ type: str
+ source_volume_id:
+ description: The UUID of the source volume. The API creates a new volume
+ with the same size as the source volume unless a larger size
+ is requested.
+ type: str
+ status:
+ description: The volume status.
+ type: str
+ updated_at:
+ description: The date and time when the resource was updated.
+ type: str
+ user_id:
+ description: The UUID of the user.
+ type: str
+ volume_image_metadata:
+ description: List of image metadata entries. Only included for volumes
+ that were created from an image, or from a snapshot of a
+ volume originally created from an image.
+ type: dict
+ volume_type:
+ description: The associated volume type name for the volume.
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class VolumeModule(OpenStackModule):
-
argument_spec = dict(
+ availability_zone=dict(),
+ description=dict(aliases=['display_description']),
+ image=dict(),
+ is_bootable=dict(type='bool', default=False, aliases=['bootable']),
+ is_multiattach=dict(type='bool'),
+ metadata=dict(type='dict'),
+ name=dict(required=True, aliases=['display_name']),
+ scheduler_hints=dict(type='dict'),
size=dict(type='int'),
- volume_type=dict(type='str'),
- display_name=dict(required=True, aliases=['name'], type='str'),
- display_description=dict(aliases=['description'], type='str'),
- image=dict(type='str'),
- snapshot_id=dict(type='str'),
- volume=dict(type='str'),
+ snapshot=dict(aliases=['snapshot_id']),
state=dict(default='present', choices=['absent', 'present'], type='str'),
- scheduler_hints=dict(type='dict'),
- metadata=dict(type='dict'),
- bootable=dict(type='bool', default=False)
+ volume=dict(),
+ volume_type=dict(),
)
module_kwargs = dict(
+ supports_check_mode=True,
mutually_exclusive=[
- ['image', 'snapshot_id', 'volume'],
+ ['image', 'snapshot', 'volume'],
],
required_if=[
['state', 'present', ['size']],
],
)
- def _needs_update(self, volume):
- '''
- check for differences in updatable values, at the moment
- openstacksdk only supports extending the volume size, this
- may change in the future.
- :returns: bool
- '''
- compare_simple = ['size']
-
- for k in compare_simple:
- if self.params[k] is not None and self.params[k] != volume.get(k):
- return True
+ def _build_update(self, volume):
+ keys = ('size',)
+ return {k: self.params[k] for k in keys if self.params[k] is not None
+ and self.params[k] != volume[k]}
- return False
-
- def _modify_volume(self, volume):
+ def _update(self, volume):
'''
modify volume, the only modification to an existing volume
available at the moment is extending the size, this is
limited by the openstacksdk and may change whenever the
functionality is extended.
'''
- volume = self.conn.get_volume(self.params['display_name'])
- diff = {'before': volume, 'after': ''}
- size = self.params['size']
+ diff = {'before': volume.to_dict(computed=False), 'after': ''}
+ diff['after'] = diff['before']
- if size < volume.get('size'):
- self.fail_json(
- msg='Cannot shrink volumes, size: {0} < {1}'.format(size, volume.get('size'))
- )
+ update = self._build_update(volume)
- if not self._needs_update(volume):
- diff['after'] = volume
- self.exit_json(changed=False, id=volume['id'], volume=volume, diff=diff)
+ if not update:
+ self.exit_json(changed=False,
+ volume=volume.to_dict(computed=False), diff=diff)
if self.ansible.check_mode:
- diff['after'] = volume
- self.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
+ volume.size = update['size']
+ self.exit_json(changed=False,
+ volume=volume.to_dict(computed=False), diff=diff)
- self.conn.volume.extend_volume(
- volume.id,
- size
- )
- diff['after'] = self.conn.get_volume(self.params['display_name'])
- self.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
+ if 'size' in update and update['size'] != volume.size:
+ size = update['size']
+ self.conn.volume.extend_volume(volume.id, size)
+ volume = self.conn.block_storage.get_volume(volume)
- def _present_volume(self):
+ volume = volume.to_dict(computed=False)
+ diff['after'] = volume
+ self.exit_json(changed=True, volume=volume, diff=diff)
- diff = {'before': '', 'after': ''}
+ def _build_create_kwargs(self):
+ keys = ('availability_zone', 'is_multiattach', 'size', 'name',
+ 'description', 'volume_type', 'scheduler_hints', 'metadata')
+ kwargs = {k: self.params[k] for k in keys
+ if self.params[k] is not None}
+
+ find_filters = {}
+
+ if self.params['snapshot']:
+ snapshot = self.conn.block_storage.find_snapshot(
+ self.params['snapshot'], ignore_missing=False, **find_filters)
+ kwargs['snapshot_id'] = snapshot.id
- volume_args = dict(
- size=self.params['size'],
- volume_type=self.params['volume_type'],
- display_name=self.params['display_name'],
- display_description=self.params['display_description'],
- snapshot_id=self.params['snapshot_id'],
- bootable=self.params['bootable'],
- availability_zone=self.params['availability_zone'],
- )
if self.params['image']:
- image_id = self.conn.get_image_id(self.params['image'])
- if not image_id:
- self.fail_json(msg="Failed to find image '%s'" % self.params['image'])
- volume_args['imageRef'] = image_id
+ image = self.conn.image.find_image(
+ self.params['image'], ignore_missing=False)
+ kwargs['image_id'] = image.id
if self.params['volume']:
- volume_id = self.conn.get_volume_id(self.params['volume'])
- if not volume_id:
- self.fail_json(msg="Failed to find volume '%s'" % self.params['volume'])
- volume_args['source_volid'] = volume_id
+ volume = self.conn.block_storage.find_volume(
+ self.params['volume'], ignore_missing=False, **find_filters)
+ kwargs['source_volume_id'] = volume.id
- if self.params['scheduler_hints']:
- volume_args['scheduler_hints'] = self.params['scheduler_hints']
+ return kwargs
- if self.params['metadata']:
- volume_args['metadata'] = self.params['metadata']
+ def _create(self):
+ diff = {'before': '', 'after': ''}
+ volume_args = self._build_create_kwargs()
if self.ansible.check_mode:
diff['after'] = volume_args
- self.exit_json(changed=True, id=None, volume=volume_args, diff=diff)
+ self.exit_json(changed=True, volume=volume_args, diff=diff)
- volume = self.conn.create_volume(
- wait=self.params['wait'], timeout=self.params['timeout'],
- **volume_args)
+ volume = self.conn.block_storage.create_volume(**volume_args)
+ if self.params['wait']:
+ self.conn.block_storage.wait_for_status(
+ volume, wait=self.params['timeout'])
+
+ volume = volume.to_dict(computed=False)
diff['after'] = volume
- self.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
+ self.exit_json(changed=True, volume=volume, diff=diff)
- def _absent_volume(self, volume):
- changed = False
+ def _delete(self, volume):
diff = {'before': '', 'after': ''}
+ if volume is None:
+ self.exit_json(changed=False, diff=diff)
- if self.conn.volume_exists(self.params['display_name']):
- volume = self.conn.get_volume(self.params['display_name'])
- diff['before'] = volume
-
- if self.ansible.check_mode:
- self.exit_json(changed=True, diff=diff)
+ diff['before'] = volume.to_dict(computed=False)
- try:
- changed = self.conn.delete_volume(name_or_id=self.params['display_name'],
- wait=self.params['wait'],
- timeout=self.params['timeout'])
- except self.sdk.exceptions.ResourceTimeout:
- diff['after'] = volume
- self.exit_json(changed=changed, diff=diff)
+ if self.ansible.check_mode:
+ self.exit_json(changed=True, diff=diff)
- self.exit_json(changed=changed, diff=diff)
+ self.conn.block_storage.delete_volume(volume)
+ if self.params['wait']:
+ self.conn.block_storage.wait_for_delete(
+ volume, wait=self.params['timeout'])
+ self.exit_json(changed=True, diff=diff)
def run(self):
-
state = self.params['state']
- if self.conn.volume_exists(self.params['display_name']):
- volume = self.conn.get_volume(self.params['display_name'])
- else:
- volume = None
+ volume = self.conn.block_storage.find_volume(self.params['name'])
if state == 'present':
if not volume:
- self._present_volume()
- elif self._needs_update(volume):
- self._modify_volume(volume)
+ self._create()
else:
- self.exit_json(changed=False, id=volume['id'], volume=volume)
+ self._update(volume)
if state == 'absent':
- self._absent_volume(volume)
+ self._delete(volume)
def main():
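As the docstring of _update() notes, the only in-place change the module supports is growing a volume via extend_volume(). A usage sketch, assuming the clouds.yaml entry C(mordred) and the volume created in the EXAMPLES section above:

- name: Grow test_volume to 80 GiB (sketch)
  openstack.cloud.volume:
    cloud: mordred     # clouds.yaml entry reused from the EXAMPLES section
    state: present
    name: test_volume
    size: 80           # a size larger than the current one triggers extend_volume()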
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_backup.py b/ansible_collections/openstack/cloud/plugins/modules/volume_backup.py
index 43cacc72b..0101e35a7 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/volume_backup.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_backup.py
@@ -1,31 +1,44 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2020 by Open Telekom Cloud, operated by T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: volume_backup
short_description: Add/Delete Volume backup
-extends_documentation_fragment: openstack.cloud.openstack
author: OpenStack Ansible SIG
description:
- - Add or Remove Volume Backup in OTC.
+ - Add or Remove Volume Backup in OpenStack.
options:
- display_name:
+ description:
+ description:
+ - String describing the backup
+ type: str
+ aliases: ['display_description']
+ force:
+ description:
+ - Indicates whether to backup, even if the volume is attached.
+ type: bool
+ default: False
+ is_incremental:
+ description: The backup mode. If true, an incremental backup is created.
+ type: bool
+ default: False
+ aliases: ['incremental']
+ metadata:
+ description: Metadata for the backup
+ type: dict
+ name:
description:
- Name that has to be given to the backup
required: true
type: str
- aliases: ['name']
- display_description:
- description:
- - String describing the backup
- required: false
+ aliases: ['display_name']
+ snapshot:
+ description: Name or ID of the Snapshot to take backup of.
type: str
- aliases: ['description']
state:
description:
- Should the resource be present or absent.
@@ -34,63 +47,111 @@ options:
type: str
volume:
description:
- - Name or ID of the volume. Required when state is True.
+ - Name or ID of the volume.
+ - Required when I(state) is C(present).
type: str
- required: False
- snapshot:
- description: Name or ID of the Snapshot to take backup of
- type: str
- force:
- description:
- - Indicates whether to backup, even if the volume is attached.
- type: bool
- default: False
- metadata:
- description: Metadata for the backup
- type: dict
- incremental:
- description: The backup mode
- type: bool
- default: False
-requirements: ["openstacksdk"]
+notes:
+ - This module does not support updates to existing backups.
+extends_documentation_fragment:
+- openstack.cloud.openstack
'''
-RETURN = '''
-id:
- description: The Volume backup ID.
- returned: On success when C(state=present)
- type: str
- sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
+RETURN = r'''
backup:
- description: Dictionary describing the Cluster.
+ description: Same as C(volume_backup), kept for backward compatibility.
returned: On success when C(state=present)
- type: complex
+ type: dict
+volume_backup:
+ description: Dictionary describing the volume backup.
+ returned: On success when C(state=present)
+ type: dict
contains:
+ availability_zone:
+ description: Backup availability zone.
+ type: str
+ container:
+ description: The container name.
+ type: str
+ created_at:
+ description: Backup creation time.
+ type: str
+ data_timestamp:
+ description: The time when the data on the volume was first saved.
+ If it is a backup from a volume, it will be the same as
+ C(created_at) of the backup. If it is a backup from a
+ snapshot, it will be the same as C(created_at) of the
+ snapshot.
+ type: str
+ description:
+ description: Backup description.
+ type: str
+ fail_reason:
+ description: Backup fail reason.
+ type: str
+ force:
+ description: Force backup.
+ type: bool
+ has_dependent_backups:
+ description: If this value is true, there are other backups
+ depending on this backup.
+ type: bool
id:
description: Unique UUID.
type: str
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
+ is_incremental:
+ description: Backup incremental property.
+ type: bool
+ links:
+ description: A list of links associated with this volume.
+ type: list
+ metadata:
+ description: Backup metadata.
+ type: dict
name:
- description: Name given to the load balancer.
+ description: Backup Name.
+ type: str
+ object_count:
+ description: Backup object count.
+ type: int
+ project_id:
+ description: The UUID of the owning project.
+ type: str
+ size:
+ description: The size of the volume, in gibibytes (GiB).
+ type: int
+ snapshot_id:
+ description: Snapshot ID.
+ type: str
+ status:
+ description: Backup status.
+ type: str
+ updated_at:
+ description: Backup update time.
+ type: str
+ user_id:
+ description: The UUID of the project owner.
+ type: str
+ volume_id:
+ description: Volume ID.
type: str
- sample: "elb_test"
'''
-EXAMPLES = '''
+EXAMPLES = r'''
- name: Create backup
openstack.cloud.volume_backup:
- display_name: test_volume_backup
+ name: test_volume_backup
volume: "test_volume"
- name: Create backup from snapshot
openstack.cloud.volume_backup:
- display_name: test_volume_backup
- volume: "test_volume"
+ name: test_volume_backup
snapshot: "test_snapshot"
+ volume: "test_volume"
- name: Delete volume backup
openstack.cloud.volume_backup:
- display_name: test_volume_backup
+ name: test_volume_backup
state: absent
'''
@@ -98,19 +159,20 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class VolumeBackupModule(OpenStackModule):
- module_min_sdk_version = '0.49.0'
argument_spec = dict(
- display_name=dict(required=True, aliases=['name'], type='str'),
- display_description=dict(required=False, aliases=['description'],
- type='str'),
- volume=dict(required=False, type='str'),
- snapshot=dict(required=False, type='str'),
- state=dict(default='present', type='str', choices=['absent', 'present']),
+ description=dict(aliases=['display_description']),
force=dict(default=False, type='bool'),
- metadata=dict(required=False, type='dict'),
- incremental=dict(required=False, default=False, type='bool')
+ is_incremental=dict(default=False,
+ type='bool',
+ aliases=['incremental']),
+ metadata=dict(type='dict'),
+ name=dict(required=True, aliases=['display_name']),
+ snapshot=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ volume=dict(),
)
+
module_kwargs = dict(
required_if=[
('state', 'present', ['volume'])
@@ -118,98 +180,79 @@ class VolumeBackupModule(OpenStackModule):
supports_check_mode=True
)
- def _create_backup(self):
- if self.ansible.check_mode:
- self.exit_json(changed=True)
-
- name = self.params['display_name']
- description = self.params['display_description']
- volume = self.params['volume']
- snapshot = self.params['snapshot']
- force = self.params['force']
- is_incremental = self.params['incremental']
- metadata = self.params['metadata']
+ def run(self):
+ name = self.params['name']
+ state = self.params['state']
- changed = False
+ backup = self.conn.block_storage.find_backup(name)
- cloud_volume = self.conn.block_storage.find_volume(volume)
- cloud_snapshot_id = None
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, backup))
+
+ if state == 'present' and not backup:
+ backup = self._create()
+ self.exit_json(changed=True,
+ backup=backup.to_dict(computed=False),
+ volume_backup=backup.to_dict(computed=False))
+
+ elif state == 'present' and backup:
+ # We do not support backup updates, because
+ # openstacksdk does not support it either
+ self.exit_json(changed=False,
+ backup=backup.to_dict(computed=False),
+ volume_backup=backup.to_dict(computed=False))
+
+ elif state == 'absent' and backup:
+ self._delete(backup)
+ self.exit_json(changed=True)
- attrs = {
- 'name': name,
- 'volume_id': cloud_volume.id,
- 'force': force,
- 'is_incremental': is_incremental
- }
+ else: # state == 'absent' and not backup
+ self.exit_json(changed=False)
- if snapshot:
- cloud_snapshot_id = self.conn.block_storage.find_snapshot(
- snapshot, ignore_missing=False).id
- attrs['snapshot_id'] = cloud_snapshot_id
+ def _create(self):
+ args = dict()
+ for k in ['description', 'is_incremental', 'force', 'metadata',
+ 'name']:
+ if self.params[k] is not None:
+ args[k] = self.params[k]
- if metadata:
- attrs['metadata'] = metadata
+ volume_name_or_id = self.params['volume']
+ volume = self.conn.block_storage.find_volume(volume_name_or_id,
+ ignore_missing=False)
+ args['volume_id'] = volume.id
- if description:
- attrs['description'] = description
+ snapshot_name_or_id = self.params['snapshot']
+ if snapshot_name_or_id:
+ snapshot = self.conn.block_storage.find_snapshot(
+ snapshot_name_or_id, ignore_missing=False)
+ args['snapshot_id'] = snapshot.id
- backup = self.conn.block_storage.create_backup(**attrs)
- changed = True
+ backup = self.conn.block_storage.create_backup(**args)
if self.params['wait']:
- try:
- backup = self.conn.block_storage.wait_for_status(
- backup,
- status='available',
- wait=self.params['timeout'])
- self.exit_json(
- changed=True, volume_backup=backup.to_dict(), id=backup.id
- )
- except self.sdk.exceptions.ResourceTimeout:
- self.fail_json(
- msg='Timeout failure waiting for backup '
- 'to complete'
- )
-
- self.exit_json(
- changed=changed, volume_backup=backup.to_dict(), id=backup.id
- )
-
- def _delete_backup(self, backup):
- if self.ansible.check_mode:
- self.exit_json(changed=True)
-
- if backup:
- self.conn.block_storage.delete_backup(backup)
- if self.params['wait']:
- try:
- self.conn.block_storage.wait_for_delete(
- backup,
- interval=2,
- wait=self.params['timeout'])
- except self.sdk.exceptions.ResourceTimeout:
- self.fail_json(
- msg='Timeout failure waiting for backup '
- 'to be deleted'
- )
-
- self.exit_json(changed=True)
-
- def run(self):
- name = self.params['display_name']
-
- backup = self.conn.block_storage.find_backup(name)
+ backup = self.conn.block_storage.wait_for_status(
+ backup, status='available', wait=self.params['timeout'])
- if self.params['state'] == 'present':
- if not backup:
- self._create_backup()
- else:
- # For the moment we do not support backup update, since SDK
- # doesn't support it either => do nothing
- self.exit_json(changed=False)
+ return backup
- elif self.params['state'] == 'absent':
- self._delete_backup(backup)
+ def _delete(self, backup):
+ self.conn.block_storage.delete_backup(backup)
+ if self.params['wait']:
+ self.conn.block_storage.wait_for_delete(
+ backup, wait=self.params['timeout'])
+
+ def _will_change(self, state, backup):
+ if state == 'present' and not backup:
+ return True
+ elif state == 'present' and backup:
+ # We do not support backup updates, because
+ # openstacksdk does not support it either
+ return False
+ elif state == 'absent' and backup:
+ return True
+ else:
+ # state == 'absent' and not backup:
+ return False
def main():
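A usage sketch of the renamed options, assuming a clouds.yaml entry named C(devstack); an existing backup is never updated, so a re-run reports changed=false:

- name: Create an incremental backup of an attached volume (sketch)
  openstack.cloud.volume_backup:
    cloud: devstack        # assumed clouds.yaml entry
    state: present
    name: nightly_backup
    volume: test_volume
    is_incremental: true   # 'incremental' remains available as an alias
    force: true            # back up even while the volume is attached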
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_backup_info.py b/ansible_collections/openstack/cloud/plugins/modules/volume_backup_info.py
index fdb61834c..61c3134d4 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/volume_backup_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_backup_info.py
@@ -1,11 +1,10 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2020 by Open Telekom Cloud, operated by T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: volume_backup_info
short_description: Get Backups
@@ -19,14 +18,13 @@ options:
type: str
volume:
description:
- - Name of the volume.
+ - Name or ID of the volume.
type: str
-requirements: ["openstacksdk"]
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-RETURN = '''
+RETURN = r'''
volume_backups:
description: List of dictionaries describing volume backups.
type: list
@@ -36,12 +34,32 @@ volume_backups:
availability_zone:
description: Backup availability zone.
type: str
+ container:
+ description: The container name.
+ type: str
created_at:
description: Backup creation time.
type: str
+ data_timestamp:
+ description: The time when the data on the volume was first saved.
+ If it is a backup from a volume, it will be the same as
+ C(created_at) of the backup. If it is a backup from a
+ snapshot, it will be the same as C(created_at) of the
+ snapshot.
+ type: str
description:
description: Backup desciption.
type: str
+ fail_reason:
+ description: Backup fail reason.
+ type: str
+ force:
+ description: Force backup.
+ type: bool
+ has_dependent_backups:
+ description: If this value is true, there are other backups
+ depending on this backup.
+ type: bool
id:
description: Unique UUID.
type: str
@@ -49,12 +67,24 @@ volume_backups:
is_incremental:
description: Backup incremental property.
type: bool
+ links:
+ description: A list of links associated with this volume.
+ type: list
metadata:
description: Backup metadata.
type: dict
name:
description: Backup Name.
type: str
+ object_count:
+ description: Backup object count.
+ type: int
+ project_id:
+ description: The UUID of the owning project.
+ type: str
+ size:
+ description: The size of the volume, in gibibytes (GiB).
+ type: int
snapshot_id:
description: Snapshot ID.
type: str
@@ -64,57 +94,56 @@ volume_backups:
updated_at:
description: Backup update time.
type: str
+ user_id:
+ description: The UUID of the project owner.
+ type: str
volume_id:
description: Volume ID.
type: str
-
'''
-EXAMPLES = '''
-# Get backups.
-- openstack.cloud.volume_backup_info:
- register: backup
+EXAMPLES = r'''
+- name: Get all backups
+ openstack.cloud.volume_backup_info:
-- openstack.cloud.volume_backup_info:
+- name: Get backup 'my_fake_backup'
+ openstack.cloud.volume_backup_info:
name: my_fake_backup
- register: backup
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class VolumeBackupInfoModule(OpenStackModule):
- module_min_sdk_version = '0.49.0'
argument_spec = dict(
- name=dict(required=False, type='str'),
- volume=dict(required=False, type='str')
+ name=dict(),
+ volume=dict()
)
+
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
- name_filter = self.params['name']
- volume = self.params['volume']
-
- data = []
- attrs = {}
-
- if name_filter:
- attrs['name'] = name_filter
- if volume:
- attrs['volume_id'] = self.conn.block_storage.find_volume(volume)
-
- for raw in self.conn.block_storage.backups(**attrs):
- dt = raw.to_dict()
- dt.pop('location')
- data.append(dt)
-
- self.exit_json(
- changed=False,
- volume_backups=data
- )
+ kwargs = dict((k, self.params[k])
+ for k in ['name']
+ if self.params[k] is not None)
+
+ volume_name_or_id = self.params['volume']
+ volume = None
+ if volume_name_or_id:
+ volume = self.conn.block_storage.find_volume(volume_name_or_id)
+ if volume:
+ kwargs['volume_id'] = volume.id
+
+ if volume_name_or_id and not volume:
+ backups = []
+ else:
+ backups = [b.to_dict(computed=False)
+ for b in self.conn.block_storage.backups(**kwargs)]
+
+ self.exit_json(changed=False, volume_backups=backups)
def main():
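A usage sketch, assuming a clouds.yaml entry named C(devstack); when the referenced volume does not exist, the module now returns an empty C(volume_backups) list instead of failing:

- name: List backups of a specific volume (sketch)
  openstack.cloud.volume_backup_info:
    cloud: devstack      # assumed clouds.yaml entry
    volume: test_volume  # name or ID, resolved via find_volume()
  register: result

- name: Show backup names
  debug:
    msg: "{{ result.volume_backups | map(attribute='name') | list }}"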
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_info.py b/ansible_collections/openstack/cloud/plugins/modules/volume_info.py
index bcce49940..fd6cea005 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/volume_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_info.py
@@ -1,50 +1,144 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright (c) 2020, Sagi Shnaidman <sshnaidm@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: volume_info
-short_description: Retrive information about volumes
+short_description: Retrieve information about volumes
author: Sagi Shnaidman (@sshnaidm)
description:
- Get information about block storage in openstack
options:
- details:
+ all_projects:
description:
- - Whether to provide additional information about volumes
+ - Whether to return the volumes in all projects
type: bool
- all_projects:
+ details:
description:
- - Whether return the volumes in all projects
+ - Whether to provide additional information about volumes
type: bool
name:
description:
- - Name of the volume as a string.
+ - Name of the volume
type: str
required: false
status:
description:
- - Value of the status of the volume so that you can filter on "available" for example
+ - Status of the volume so that you can filter on C(available) for example
type: str
required: false
-
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-RETURN = '''
+RETURN = r'''
volumes:
- description: Volumes in project
+ description: Volumes in project(s)
returned: always
type: list
elements: dict
+ contains:
+ attachments:
+ description: Instance attachment information. If this volume is attached
+ to a server instance, the attachments list includes the UUID
+ of the attached server, an attachment UUID, the name of the
+ attached host, if any, the volume UUID, the device, and the
+ device UUID. Otherwise, this list is empty.
+ type: list
+ availability_zone:
+ description: The name of the availability zone.
+ type: str
+ consistency_group_id:
+ description: The UUID of the consistency group.
+ type: str
+ created_at:
+ description: The date and time when the resource was created.
+ type: str
+ description:
+ description: The volume description.
+ type: str
+ extended_replication_status:
+ description: Extended replication status on this volume.
+ type: str
+ group_id:
+ description: The ID of the group.
+ type: str
+ host:
+ description: The volume's current back-end.
+ type: str
+ id:
+ description: The UUID of the volume.
+ type: str
+ image_id:
+ description: Image on which the volume was based
+ type: str
+ is_bootable:
+ description: Enables or disables the bootable attribute. You can boot an
+ instance from a bootable volume.
+ type: str
+ is_encrypted:
+ description: If true, this volume is encrypted.
+ type: bool
+ metadata:
+ description: A metadata object. Contains one or more metadata key and
+ value pairs that are associated with the volume.
+ type: dict
+ migration_id:
+ description: The volume ID that this volume name on the backend is
+ based on.
+ type: str
+ migration_status:
+ description: The status of this volume migration (None means that a
+ migration is not currently in progress).
+ type: str
+ name:
+ description: The volume name.
+ type: str
+ project_id:
+ description: The project ID which the volume belongs to.
+ type: str
+ replication_driver_data:
+ description: Data set by the replication driver
+ type: str
+ replication_status:
+ description: The volume replication status.
+ type: str
+ scheduler_hints:
+ description: Scheduler hints for the volume
+ type: dict
+ size:
+ description: The size of the volume, in gibibytes (GiB).
+ type: int
+ snapshot_id:
+ description: To create a volume from an existing snapshot, specify the
+ UUID of the volume snapshot. The volume is created in same
+ availability zone and with same size as the snapshot.
+ type: str
+ source_volume_id:
+ description: The UUID of the source volume. The API creates a new volume
+ with the same size as the source volume unless a larger size
+ is requested.
+ type: str
+ status:
+ description: The volume status.
+ type: str
+ updated_at:
+ description: The date and time when the resource was updated.
+ type: str
+ user_id:
+ description: The UUID of the user.
+ type: str
+ volume_image_metadata:
+ description: List of image metadata entries. Only included for volumes
+ that were created from an image, or from a snapshot of a
+ volume originally created from an image.
+ type: dict
+ volume_type:
+ description: The associated volume type name for the volume.
+ type: str
sample:
- attachments: []
availability_zone: nova
@@ -52,25 +146,12 @@ volumes:
created_at: '2017-11-15T10:51:19.000000'
description: ''
extended_replication_status: null
+ group_id: 402ac6ed-527f-4781-8484-7ff4467e34f5
host: null
id: 103ac6ed-527f-4781-8484-7ff4467e34f5
image_id: null
is_bootable: true
is_encrypted: false
- links:
- - href: https://...
- rel: self
- - href: https://...
- rel: bookmark
- location:
- cloud: cloud
- project:
- domain_id: null
- domain_name: Default
- id: cfe04702154742fc964d9403c691c76e
- name: username
- region_name: regionOne
- zone: nova
metadata:
readonly: 'False'
migration_id: null
@@ -79,10 +160,13 @@ volumes:
project_id: cab34702154a42fc96ed9403c691c76e
replication_driver_data: null
replication_status: disabled
+ scheduler_hints: {}
size: 9
snapshot_id: null
source_volume_id: null
status: available
+ updated_at: '2017-11-15T10:51:19.000000'
+ user_id: ac303ed-527f-4781-8484-7ff4467e34f5
volume_image_metadata:
checksum: a14e113deeee3a3392462f167ed28cb5
container_format: bare
@@ -99,7 +183,7 @@ volumes:
volume_type: null
'''
-EXAMPLES = '''
+EXAMPLES = r'''
- openstack.cloud.volume_info:
- openstack.cloud.volume_info:
@@ -110,7 +194,7 @@ EXAMPLES = '''
- openstack.cloud.volume_info:
all_projects: true
- details: true
+ details: false
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -119,25 +203,25 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class VolumeInfoModule(OpenStackModule):
argument_spec = dict(
- details=dict(type='bool', required=False),
- all_projects=dict(type='bool', required=False, min_ver='0.19'),
- name=dict(type='str', required=False),
- status=dict(type='str', required=False),
+ all_projects=dict(type='bool'),
+ details=dict(type='bool'),
+ name=dict(),
+ status=dict(),
)
+
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
- kwargs = self.check_versioned(
- details=self.params['details'],
- name=self.params['name'],
- all_projects=self.params['all_projects'],
- status=self.params['status'],
- )
- result = self.conn.block_storage.volumes(**kwargs)
- result = [vol if isinstance(vol, dict) else vol.to_dict() for vol in result]
- self.results.update({'volumes': result})
+ kwargs = dict((k, self.params[k])
+ for k in ['all_projects', 'details', 'name', 'status']
+ if self.params[k] is not None)
+
+ volumes = [v.to_dict(computed=False)
+ for v in self.conn.block_storage.volumes(**kwargs)]
+
+ self.exit_json(changed=False, volumes=volumes)
def main():
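
A short sketch of the option handling used above: only options the user actually set are forwarded to the SDK, so server-side defaults stay in effect for the rest (openstacksdk >= 1.0.0 assumed; cloud name and filter values are illustrative):

import openstack

conn = openstack.connect(cloud='devstack')  # assumed cloud name

# module parameters as they might arrive; None means "not set by the user"
params = {'all_projects': True, 'details': None, 'name': None, 'status': 'available'}
query = {k: v for k, v in params.items() if v is not None}  # drop unset options

volumes = [v.to_dict(computed=False)
           for v in conn.block_storage.volumes(**query)]
print(len(volumes), 'volumes matched')
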
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_snapshot.py b/ansible_collections/openstack/cloud/plugins/modules/volume_snapshot.py
index 8625984c6..ebfc7d110 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/volume_snapshot.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_snapshot.py
@@ -1,10 +1,10 @@
#!/usr/bin/python
-# coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Copyright (c) 2016, Mario Santos <mario.rf.santos@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: volume_snapshot
short_description: Create/Delete Cinder Volume Snapshots
@@ -12,75 +12,104 @@ author: OpenStack Ansible SIG
description:
- Create or Delete cinder block storage volume snapshots
options:
- display_name:
- description:
- - Name of the snapshot
- required: true
- aliases: ['name']
- type: str
- display_description:
- description:
- - String describing the snapshot
- aliases: ['description']
- type: str
- volume:
- description:
- - The volume name or id to create/delete the snapshot
- required: True
- type: str
- force:
- description:
- - Allows or disallows snapshot of a volume to be created when the volume
- is attached to an instance.
- type: bool
- default: 'no'
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - "python >= 3.6"
- - "openstacksdk"
-
+ description:
+ description:
+ - String describing the snapshot
+ aliases: ['display_description']
+ type: str
+ force:
+ description:
+ - Allows or disallows a snapshot of a volume to be created when the volume is attached to an instance.
+ type: bool
+ default: false
+ name:
+ description:
+ - Name of the snapshot
+ required: true
+ aliases: ['display_name']
+ type: str
+ state:
+ description:
+ - Should the snapshot be C(present) or C(absent).
+ choices: [present, absent]
+ default: present
+ type: str
+ volume:
+ description:
+ - Volume name or ID to create the snapshot from.
+ - Required when I(state) is C(present).
+ type: str
+notes:
+ - Updating existing volume snapshots has not been implemented yet.
extends_documentation_fragment:
- openstack.cloud.openstack
'''
-EXAMPLES = '''
-# Creates a snapshot on volume 'test_volume'
-- name: create and delete snapshot
- hosts: localhost
- tasks:
- - name: create snapshot
- openstack.cloud.volume_snapshot:
- state: present
- cloud: mordred
- availability_zone: az2
- display_name: test_snapshot
- volume: test_volume
- - name: delete snapshot
- openstack.cloud.volume_snapshot:
- state: absent
- cloud: mordred
- availability_zone: az2
- display_name: test_snapshot
- volume: test_volume
+EXAMPLES = r'''
+- name: create snapshot
+ openstack.cloud.volume_snapshot:
+ state: present
+ cloud: mordred
+ name: test_snapshot
+ volume: test_volume
+- name: delete snapshot
+ openstack.cloud.volume_snapshot:
+ state: absent
+ cloud: mordred
+ name: test_snapshot
+ volume: test_volume
'''
-RETURN = '''
+RETURN = r'''
snapshot:
- description: The snapshot instance after the change
+ description: Same as C(volume_snapshot), kept for backward compatibility.
+ returned: On success when C(state=present)
+ type: dict
+volume_snapshot:
+ description: The snapshot instance
returned: success
type: dict
- sample:
- id: 837aca54-c0ee-47a2-bf9a-35e1b4fdac0c
- name: test_snapshot
- volume_id: ec646a7c-6a35-4857-b38b-808105a24be6
- size: 2
- status: available
- display_name: test_snapshot
+ contains:
+ created_at:
+ description: Snapshot creation time.
+ type: str
+ description:
+ description: Snapshot description.
+ type: str
+ id:
+ description: Unique UUID.
+ type: str
+ sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
+ is_forced:
+ description: Indicates whether to create the snapshot even if the volume is attached.
+ type: bool
+ metadata:
+ description: Snapshot metadata.
+ type: dict
+ name:
+ description: Snapshot Name.
+ type: str
+ progress:
+ description: The current completion percentage of the snapshot.
+ type: str
+ project_id:
+ description: The project ID this snapshot is associated with.
+ type: str
+ size:
+ description: The size of the volume, in GiB.
+ type: int
+ status:
+ description: Snapshot status.
+ type: str
+ updated_at:
+ description: Snapshot update time.
+ type: str
+ volume_id:
+ description: Volume ID.
+ type: str
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
@@ -88,74 +117,86 @@ from ansible_collections.openstack.cloud.plugins.module_utils.openstack import O
class VolumeSnapshotModule(OpenStackModule):
argument_spec = dict(
- display_name=dict(required=True, aliases=['name']),
- display_description=dict(default=None, aliases=['description']),
- volume=dict(required=True),
- force=dict(required=False, default=False, type='bool'),
+ description=dict(aliases=['display_description']),
+ name=dict(required=True, aliases=['display_name']),
+ force=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
+ volume=dict(),
)
module_kwargs = dict(
+ required_if=[
+ ('state', 'present', ['volume'])
+ ],
supports_check_mode=True
)
- def _present_volume_snapshot(self):
- volume = self.conn.get_volume(self.params['volume'])
- snapshot = self.conn.get_volume_snapshot(
- self.params['display_name'], filters={'volume_id': volume.id})
- if not snapshot:
- snapshot = self.conn.create_volume_snapshot(
- volume.id,
- force=self.params['force'],
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- name=self.params['display_name'],
- description=self.params.get('display_description')
- )
- self.exit_json(changed=True, snapshot=snapshot)
- else:
- self.exit_json(changed=False, snapshot=snapshot)
-
- def _absent_volume_snapshot(self):
- volume = self.conn.get_volume(self.params['volume'])
- snapshot = self.conn.get_volume_snapshot(
- self.params['display_name'], filters={'volume_id': volume.id})
- if not snapshot:
- self.exit_json(changed=False)
- else:
- self.conn.delete_volume_snapshot(
- name_or_id=snapshot.id,
- wait=self.params['wait'],
- timeout=self.params['timeout'],
- )
- self.exit_json(changed=True, snapshot_id=snapshot.id)
-
- def _system_state_change(self):
- volume = self.conn.get_volume(self.params['volume'])
- snapshot = self.conn.get_volume_snapshot(
- self.params['display_name'],
- filters={'volume_id': volume.id})
+ def run(self):
+ name = self.params['name']
state = self.params['state']
- if state == 'present':
- return snapshot is None
- if state == 'absent':
- return snapshot is not None
+ snapshot = self.conn.block_storage.find_snapshot(name)
- def run(self):
- state = self.params['state']
+ if self.ansible.check_mode:
+ self.exit_json(changed=self._will_change(state, snapshot))
+
+ if state == 'present' and not snapshot:
+ snapshot = self._create()
+ self.exit_json(changed=True,
+ snapshot=snapshot.to_dict(computed=False),
+ volume_snapshot=snapshot.to_dict(computed=False))
+
+ elif state == 'present' and snapshot:
+ # We do not support snapshot updates yet
+ # TODO: Implement module updates
+ self.exit_json(changed=False,
+ snapshot=snapshot.to_dict(computed=False),
+ volume_snapshot=snapshot.to_dict(computed=False))
+
+ elif state == 'absent' and snapshot:
+ self._delete(snapshot)
+ self.exit_json(changed=True)
+
+ else: # state == 'absent' and not snapshot
+ self.exit_json(changed=False)
- if self.conn.volume_exists(self.params['volume']):
- if self.ansible.check_mode:
- self.exit_json(changed=self._system_state_change())
- if state == 'present':
- self._present_volume_snapshot()
- if state == 'absent':
- self._absent_volume_snapshot()
+ def _create(self):
+ args = dict()
+ for k in ['description', 'force', 'name']:
+ if self.params[k] is not None:
+ args[k] = self.params[k]
+
+ volume_name_or_id = self.params['volume']
+ volume = self.conn.block_storage.find_volume(volume_name_or_id,
+ ignore_missing=False)
+ args['volume_id'] = volume.id
+
+ snapshot = self.conn.block_storage.create_snapshot(**args)
+
+ if self.params['wait']:
+ snapshot = self.conn.block_storage.wait_for_status(
+ snapshot, wait=self.params['timeout'])
+
+ return snapshot
+
+ def _delete(self, snapshot):
+ self.conn.block_storage.delete_snapshot(snapshot)
+ if self.params['wait']:
+ self.conn.block_storage.wait_for_delete(
+ snapshot, wait=self.params['timeout'])
+
+ def _will_change(self, state, snapshot):
+ if state == 'present' and not snapshot:
+ return True
+ elif state == 'present' and snapshot:
+ # We do not support snapshot updates yet
+ # TODO: Implement module updates
+ return False
+ elif state == 'absent' and snapshot:
+ return True
else:
- self.fail_json(
- msg="No volume with name or id '{0}' was found.".format(
- self.params['volume']))
+ # state == 'absent' and not snapshot:
+ return False
def main():
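
The rewritten run() is a small state machine: look the snapshot up by name, report the prospective change in check mode, create or delete as required, and treat present-plus-existing as a no-op because updates are not implemented yet. A hedged sketch of the create path outside Ansible (cloud, snapshot and volume names are illustrative):

import openstack

conn = openstack.connect(cloud='devstack')  # assumed cloud name

snapshot = conn.block_storage.find_snapshot('test_snapshot')
if snapshot is None:
    volume = conn.block_storage.find_volume('test_volume', ignore_missing=False)
    snapshot = conn.block_storage.create_snapshot(
        volume_id=volume.id, name='test_snapshot', force=False)
    # same call pattern as the module: poll until the snapshot is ready
    snapshot = conn.block_storage.wait_for_status(snapshot, wait=120)
    changed = True
else:
    changed = False  # snapshot updates are not supported, so nothing to do
print(changed, snapshot.to_dict(computed=False)['status'])
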
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_snapshot_info.py b/ansible_collections/openstack/cloud/plugins/modules/volume_snapshot_info.py
index fa50055da..0e2e7939d 100644
--- a/ansible_collections/openstack/cloud/plugins/modules/volume_snapshot_info.py
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_snapshot_info.py
@@ -1,11 +1,11 @@
#!/usr/bin/python
-# coding: utf-8 -*-
-#
+# -*- coding: utf-8 -*-
+
# Copyright (c) 2020 by Open Telekom Cloud, operated by T-Systems International GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: volume_snapshot_info
short_description: Get volume snapshots
@@ -16,32 +16,30 @@ options:
details:
description: More detailed output
type: bool
- default: True
name:
description:
- Name of the Snapshot.
type: str
- volume:
- description:
- - Name of the volume.
- type: str
status:
description:
- Specifies the snapshot status.
- choices: [creating, available, error, deleting,
- error_deleting, rollbacking, backing-up]
+ choices: ['available', 'backing-up', 'creating', 'deleted', 'deleting',
+ 'error', 'error_deleting', 'restoring', 'unmanaging']
+ type: str
+ volume:
+ description:
+ - Name or ID of the volume.
type: str
-requirements: ["openstacksdk"]
extends_documentation_fragment:
-- openstack.cloud.openstack
+ - openstack.cloud.openstack
'''
-RETURN = '''
+RETURN = r'''
volume_snapshots:
description: List of dictionaries describing volume snapshots.
type: list
elements: dict
- returned: always.
+ returned: always
contains:
created_at:
description: Snapshot creation time.
@@ -53,12 +51,26 @@ volume_snapshots:
description: Unique UUID.
type: str
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
+ is_forced:
+ description: Indicates whether to create the snapshot even if the volume is attached.
+ type: bool
metadata:
description: Snapshot metadata.
type: dict
name:
description: Snapshot Name.
type: str
+ progress:
+ description: The current completion percentage of the snapshot.
+ type: str
+ project_id:
+ description: The project ID this snapshot is associated with.
+ type: str
+ size:
+ description: The size of the volume, in GiB.
+ type: int
status:
description: Snapshot status.
type: str
@@ -68,63 +80,53 @@ volume_snapshots:
volume_id:
description: Volume ID.
type: str
-
'''
-EXAMPLES = '''
-# Get snapshots.
-- openstack.cloud.volume_snapshot_info:
- register: snapshots
+EXAMPLES = r'''
+- name: List all snapshots
+ openstack.cloud.volume_snapshot_info:
-- openstack.cloud.volume_snapshotbackup_info:
+- name: Fetch data about a single snapshot
+ openstack.cloud.volume_snapshot_info:
name: my_fake_snapshot
- register: snapshot
'''
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
class VolumeSnapshotInfoModule(OpenStackModule):
- module_min_sdk_version = '0.49.0'
-
argument_spec = dict(
- details=dict(default=True, type='bool'),
- name=dict(required=False, type='str'),
- volume=dict(required=False, type='str'),
- status=dict(required=False, type='str',
- choices=['creating', 'available', 'error',
- 'deleting', 'error_deleting', 'rollbacking',
- 'backing-up']),
+ details=dict(type='bool'),
+ name=dict(),
+ status=dict(choices=['available', 'backing-up', 'creating', 'deleted',
+ 'deleting', 'error', 'error_deleting',
+ 'restoring', 'unmanaging']),
+ volume=dict(),
)
+
module_kwargs = dict(
supports_check_mode=True
)
def run(self):
-
- details_filter = self.params['details']
- name_filter = self.params['name']
- volume_filter = self.params['volume']
- status_filter = self.params['status']
-
- data = []
- query = {}
- if name_filter:
- query['name'] = name_filter
- if volume_filter:
- query['volume_id'] = self.conn.block_storage.find_volume(volume_filter)
- if status_filter:
- query['status'] = status_filter.lower()
-
- for raw in self.conn.block_storage.snapshots(details_filter, **query):
- dt = raw.to_dict()
- dt.pop('location')
- data.append(dt)
-
- self.exit_json(
- changed=False,
- volume_snapshots=data
- )
+ kwargs = dict((k, self.params[k])
+ for k in ['details', 'name', 'status']
+ if self.params[k] is not None)
+
+ volume_name_or_id = self.params['volume']
+ volume = None
+ if volume_name_or_id:
+ volume = self.conn.block_storage.find_volume(volume_name_or_id)
+ if volume:
+ kwargs['volume_id'] = volume.id
+
+ if volume_name_or_id and not volume:
+ snapshots = []
+ else:
+ snapshots = [b.to_dict(computed=False)
+ for b in self.conn.block_storage.snapshots(**kwargs)]
+
+ self.exit_json(changed=False, volume_snapshots=snapshots)
def main():
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_type.py b/ansible_collections/openstack/cloud/plugins/modules/volume_type.py
new file mode 100644
index 000000000..4f9f3be71
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_type.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023 Cleura AB
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r'''
+---
+module: volume_type
+short_description: Manage OpenStack volume type
+author: OpenStack Ansible SIG
+description:
+ - Add, remove or update volume types in OpenStack.
+options:
+ name:
+ description:
+ - Volume type name or id.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the volume type.
+ type: str
+ extra_specs:
+ description:
+ - Dictionary of volume type properties (extra specs).
+ type: dict
+ is_public:
+ description:
+ - Make volume type accessible to the public.
+ - Can only be set during creation.
+ type: bool
+ state:
+ description:
+ - Indicate desired state of the resource.
+ - When I(state) is C(present), then I(is_public) is required.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+extends_documentation_fragment:
+ - openstack.cloud.openstack
+'''
+
+EXAMPLES = r'''
+ - name: Delete volume type by name
+ openstack.cloud.volume_type:
+ name: test_type
+ state: absent
+
+ - name: Delete volume type by id
+ openstack.cloud.volume_type:
+ name: fbadfa6b-5f17-4c26-948e-73b94de57b42
+ state: absent
+
+ - name: Create volume type
+ openstack.cloud.volume_type:
+ name: unencrypted_volume_type
+ state: present
+ extra_specs:
+ volume_backend_name: LVM_iSCSI
+ description: Unencrypted volume type
+ is_public: True
+'''
+
+RETURN = '''
+volume_type:
+ description: Dictionary describing volume type
+ returned: On success when I(state) is 'present'
+ type: dict
+ contains:
+ name:
+ description: volume type name
+ returned: success
+ type: str
+ sample: test_type
+ extra_specs:
+ description: volume type extra parameters
+ returned: success
+ type: dict
+ sample: null
+ is_public:
+ description: whether the volume type is public
+ returned: success
+ type: bool
+ sample: True
+ description:
+ description: volume type description
+ returned: success
+ type: str
+ sample: Unencrypted volume type
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+class VolumeTypeModule(OpenStackModule):
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ extra_specs=dict(type='dict', required=False),
+ is_public=dict(type='bool'),
+ state=dict(
+ type='str', default='present', choices=['absent', 'present']),
+ )
+ module_kwargs = dict(
+ required_if=[('state', 'present', ['is_public'])],
+ supports_check_mode=True,
+ )
+
+ @staticmethod
+ def _extract_result(details):
+ if details is not None:
+ return details.to_dict(computed=False)
+ return {}
+
+ def run(self):
+ state = self.params['state']
+ name_or_id = self.params['name']
+ volume_type = self.conn.block_storage.find_type(name_or_id)
+
+ if self.ansible.check_mode:
+ self.exit_json(
+ changed=self._will_change(state, volume_type))
+
+ if state == 'present' and not volume_type:
+ # Create type
+ create_result = self._create()
+ volume_type = self._extract_result(create_result)
+ self.exit_json(changed=True, volume_type=volume_type)
+
+ elif state == 'present' and volume_type:
+ # Update type
+ update = self._build_update(volume_type)
+ update_result = self._update(volume_type, update)
+ volume_type = self._extract_result(update_result)
+ self.exit_json(changed=bool(update), volume_type=volume_type)
+
+ elif state == 'absent' and volume_type:
+ # Delete type
+ self._delete(volume_type)
+ self.exit_json(changed=True)
+
+ def _build_update(self, volume_type):
+ return {
+ **self._build_update_extra_specs(volume_type),
+ **self._build_update_volume_type(volume_type)}
+
+ def _build_update_extra_specs(self, volume_type):
+ update = {}
+
+ old_extra_specs = volume_type['extra_specs']
+ new_extra_specs = self.params['extra_specs'] or {}
+
+ delete_extra_specs_keys = \
+ set(old_extra_specs.keys()) - set(new_extra_specs.keys())
+
+ if delete_extra_specs_keys:
+ update['delete_extra_specs_keys'] = delete_extra_specs_keys
+
+ stringified = {k: str(v) for k, v in new_extra_specs.items()}
+
+ if old_extra_specs != stringified:
+ update['create_extra_specs'] = new_extra_specs
+
+ return update
+
+ def _build_update_volume_type(self, volume_type):
+ update = {}
+ allowed_attributes = [
+ 'is_public', 'description', 'name']
+ type_attributes = {
+ k: self.params[k]
+ for k in allowed_attributes
+ if k in self.params and self.params.get(k) is not None
+ and self.params.get(k) != volume_type.get(k)}
+
+ if type_attributes:
+ update['type_attributes'] = type_attributes
+
+ return update
+
+ def _create(self):
+ kwargs = {k: self.params[k]
+ for k in ['name', 'is_public', 'description', 'extra_specs']
+ if self.params.get(k) is not None}
+ volume_type = self.conn.block_storage.create_type(**kwargs)
+ return volume_type
+
+ def _delete(self, volume_type):
+ self.conn.block_storage.delete_type(volume_type.id)
+
+ def _update(self, volume_type, update):
+ if not update:
+ return volume_type
+ volume_type = self._update_volume_type(volume_type, update)
+ volume_type = self._update_extra_specs(volume_type, update)
+ return volume_type
+
+ def _update_extra_specs(self, volume_type, update):
+ delete_extra_specs_keys = update.get('delete_extra_specs_keys')
+ if delete_extra_specs_keys:
+ self.conn.block_storage.delete_type_extra_specs(
+ volume_type, delete_extra_specs_keys)
+ # refresh volume_type information
+ volume_type = self.conn.block_storage.find_type(volume_type.id)
+
+ create_extra_specs = update.get('create_extra_specs')
+ if create_extra_specs:
+ self.conn.block_storage.update_type_extra_specs(
+ volume_type, **create_extra_specs)
+ # refresh volume_type information
+ volume_type = self.conn.block_storage.find_type(volume_type.id)
+
+ return volume_type
+
+ def _update_volume_type(self, volume_type, update):
+ type_attributes = update.get('type_attributes')
+ if type_attributes:
+ updated_type = self.conn.block_storage.update_type(
+ volume_type, **type_attributes)
+ return updated_type
+ return volume_type
+
+ def _will_change(self, state, volume_type):
+ if state == 'present' and not volume_type:
+ return True
+ if state == 'present' and volume_type:
+ return bool(self._build_update(volume_type))
+ if state == 'absent' and volume_type:
+ return True
+ return False
+
+
+def main():
+ module = VolumeTypeModule()
+ module()
+
+
+if __name__ == '__main__':
+ main()
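
The extra_specs handling above hinges on one detail: specs returned by the API are always strings, so the desired specs are stringified before comparison, and keys absent from the desired set are scheduled for deletion. A small pure-Python sketch of that diffing logic with illustrative values:

# current specs as returned by the API (always strings)
old_extra_specs = {'volume_backend_name': 'LVM_iSCSI', 'multiattach': '<is> True'}
# desired specs from the task (may contain non-string values)
new_extra_specs = {'volume_backend_name': 'LVM_iSCSI', 'replication_enabled': True}

delete_keys = set(old_extra_specs) - set(new_extra_specs)
stringified = {k: str(v) for k, v in new_extra_specs.items()}

update = {}
if delete_keys:
    update['delete_extra_specs_keys'] = delete_keys   # {'multiattach'}
if old_extra_specs != stringified:
    update['create_extra_specs'] = new_extra_specs    # re-apply the desired specs

print(update)
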
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_type_access.py b/ansible_collections/openstack/cloud/plugins/modules/volume_type_access.py
new file mode 100644
index 000000000..cf1251776
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_type_access.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r'''
+---
+module: volume_type_access
+short_description: Manage access to OpenStack block-storage volume type
+author: OpenStack Ansible SIG
+description:
+ - Add or remove access to OpenStack block-storage volume type
+options:
+ name:
+ description:
+ - Name or ID of the block-storage volume type.
+ required: true
+ type: str
+ project:
+ description:
+ - ID or name of the project to grant or revoke access for.
+ - Allows I(project) to access a private volume type.
+ type: str
+ required: true
+ project_domain:
+ description:
+ - Domain the project belongs to (name or ID).
+ - This can be used in case collisions between project names exist.
+ type: str
+ state:
+ description:
+ - Indicate whether project should have access to volume type or not.
+ default: present
+ type: str
+ choices: ['present', 'absent']
+notes:
+ - A volume type must not be public to manage project access.
+extends_documentation_fragment:
+- openstack.cloud.openstack
+'''
+
+EXAMPLES = r'''
+- name: Grant access to volume type vol-type-001
+ openstack.cloud.volume_type_access:
+ cloud: devstack
+ name: vol-type-001
+ project: demo
+ state: present
+
+- name: Revoke access to volume type
+ openstack.cloud.volume_type_access:
+ cloud: devstack
+ name: vol-type-001
+ project: demo
+ state: absent
+'''
+
+RETURN = '''
+volume_type:
+ description: Dictionary describing the volume type.
+ returned: success
+ type: dict
+ contains:
+ description:
+ description: Description of the type.
+ returned: success
+ type: str
+ extra_specs:
+ description: A dict of extra specifications;
+ "capabilities" is a common key.
+ returned: success
+ type: dict
+ id:
+ description: Volume type ID.
+ returned: success
+ type: str
+ is_public:
+ description: Volume type is accessible to the public.
+ returned: success
+ type: bool
+ name:
+ description: Volume type name.
+ returned: success
+ type: str
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+class VolumeTypeAccess(OpenStackModule):
+ argument_spec = dict(
+ name=dict(required=True),
+ project=dict(required=True),
+ project_domain=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = dict(
+ supports_check_mode=True,
+ )
+
+ # TODO: Merge with equal function from compute_flavor_access module.
+ def _project_and_project_domain(self):
+ project_name_or_id = self.params['project']
+ project_domain_name_or_id = self.params['project_domain']
+
+ if project_domain_name_or_id:
+ domain_id = self.conn.identity.find_domain(
+ project_domain_name_or_id, ignore_missing=False).id
+ else:
+ domain_id = None
+
+ kwargs = dict() if domain_id is None else dict(domain_id=domain_id)
+
+ if project_name_or_id:
+ project_id = self.conn.identity.find_project(
+ project_name_or_id, ignore_missing=False, **kwargs).id
+ else:
+ project_id = None
+
+ return project_id, domain_id
+
+ def run(self):
+ name_or_id = self.params['name']
+
+ # Workaround for an issue in openstacksdk where
+ # self.conn.block_storage.find_type() will not
+ # find private volume types.
+ volume_types = \
+ list(self.conn.block_storage.types(is_public=False)) \
+ + list(self.conn.block_storage.types(is_public=True))
+
+ volume_type = [volume_type for volume_type in volume_types
+ if volume_type.id == name_or_id
+ or volume_type.name == name_or_id][0]
+
+ state = self.params['state']
+ if state == 'present' and volume_type.is_public:
+ raise ValueError('access can only be granted to private types')
+
+ project_id, domain_id = self._project_and_project_domain()
+
+ volume_type_access = \
+ self.conn.block_storage.get_type_access(volume_type.id)
+ project_ids = [access.get('project_id')
+ for access in volume_type_access]
+
+ if (project_id in project_ids and state == 'present') \
+ or (project_id not in project_ids and state == 'absent'):
+ self.exit_json(changed=False,
+ volume_type=volume_type.to_dict(computed=False))
+
+ if self.ansible.check_mode:
+ self.exit_json(changed=True,
+ volume_type=volume_type.to_dict(computed=False))
+
+ if project_id in project_ids: # and state == 'absent'
+ self.conn.block_storage.remove_type_access(volume_type.id,
+ project_id)
+ else: # project_id not in project_ids and state == 'present'
+ self.conn.block_storage.add_type_access(volume_type.id,
+ project_id)
+
+ self.exit_json(changed=True,
+ volume_type=volume_type.to_dict(computed=False))
+
+
+def main():
+ module = VolumeTypeAccess()
+ module()
+
+
+if __name__ == '__main__':
+ main()
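
Because find_type() does not return private volume types here, the module lists private and public types and matches on name or ID itself, then grants or revokes access only when the project is missing from (or still present in) the access list. A condensed sketch of the grant path (cloud, type and project names are illustrative):

import openstack

conn = openstack.connect(cloud='devstack')  # assumed cloud name

types = list(conn.block_storage.types(is_public=False)) \
    + list(conn.block_storage.types(is_public=True))
volume_type = next(t for t in types
                   if t.id == 'vol-type-001' or t.name == 'vol-type-001')

project = conn.identity.find_project('demo', ignore_missing=False)
access = conn.block_storage.get_type_access(volume_type.id)
project_ids = [entry.get('project_id') for entry in access]

if project.id not in project_ids:  # state == 'present'
    conn.block_storage.add_type_access(volume_type.id, project.id)
    print('access granted')
else:
    print('already granted, nothing to do')
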
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_type_encryption.py b/ansible_collections/openstack/cloud/plugins/modules/volume_type_encryption.py
new file mode 100644
index 000000000..a6f8339b3
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_type_encryption.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023 Cleura AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r'''
+---
+module: volume_type_encryption
+short_description: Manage OpenStack volume type encryption
+author: OpenStack Ansible SIG
+description:
+ - Add, remove or update volume type encryption in OpenStack.
+options:
+ volume_type:
+ description:
+ - Volume type name or id.
+ required: true
+ type: str
+ state:
+ description:
+ - Indicate desired state of the resource.
+ - When I(state) is C(present), the encryption options are required.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ encryption_provider:
+ description:
+ - Class that provides encryption support for the volume type.
+ - Admin only.
+ type: str
+ encryption_cipher:
+ description:
+ - Encryption algorithm or mode.
+ - Admin only.
+ type: str
+ encryption_control_location:
+ description:
+ - Set the notional service where the encryption is performed.
+ - Admin only.
+ choices: ['front-end', 'back-end']
+ type: str
+ encryption_key_size:
+ description:
+ - Set the size of the encryption key of this volume type.
+ - Admin only.
+ choices: [128, 256, 512]
+ type: int
+extends_documentation_fragment:
+ - openstack.cloud.openstack
+'''
+
+EXAMPLES = r'''
+ - name: Create volume type encryption
+ openstack.cloud.volume_type_encryption:
+ volume_type: test_type
+ state: present
+ encryption_provider: nova.volume.encryptors.luks.LuksEncryptor
+ encryption_cipher: aes-xts-plain64
+ encryption_control_location: front-end
+ encryption_key_size: 256
+
+ - name: Delete volume type encryption
+ openstack.cloud.volume_type_encryption:
+ volume_type: test_type
+ state: absent
+ register: the_result
+'''
+
+RETURN = '''
+encryption:
+ description: Dictionary describing volume type encryption
+ returned: On success when I(state) is 'present'
+ type: dict
+ contains:
+ cipher:
+ description: encryption cipher
+ returned: success
+ type: str
+ sample: aes-xts-plain64
+ control_location:
+ description: encryption location
+ returned: success
+ type: str
+ sample: front-end
+ created_at:
+ description: Resource creation date and time
+ returned: success
+ type: str
+ sample: "2023-08-04T10:23:03.000000"
+ deleted:
+ description: Boolean if the resource was deleted
+ returned: success
+ type: str
+ sample: false
+ deleted_at:
+ description: Resource delete date and time
+ returned: success
+ type: str
+ sample: null
+ encryption_id:
+ description: UUID of the volume type encryption
+ returned: success
+ type: str
+ sample: b75d8c5c-a6d8-4a5d-8c86-ef4f1298525d
+ id:
+ description: Alias to encryption_id
+ returned: success
+ type: str
+ sample: b75d8c5c-a6d8-4a5d-8c86-ef4f1298525d
+ key_size:
+ description: Size of the key
+ returned: success
+ type: str
+ sample: 256
+ provider:
+ description: Encryption provider
+ returned: success
+ type: str
+ sample: "nova.volume.encryptors.luks.LuksEncryptor"
+ updated_at:
+ description: Resource last update date and time
+ returned: success
+ type: str
+ sample: null
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+class VolumeTypeModule(OpenStackModule):
+ argument_spec = dict(
+ volume_type=dict(type='str', required=True),
+ state=dict(
+ type='str', default='present', choices=['absent', 'present']),
+ encryption_provider=dict(type='str', required=False),
+ encryption_cipher=dict(type='str', required=False),
+ encryption_control_location=dict(
+ type='str', choices=['front-end', 'back-end'], required=False),
+ encryption_key_size=dict(
+ type='int', choices=[128, 256, 512], required=False),
+ )
+ module_kwargs = dict(
+ required_if=[('state', 'present', [
+ 'encryption_provider', 'encryption_cipher',
+ 'encryption_control_location', 'encryption_key_size'])],
+ supports_check_mode=True,
+ )
+
+ @staticmethod
+ def _extract_result(details):
+ if details is not None:
+ return details.to_dict(computed=False)
+ return {}
+
+ def run(self):
+ state = self.params['state']
+ name = self.params['volume_type']
+ volume_type = self.conn.block_storage.find_type(name)
+
+ # TODO: Add get type_encryption by id
+ type_encryption = self.conn.block_storage.get_type_encryption(
+ volume_type.id)
+ encryption_id = type_encryption.get('encryption_id')
+
+ if self.ansible.check_mode:
+ self.exit_json(
+ changed=self._will_change(state, encryption_id))
+
+ if state == 'present':
+ update = self._build_update_type_encryption(type_encryption)
+ if not bool(update):
+ # No change is required
+ self.exit_json(changed=False)
+
+ if not encryption_id: # Create new type encryption
+ result = self.conn.block_storage.create_type_encryption(
+ volume_type, **update)
+ else: # Update existing type encryption
+ result = self.conn.block_storage.update_type_encryption(
+ encryption=type_encryption, **update)
+ encryption = self._extract_result(result)
+ self.exit_json(changed=bool(update), encryption=encryption)
+ elif encryption_id is not None:
+ # absent state requires type encryption delete
+ self.conn.block_storage.delete_type_encryption(type_encryption)
+ self.exit_json(changed=True)
+
+ def _build_update_type_encryption(self, type_encryption):
+ attributes_map = {
+ 'encryption_provider': 'provider',
+ 'encryption_cipher': 'cipher',
+ 'encryption_key_size': 'key_size',
+ 'encryption_control_location': 'control_location'}
+
+ encryption_attributes = {
+ attributes_map[k]: self.params[k]
+ for k in self.params
+ if k in attributes_map.keys() and self.params.get(k) is not None
+ and self.params.get(k) != type_encryption.get(attributes_map[k])}
+
+ if 'encryption_provider' in encryption_attributes.keys():
+ encryption_attributes['provider'] = \
+ encryption_attributes['encryption_provider']
+
+ return encryption_attributes
+
+ def _update_type_encryption(self, type_encryption, update):
+ if update:
+ updated_type = self.conn.block_storage.update_type_encryption(
+ encryption=type_encryption,
+ **update)
+ return updated_type
+ return {}
+
+ def _will_change(self, state, type_encryption):
+ encryption_id = type_encryption.get('encryption_id')
+ if state == 'present' and not encryption_id:
+ return True
+ if state == 'present' and encryption_id is not None:
+ return bool(self._build_update_type_encryption(type_encryption))
+ if state == 'absent' and encryption_id is not None:
+ return True
+ return False
+
+
+def main():
+ module = VolumeTypeModule()
+ module()
+
+
+if __name__ == '__main__':
+ main()
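
The update builder maps the module's encryption_* options onto the SDK attribute names and keeps only the values that differ from the current type encryption, so an unchanged configuration triggers no API call. A sketch of that mapping with illustrative values:

# module parameters as they might arrive from a task
params = {
    'encryption_provider': 'nova.volume.encryptors.luks.LuksEncryptor',
    'encryption_cipher': 'aes-xts-plain64',
    'encryption_control_location': 'front-end',
    'encryption_key_size': 256,
}
# current state as returned by get_type_encryption() (illustrative values)
current = {'provider': 'nova.volume.encryptors.luks.LuksEncryptor',
           'cipher': 'aes-xts-plain64',
           'control_location': 'front-end',
           'key_size': 128}

attribute_map = {
    'encryption_provider': 'provider',
    'encryption_cipher': 'cipher',
    'encryption_key_size': 'key_size',
    'encryption_control_location': 'control_location',
}

update = {attribute_map[k]: v for k, v in params.items()
          if v is not None and v != current.get(attribute_map[k])}
print(update)  # {'key_size': 256} -- only the changed attribute is sent
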
diff --git a/ansible_collections/openstack/cloud/plugins/modules/volume_type_info.py b/ansible_collections/openstack/cloud/plugins/modules/volume_type_info.py
new file mode 100644
index 000000000..55654ce27
--- /dev/null
+++ b/ansible_collections/openstack/cloud/plugins/modules/volume_type_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023 Cleura AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r'''
+---
+module: volume_type_info
+short_description: Get OpenStack volume type details
+author: OpenStack Ansible SIG
+description:
+ - Get volume type details in OpenStack.
+ - Get volume type encryption details in OpenStack.
+options:
+ name:
+ description:
+ - Volume type name or id.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - openstack.cloud.openstack
+'''
+
+EXAMPLES = r'''
+ - name: Get volume type details
+ openstack.cloud.volume_type_info:
+ name: test_type
+
+ - name: Get volume type details by id
+ openstack.cloud.volume_type_info:
+ name: fbadfa6b-5f17-4c26-948e-73b94de57b42
+'''
+
+RETURN = '''
+access_project_ids:
+ description:
+ - List of project IDs allowed to access the volume type.
+ - Public volume types return a 'null' value as it is not applicable.
+ returned: On success when I(state) is 'present'
+ type: list
+ elements: str
+volume_type:
+ description: Dictionary describing volume type
+ returned: On success when I(state) is 'present'
+ type: dict
+ contains:
+ id:
+ description: volume_type uuid
+ returned: success
+ type: str
+ sample: b75d8c5c-a6d8-4a5d-8c86-ef4f1298525d
+ name:
+ description: volume type name
+ returned: success
+ type: str
+ sample: test_type
+ extra_specs:
+ description: volume type extra parameters
+ returned: success
+ type: dict
+ sample: null
+ is_public:
+ description: whether the volume type is public
+ returned: success
+ type: bool
+ sample: True
+ description:
+ description: volume type description
+ returned: success
+ type: str
+ sample: Unencrypted volume type
+encryption:
+ description: Dictionary describing volume type encryption
+ returned: On success when I(state) is 'present'
+ type: dict
+ contains:
+ cipher:
+ description: encryption cipher
+ returned: success
+ type: str
+ sample: aes-xts-plain64
+ control_location:
+ description: encryption location
+ returned: success
+ type: str
+ sample: front-end
+ created_at:
+ description: Resource creation date and time
+ returned: success
+ type: str
+ sample: "2023-08-04T10:23:03.000000"
+ deleted:
+ description: Boolean if the resource was deleted
+ returned: success
+ type: str
+ sample: false
+ deleted_at:
+ description: Resource delete date and time
+ returned: success
+ type: str
+ sample: null
+ encryption_id:
+ description: UUID of the volume type encryption
+ returned: success
+ type: str
+ sample: b75d8c5c-a6d8-4a5d-8c86-ef4f1298525d
+ id:
+ description: Alias to encryption_id
+ returned: success
+ type: str
+ sample: b75d8c5c-a6d8-4a5d-8c86-ef4f1298525d
+ key_size:
+ description: Size of the key
+ returned: success
+ type: str
+ sample: 256
+ provider:
+ description: Encryption provider
+ returned: success
+ type: str
+ sample: "nova.volume.encryptors.luks.LuksEncryptor"
+ updated_at:
+ description: Resource last update date and time
+ returned: success
+ type: str
+ sample: null
+'''
+
+from ansible_collections.openstack.cloud.plugins.module_utils.openstack import OpenStackModule
+
+
+class VolumeTypeModule(OpenStackModule):
+ argument_spec = dict(
+ name=dict(type='str', required=True)
+ )
+ module_kwargs = dict(
+ supports_check_mode=True,
+ )
+
+ @staticmethod
+ def _extract_result(details):
+ if details is not None:
+ return details.to_dict(computed=False)
+ return {}
+
+ def run(self):
+ name_or_id = self.params['name']
+ volume_type = self.conn.block_storage.find_type(name_or_id)
+
+ type_encryption = self.conn.block_storage.get_type_encryption(
+ volume_type.id)
+
+ if volume_type.is_public:
+ type_access = None
+ else:
+ type_access = [
+ proj['project_id']
+ for proj in self.conn.block_storage.get_type_access(
+ volume_type.id)]
+
+ self.exit_json(
+ changed=False,
+ volume_type=self._extract_result(volume_type),
+ encryption=self._extract_result(type_encryption),
+ access_project_ids=type_access)
+
+
+def main():
+ module = VolumeTypeModule()
+ module()
+
+
+if __name__ == '__main__':
+ main()
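
The info module combines three block-storage calls: the type itself, its encryption settings, and, for private types only, the list of projects with access. A minimal sketch (cloud and type names are illustrative, openstacksdk >= 1.0.0 assumed):

import openstack

conn = openstack.connect(cloud='devstack')  # assumed cloud name

volume_type = conn.block_storage.find_type('test_type')
encryption = conn.block_storage.get_type_encryption(volume_type.id)

if volume_type.is_public:
    access_project_ids = None  # access lists only apply to private types
else:
    access_project_ids = [entry['project_id']
                          for entry in conn.block_storage.get_type_access(volume_type.id)]

print(volume_type.to_dict(computed=False))
print(encryption.to_dict(computed=False))
print(access_project_ids)
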
diff --git a/ansible_collections/openstack/cloud/requirements.txt b/ansible_collections/openstack/cloud/requirements.txt
index ff1e9021c..3d959de5b 100644
--- a/ansible_collections/openstack/cloud/requirements.txt
+++ b/ansible_collections/openstack/cloud/requirements.txt
@@ -1 +1 @@
-openstacksdk>=0.36,<0.99.0
+openstacksdk>=1.0.0
diff --git a/ansible_collections/openstack/cloud/scripts/inventory/openstack.yml b/ansible_collections/openstack/cloud/scripts/inventory/openstack.yml
deleted file mode 100644
index 8053fb8fd..000000000
--- a/ansible_collections/openstack/cloud/scripts/inventory/openstack.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-clouds:
- vexxhost:
- profile: vexxhost
- auth:
- project_name: 39e296b2-fc96-42bf-8091-cb742fa13da9
- username: fb886a9b-c37b-442a-9be3-964bed961e04
- password: fantastic-password1
- rax:
- profile: rackspace
- auth:
- username: example
- password: spectacular-password
- project_id: 2352426
- region_name: DFW,ORD,IAD
- devstack:
- auth:
- auth_url: https://devstack.example.com
- username: stack
- password: stack
- project_name: stack
-ansible:
- use_hostnames: True
- expand_hostvars: False
- fail_on_errors: True
diff --git a/ansible_collections/openstack/cloud/scripts/inventory/openstack_inventory.py b/ansible_collections/openstack/cloud/scripts/inventory/openstack_inventory.py
deleted file mode 100644
index f0b2ff896..000000000
--- a/ansible_collections/openstack/cloud/scripts/inventory/openstack_inventory.py
+++ /dev/null
@@ -1,281 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
-# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
-# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2016, Rackspace Australia
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-# The OpenStack Inventory module uses os-client-config for configuration.
-# https://github.com/openstack/os-client-config
-# This means it will either:
-# - Respect normal OS_* environment variables like other OpenStack tools
-# - Read values from a clouds.yaml file.
-# If you want to configure via clouds.yaml, you can put the file in:
-# - Current directory
-# - ~/.config/openstack/clouds.yaml
-# - /etc/openstack/clouds.yaml
-# - /etc/ansible/openstack.yml
-# The clouds.yaml file can contain entries for multiple clouds and multiple
-# regions of those clouds. If it does, this inventory module will by default
-# connect to all of them and present them as one contiguous inventory. You
-# can limit to one cloud by passing the `--cloud` parameter, or use the
-# OS_CLOUD environment variable. If caching is enabled, and a cloud is
-# selected, then per-cloud cache folders will be used.
-#
-# See the adjacent openstack.yml file for an example config file
-# There are two ansible inventory specific options that can be set in
-# the inventory section.
-# expand_hostvars controls whether or not the inventory will make extra API
-# calls to fill out additional information about each server
-# use_hostnames changes the behavior from registering every host with its UUID
-# and making a group of its hostname to only doing this if the
-# hostname in question has more than one server
-# fail_on_errors causes the inventory to fail and return no hosts if one cloud
-# has failed (for example, bad credentials or being offline).
-# When set to False, the inventory will return hosts from
-# whichever other clouds it can contact. (Default: True)
-#
-# Also it is possible to pass the correct user by setting an ansible_user: $myuser
-# metadata attribute.
-
-import argparse
-import collections
-import os
-import sys
-import time
-from ansible.module_utils.six import raise_from
-try:
- from ansible.module_utils.compat.version import StrictVersion
-except ImportError:
- try:
- from distutils.version import StrictVersion
- except ImportError as exc:
- raise_from(ImportError('To use this plugin or module with ansible-core'
- ' < 2.11, you need to use Python < 3.12 with '
- 'distutils.version present'), exc)
-from io import StringIO
-
-import json
-
-import openstack as sdk
-from openstack.cloud import inventory as sdk_inventory
-from openstack.config import loader as cloud_config
-
-CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
-
-
-def get_groups_from_server(server_vars, namegroup=True):
- groups = []
-
- region = server_vars['region']
- cloud = server_vars['cloud']
- metadata = server_vars.get('metadata', {})
-
- # Create a group for the cloud
- groups.append(cloud)
-
- # Create a group on region
- if region:
- groups.append(region)
-
- # And one by cloud_region
- groups.append("%s_%s" % (cloud, region))
-
- # Check if group metadata key in servers' metadata
- if 'group' in metadata:
- groups.append(metadata['group'])
-
- for extra_group in metadata.get('groups', '').split(','):
- if extra_group:
- groups.append(extra_group.strip())
-
- groups.append('instance-%s' % server_vars['id'])
- if namegroup:
- groups.append(server_vars['name'])
-
- for key in ('flavor', 'image'):
- if 'name' in server_vars[key]:
- groups.append('%s-%s' % (key, server_vars[key]['name']))
-
- for key, value in iter(metadata.items()):
- groups.append('meta-%s_%s' % (key, value))
-
- az = server_vars.get('az', None)
- if az:
- # Make groups for az, region_az and cloud_region_az
- groups.append(az)
- groups.append('%s_%s' % (region, az))
- groups.append('%s_%s_%s' % (cloud, region, az))
- return groups
-
-
-def get_host_groups(inventory, refresh=False, cloud=None):
- (cache_file, cache_expiration_time) = get_cache_settings(cloud)
- if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
- groups = to_json(get_host_groups_from_cloud(inventory))
- with open(cache_file, 'w') as f:
- f.write(groups)
- else:
- with open(cache_file, 'r') as f:
- groups = f.read()
- return groups
-
-
-def append_hostvars(hostvars, groups, key, server, namegroup=False):
- hostvars[key] = dict(
- ansible_ssh_host=server['interface_ip'],
- ansible_host=server['interface_ip'],
- openstack=server)
-
- metadata = server.get('metadata', {})
- if 'ansible_user' in metadata:
- hostvars[key]['ansible_user'] = metadata['ansible_user']
-
- for group in get_groups_from_server(server, namegroup=namegroup):
- groups[group].append(key)
-
-
-def get_host_groups_from_cloud(inventory):
- groups = collections.defaultdict(list)
- firstpass = collections.defaultdict(list)
- hostvars = {}
- list_args = {}
- if hasattr(inventory, 'extra_config'):
- use_hostnames = inventory.extra_config['use_hostnames']
- list_args['expand'] = inventory.extra_config['expand_hostvars']
- if StrictVersion(sdk.version.__version__) >= StrictVersion("0.13.0"):
- list_args['fail_on_cloud_config'] = \
- inventory.extra_config['fail_on_errors']
- else:
- use_hostnames = False
-
- for server in inventory.list_hosts(**list_args):
-
- if 'interface_ip' not in server:
- continue
- firstpass[server['name']].append(server)
- for name, servers in firstpass.items():
- if len(servers) == 1 and use_hostnames:
- append_hostvars(hostvars, groups, name, servers[0])
- else:
- server_ids = set()
- # Trap for duplicate results
- for server in servers:
- server_ids.add(server['id'])
- if len(server_ids) == 1 and use_hostnames:
- append_hostvars(hostvars, groups, name, servers[0])
- else:
- for server in servers:
- append_hostvars(
- hostvars, groups, server['id'], server,
- namegroup=True)
- groups['_meta'] = {'hostvars': hostvars}
- return groups
-
-
-def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
- ''' Determines if cache file has expired, or if it is still valid '''
- if refresh:
- return True
- if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
- mod_time = os.path.getmtime(cache_file)
- current_time = time.time()
- if (mod_time + cache_expiration_time) > current_time:
- return False
- return True
-
-
-def get_cache_settings(cloud=None):
- config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
- if cloud:
- config = cloud_config.OpenStackConfig(
- config_files=config_files).get_one(cloud=cloud)
- else:
- config = cloud_config.OpenStackConfig(
- config_files=config_files).get_all()[0]
- # For inventory-wide caching
- cache_expiration_time = config.get_cache_expiration_time()
- cache_path = config.get_cache_path()
- if cloud:
- cache_path = '{0}_{1}'.format(cache_path, cloud)
- if not os.path.exists(cache_path):
- os.makedirs(cache_path)
- cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
- return (cache_file, cache_expiration_time)
-
-
-def to_json(in_dict):
- return json.dumps(in_dict, sort_keys=True, indent=2)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
- parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'),
- help='Cloud name (default: None')
- parser.add_argument('--private',
- action='store_true',
- help='Use private address for ansible host')
- parser.add_argument('--refresh', action='store_true',
- help='Refresh cached information')
- parser.add_argument('--debug', action='store_true', default=False,
- help='Enable debug output')
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('--list', action='store_true',
- help='List active servers')
- group.add_argument('--host', help='List details about the specific host')
-
- return parser.parse_args()
-
-
-def main():
- args = parse_args()
- try:
- # openstacksdk library may write to stdout, so redirect this
- sys.stdout = StringIO()
- config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
- sdk.enable_logging(debug=args.debug)
- inventory_args = dict(
- refresh=args.refresh,
- config_files=config_files,
- private=args.private,
- cloud=args.cloud,
- )
- if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
- inventory_args.update(dict(
- config_key='ansible',
- config_defaults={
- 'use_hostnames': False,
- 'expand_hostvars': True,
- 'fail_on_errors': True,
- }
- ))
-
- inventory = sdk_inventory.OpenStackInventory(**inventory_args)
-
- sys.stdout = sys.__stdout__
- if args.list:
- output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
- elif args.host:
- output = to_json(inventory.get_host(args.host))
- print(output)
- except sdk.exceptions.OpenStackCloudException as e:
- sys.stderr.write('%s\n' % e.message)
- sys.exit(1)
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()