Diffstat (limited to 'ansible_collections/amazon/aws')
-rw-r--r--  ansible_collections/amazon/aws/CHANGELOG.rst | 161
-rw-r--r--  ansible_collections/amazon/aws/CI.md | 2
-rw-r--r--  ansible_collections/amazon/aws/FILES.json | 358
-rw-r--r--  ansible_collections/amazon/aws/MANIFEST.json | 6
-rw-r--r--  ansible_collections/amazon/aws/README.md | 10
-rw-r--r--  ansible_collections/amazon/aws/changelogs/changelog.yaml | 224
-rw-r--r--  ansible_collections/amazon/aws/docs/docsite/links.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst | 161
-rw-r--r--  ansible_collections/amazon/aws/meta/runtime.yml | 15
-rw-r--r--  ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py | 12
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py | 17
-rw-r--r--  ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py | 6
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/acm.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/botocore.py | 20
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/common.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/ec2.py | 18
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/elbv2.py | 108
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/iam.py | 10
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/modules.py | 10
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/policy.py | 57
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/rds.py | 41
-rw-r--r--  ansible_collections/amazon/aws/plugins/module_utils/s3.py | 4
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py | 29
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudformation.py | 40
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/cloudtrail.py | 13
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_ami.py | 43
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py | 12
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py | 61
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eni.py | 27
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_instance.py | 260
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py | 116
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py | 163
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py | 8
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vol.py | 18
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py | 3
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py | 27
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py | 16
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_policy.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_role.py | 40
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/iam_role_info.py | 23
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/kms_key.py | 79
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/kms_key_info.py | 50
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_event.py | 249
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/lambda_info.py | 2
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster.py | 10
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group.py | 275
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group_info.py | 157
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_engine_versions_info.py | 388
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_instance.py | 12
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/rds_instance_param_group.py (renamed from ansible_collections/amazon/aws/plugins/modules/rds_param_group.py) | 15
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/route53_health_check.py | 12
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_bucket.py | 798
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_object.py | 162
-rw-r--r--  ansible_collections/amazon/aws/plugins/modules/s3_object_info.py | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml | 42
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml | 13
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/meta/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/alb_with_multiple_listener_certs.yml | 127
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/files/deny-assume.json | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml | 61
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml | 5
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml | 86
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml | 20
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml | 10
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml | 14
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml | 30
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml | 40
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml | 4
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/aliases | 3
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/defaults/main.yaml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/tasks/main.yaml | 328
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml | 189
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml | 56
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml | 2
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/library/test_s3_upload_multipart.py | 137
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_multipart_upload.yml | 185
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml | 51
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml | 7
-rw-r--r--  ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml | 8
-rw-r--r--  ansible_collections/amazon/aws/tests/sanity/ignore-2.18.txt | 1
-rw-r--r--  ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py | 68
-rw-r--r--  ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_resource_transforms.py | 6
-rw-r--r--  ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py | 12
-rw-r--r--  ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_sort_json_policy_dict.py | 61
-rw-r--r--  ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py | 137
-rw-r--r--  ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_event.py | 544
-rw-r--r--  ansible_collections/amazon/aws/tox.ini | 12
119 files changed, 5486 insertions, 1329 deletions
diff --git a/ansible_collections/amazon/aws/CHANGELOG.rst b/ansible_collections/amazon/aws/CHANGELOG.rst
index 219d962b4..f867fc9e4 100644
--- a/ansible_collections/amazon/aws/CHANGELOG.rst
+++ b/ansible_collections/amazon/aws/CHANGELOG.rst
@@ -4,6 +4,104 @@ amazon.aws Release Notes
.. contents:: Topics
+v8.0.0
+======
+
+Release Summary
+---------------
+
+This major release brings several new features, bug fixes, and deprecations. It also removes some previously deprecated functionality from ``iam_role``, ``iam_role_info`` and ``module_utils.policy``, and drops support for ``ansible-core<2.15``.
+
+Minor Changes
+-------------
+
+- autoscaling_group - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- cloudformation - apply automatic retries when paginating through stack events without a filter (https://github.com/ansible-collections/amazon.aws/pull/2049).
+- cloudtrail - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- ec2_instance - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- ec2_vol - Ensure the volume state is not ``deleted`` or ``deleting`` when trying to delete a volume, to guarantee idempotency (https://github.com/ansible-collections/amazon.aws/pull/2052).
+- ec2_vol - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- elb_classic_lb - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- kms_key - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- lambda_event - Add support for setting the ``maximum_batching_window_in_seconds`` option; see the sketch after this list (https://github.com/ansible-collections/amazon.aws/pull/2025).
+- module_utils/botocore - support sets and tuples of errors as well as lists (https://github.com/ansible-collections/amazon.aws/pull/1829).
+- module_utils/elbv2 - Add support for adding a listener with multiple certificates during ALB creation. Allows the ``elb_application_lb`` module to handle the mentioned use case (https://github.com/ansible-collections/amazon.aws/pull/1950).
+- module_utils/elbv2 - Add the possibility to update ``SslPolicy``, ``Certificates`` and ``AlpnPolicy`` for TLS listeners (https://github.com/ansible-collections/amazon.aws/issues/1198).
+- rds_instance - Allow passing empty list to ``enable_cloudwatch_logs_exports`` in order to remove all existing exports (https://github.com/ansible-collections/amazon.aws/pull/1917).
+- s3_bucket - refactor s3_bucket module code for improved readability and maintainability (https://github.com/ansible-collections/amazon.aws/pull/2057).
+- s3_object - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+
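A minimal sketch of the new ``lambda_event`` batching option; the function name and queue ARN below are placeholders, not values taken from this change::

    - name: Map an SQS queue to a Lambda function with a batching window
      amazon.aws.lambda_event:
        state: present
        event_source: sqs
        function_name: example-function  # placeholder
        source_params:
          source_arn: arn:aws:sqs:us-east-1:123456789012:example-queue  # placeholder
          enabled: true
          batch_size: 20  # batch sizes above 10 need a batching window on SQS
          maximum_batching_window_in_seconds: 5
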
+Breaking Changes / Porting Guide
+--------------------------------
+
+- amazon.aws collection - Support for ansible-core < 2.15 has been dropped (https://github.com/ansible-collections/amazon.aws/pull/2093).
+- iam_role - ``iam_role.assume_role_policy_document`` is no longer converted from CamelCase to snake_case (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- iam_role_info - ``iam_role.assume_role_policy_document`` is no longer converted from CamelCase to snake_case (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- kms_key - the ``policies`` return value has been renamed to ``key_policies``; its contents have not changed (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- kms_key_info - the ``policies`` return value has been renamed to ``key_policies``; its contents have not changed. See the sketch after this list (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- lambda_event - ``batch_size`` no longer defaults to 100. According to the boto3 API (https://boto3.amazonaws.com/v1/documentation/api/1.26.78/reference/services/lambda.html#Lambda.Client.create_event_source_mapping), ``batch_size`` defaults to 10 for SQS sources and to 100 for stream sources (https://github.com/ansible-collections/amazon.aws/pull/2025).
+
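A minimal sketch of consuming the renamed return value; the alias name is a placeholder, and the ``kms_keys`` list is assumed from the module's documented return shape::

    - name: Look up a KMS key
      amazon.aws.kms_key_info:
        alias: example-alias  # placeholder
      register: key_facts

    - name: Reference key_policies (named `policies` before 8.0.0)
      ansible.builtin.debug:
        var: key_facts.kms_keys[0].key_policies
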
+Deprecated Features
+-------------------
+
+- aws_ec2 inventory plugin - removal of the previously deprecated ``include_extra_api_calls`` option has been assigned to release 9.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- cloudformation - the ``template`` parameter has been deprecated and will be removed in a release after 2026-05-01. The ``template_body`` parameter can be used in conjunction with the lookup plugin, as shown in the sketch after this list (https://github.com/ansible-collections/amazon.aws/pull/2048).
+- iam_policy - removal of the previously deprecated ``policies`` return key has been assigned to release 9.0.0. Use the ``policy_names`` return key instead (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- module_utils.botocore - the ``boto3`` parameter for ``get_aws_connection_info()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- module_utils.botocore - the ``boto3`` parameter for ``get_aws_region()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- module_utils.ec2 - the ``boto3`` parameter for ``get_ec2_security_group_ids_from_names()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- rds_param_group - the ``rds_param_group`` module has been renamed to ``rds_instance_param_group``. The usage of the module has not changed. The rds_param_group alias will be removed in version 10.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2058).
+
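A minimal sketch of the suggested replacement for the deprecated ``template`` parameter, assuming a placeholder stack name and local template path::

    - name: Create a stack from a local template via template_body and the file lookup
      amazon.aws.cloudformation:
        stack_name: example-stack  # placeholder
        state: present
        template_body: "{{ lookup('ansible.builtin.file', 'templates/example-stack.yml') }}"  # placeholder path
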
+Removed Features (previously deprecated)
+----------------------------------------
+
+- iam_role - the ``iam_role.assume_role_policy_document_raw`` return value has been deprecated. ``iam_role.assume_role_policy_document`` now returns the same format as ``iam_role.assume_role_policy_document_raw`` (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- iam_role_info - the ``iam_role.assume_role_policy_document_raw`` return value has been deprecated. ``iam_role.assume_role_policy_document`` now returns the same format as ``iam_role.assume_role_policy_document_raw`` (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- module_utils.policy - the previously deprecated ``sort_json_policy_dict()`` function has been removed, consider using ``compare_policies()`` instead (https://github.com/ansible-collections/amazon.aws/pull/2052).
+
+Bugfixes
+--------
+
+- elb_classic_lb - fixes a bug where ``proxy_protocol`` not being set, or being set to ``None``, could result in unexpected behaviour or errors (https://github.com/ansible-collections/amazon.aws/pull/2049).
+- lambda_event - Fix handling of ``batch_size`` values greater than 10 by enabling support for setting ``maximum_batching_window_in_seconds`` (https://github.com/ansible-collections/amazon.aws/pull/2025).
+- lambda_event - Retrieve function ARN using AWS API (get_function) instead of building it with AWS account information (https://github.com/ansible-collections/amazon.aws/issues/1859).
+
+v7.6.0
+======
+
+Release Summary
+---------------
+
+This release brings several bugfixes, minor changes and some new rds modules (``rds_cluster_param_group``, ``rds_cluster_param_group_info`` and ``rds_engine_versions_info``). It also introduces a deprecation for the ``cloudformation`` module.
+
+Minor Changes
+-------------
+
+- ec2_instance - add support for the ``host`` option in ``placement.tenancy``; see the sketch after this list (https://github.com/ansible-collections/amazon.aws/pull/2026).
+- ec2_vol - Ensure the volume state is not ``deleted`` or ``deleting`` when trying to delete a volume, to guarantee idempotency (https://github.com/ansible-collections/amazon.aws/pull/2052).
+
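A minimal sketch of the new tenancy value, assuming placeholder instance and AMI identifiers::

    - name: Launch an instance with Dedicated Host tenancy
      amazon.aws.ec2_instance:
        name: example-dedicated-host-instance  # placeholder
        image_id: ami-0123456789abcdef0  # placeholder
        instance_type: c5.large
        placement:
          tenancy: host
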
+Deprecated Features
+-------------------
+
+- cloudformation - the ``template`` parameter has been deprecated and will be removed in a release after 2026-05-01. The ``template_body`` parameter can be used in conjunction with the lookup plugin (https://github.com/ansible-collections/amazon.aws/pull/2048).
+- module_utils.botocore - the ``boto3`` parameter for ``get_aws_connection_info()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- module_utils.botocore - the ``boto3`` parameter for ``get_aws_region()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- module_utils.ec2 - the ``boto3`` parameter for ``get_ec2_security_group_ids_from_names()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+
+Bugfixes
+--------
+
+- iam_managed_policy - fixes a bug that caused a ``ParamValidationError`` when attempting to delete a policy that's attached to a role or a user (https://github.com/ansible-collections/amazon.aws/issues/2067).
+- iam_role_info - fixes a bug in handling paths missing the ``/`` prefix and/or suffix (https://github.com/ansible-collections/amazon.aws/issues/2065).
+- s3_object - fix an idempotency issue when copying an object uploaded using multipart upload (https://github.com/ansible-collections/amazon.aws/issues/2016).
+
+New Modules
+-----------
+
+- rds_cluster_param_group - Manage RDS cluster parameter groups (see the sketch after this list).
+- rds_cluster_param_group_info - Describes the properties of a specific RDS cluster parameter group.
+- rds_engine_versions_info - Describes the properties of specific versions of DB engines.
+
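A minimal sketch of the new cluster parameter group modules; the group name is a placeholder and the option names are assumptions to be checked against the module documentation::

    - name: Create an Aurora MySQL cluster parameter group
      amazon.aws.rds_cluster_param_group:
        state: present
        name: example-aurora-params  # placeholder
        description: Example cluster parameter group
        db_parameter_group_family: aurora-mysql8.0  # assumed option name

    - name: Describe the cluster parameter group
      amazon.aws.rds_cluster_param_group_info:
        name: example-aurora-params
      register: cluster_pg
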
v7.5.0
======
@@ -244,6 +342,69 @@ New Modules
- ec2_import_image_info - Gather information about import virtual machine tasks
- rds_global_cluster_info - Obtain information about Aurora global database clusters
+v6.5.4
+======
+
+Release Summary
+---------------
+
+This release includes bugfixes for the ``cloudwatchlogs_log_group_info`` module and the inventory plugins.
+
+Bugfixes
+--------
+
+- cloudwatchlogs_log_group_info - Implement exponential backoff when making API calls to prevent throttling exceptions (https://github.com/ansible-collections/amazon.aws/issues/2011).
+- plugin_utils.inventory - Ensure templated options in lookup plugins are converted (https://github.com/ansible-collections/amazon.aws/issues/1955).
+
+v6.5.3
+======
+
+Release Summary
+---------------
+
+This release includes bugfixes for the ``cloudwatchevent_rule`` module and the ``secretsmanager_secret`` lookup plugin.
+
+Bugfixes
+--------
+
+- cloudwatchevent_rule - Fix to avoid adding quotes to JSON input for provided input_template (https://github.com/ansible-collections/amazon.aws/pull/1883).
+- lookup/secretsmanager_secret - fix an issue where, when a nested secret is missing and ``on_missing`` is set to ``warn``, the lookup raised an error instead of emitting a warning message (https://github.com/ansible-collections/amazon.aws/issues/1781).
+
+v6.5.2
+======
+
+Release Summary
+---------------
+
+This release includes a bugfix for the ``amazon.aws.aws_ec2`` inventory plugin when retrieving information for more than 40 instances with ``use_ssm_inventory``.
+
+Bugfixes
+--------
+
+- plugins/inventory/aws_ec2 - Fix failure when retrieving information for more than 40 instances with use_ssm_inventory (https://github.com/ansible-collections/amazon.aws/issues/1713).
+
+v6.5.1
+======
+
+Release Summary
+---------------
+
+This release includes several bugfixes.
+
+Minor Changes
+-------------
+
+- ec2_vpc_subnet - use ``wait_timeout`` to also control maximum time to wait for initial creation of subnets (https://github.com/ansible-collections/amazon.aws/pull/1848).
+
+Bugfixes
+--------
+
+- ec2_instance - retry API call if we get ``InvalidInstanceID.NotFound`` error (https://github.com/ansible-collections/amazon.aws/pull/1650).
+- ec2_vpc_subnet - cleanly handle failure when subnet isn't created in time (https://github.com/ansible-collections/amazon.aws/pull/1848).
+- s3_object - Fix a typo that caused a false deprecation warning when setting ``overwrite=latest`` (https://github.com/ansible-collections/amazon.aws/pull/1847).
+- s3_object - fixed a ``NoSuchTagSet`` error when the S3 endpoint doesn't support tags (https://github.com/ansible-collections/amazon.aws/issues/1607).
+- s3_object - when doing a put and specifying ``Content-Type`` in metadata, this module (since 6.0.0) erroneously set the ``Content-Type`` to ``None``, causing the put to fail. The fix now correctly honours the specified ``Content-Type`` (https://github.com/ansible-collections/amazon.aws/issues/1881).
+
v6.5.0
======
diff --git a/ansible_collections/amazon/aws/CI.md b/ansible_collections/amazon/aws/CI.md
index adc42482a..9b6c78590 100644
--- a/ansible_collections/amazon/aws/CI.md
+++ b/ansible_collections/amazon/aws/CI.md
@@ -2,7 +2,7 @@
## AWS Collections
-GitHub Actions are used to run the Continuous Integration for amazon.aws collection. The workflows used for the CI can be found [here](https://github.com/ansible-collections/amazon.aws/tree/stable-7/.github/workflows). These workflows include jobs to run the unit tests, integration tests, sanity tests, linters, changelog check and doc related checks. The following table lists the python and ansible versions against which these jobs are run.
+GitHub Actions are used to run the Continuous Integration for amazon.aws collection. The workflows used for the CI can be found [here](https://github.com/ansible-collections/amazon.aws/tree/stable-8/.github/workflows). These workflows include jobs to run the unit tests, integration tests, sanity tests, linters, changelog check and doc related checks. The following table lists the python and ansible versions against which these jobs are run.
| Jobs | Description | Python Versions | Ansible Versions |
| ------ |-------| ------ | -----------|
diff --git a/ansible_collections/amazon/aws/FILES.json b/ansible_collections/amazon/aws/FILES.json
index 61352c6b6..ad0709691 100644
--- a/ansible_collections/amazon/aws/FILES.json
+++ b/ansible_collections/amazon/aws/FILES.json
@@ -221,7 +221,7 @@
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fab623a9b576e9d450f5285c5ad77eed36f30882a396eeba97046f4b8fdbf3cd",
+ "chksum_sha256": "d715aea5ddd297081527d05cc6d228ce4d3e181dcc9bb050ccbd6a6d8952d73a",
"format": 1
},
{
@@ -256,7 +256,7 @@
"name": "docs/docsite/rst/CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f60899f9e09f217d9c8963676ddad7d070ce9233e0f32c02b96ad1839ec3cd9f",
+ "chksum_sha256": "da8a721530a2d8c3c3e98ddc0f99b457215be51f4dac8b2a89f388a4ed0813c9",
"format": 1
},
{
@@ -298,7 +298,7 @@
"name": "docs/docsite/links.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "40b9f123ae7842857f70b823dd5620cc7ad06c6e1f06983ad581f5aa21029789",
+ "chksum_sha256": "b72e5133d2ef165b4ab66aed1a59121dcf5070b78751ef7af15edbc0f306bb14",
"format": 1
},
{
@@ -312,7 +312,7 @@
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2fabb91839258dc50bcf1925e53f0aafdeb2f187fc252f2f70939d667207ec7c",
+ "chksum_sha256": "ac5c11f98b5966b21849b80f9d34c786433a12160e86aa537a0b4f19f7302ef9",
"format": 1
},
{
@@ -431,7 +431,7 @@
"name": "plugins/inventory/aws_ec2.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f3f3aa496835abc1f5d3686d68ff5e50252fdf46c491ef6effc85815ec3e36c3",
+ "chksum_sha256": "0b71ac6256e1d399db81ab54a414d3bed2a00628198004840113106f8e305bf9",
"format": 1
},
{
@@ -459,21 +459,21 @@
"name": "plugins/lookup/aws_collection_constants.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2be170ee49ffe1032dbf2bd6bfd4721599bef128bb4424b707856175beba8b3b",
+ "chksum_sha256": "b3952eec1b6fccb10b8755b7d1478e00abd1e3bfb255979dc438c588a6cf83c7",
"format": 1
},
{
"name": "plugins/lookup/aws_service_ip_ranges.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "002391d821e70902d5d58f111fa095552d6236f493d9bbefc44f797ba3dcf14c",
+ "chksum_sha256": "a9bbcb1b4e3c504a928e51854e3f920628ddc470166215daec995e742950bf2f",
"format": 1
},
{
"name": "plugins/lookup/secretsmanager_secret.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "89cf70f3f030acf9773e8a9b8aef94e5c424d675a4099fa33075b13a32167e80",
+ "chksum_sha256": "2ed5f6b922ef2c7bc4b803312cbcf0d1191a26572a311674a60746ae3c0297c7",
"format": 1
},
{
@@ -494,7 +494,7 @@
"name": "plugins/module_utils/acm.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b7d7425e00290759785de0f4c83bd6247e3fb4745a2da48721981c76865fa648",
+ "chksum_sha256": "9a99b8632e7fe768c5d78cec4936784b342a76d7da8b9b1f87b7033e55e54247",
"format": 1
},
{
@@ -522,7 +522,7 @@
"name": "plugins/module_utils/botocore.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "92fd10ff6db03a74d5bb0f973e956a33f09d24489e608506e52899175684eda8",
+ "chksum_sha256": "309acd7b7183d6b12f09539366d32fc46e517a2ee904afc2f4fb1cd433eafb43",
"format": 1
},
{
@@ -543,7 +543,7 @@
"name": "plugins/module_utils/common.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cef7b396d560a646961755d2a54c7131e553dfe26fbb26e04be073cce5bb0095",
+ "chksum_sha256": "b4931cbe751c155b1c71aae1b5c36f22301d289c7b2051e821fe51613c8a55a9",
"format": 1
},
{
@@ -564,7 +564,7 @@
"name": "plugins/module_utils/ec2.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "936be7a0317850e190866fb6056c9cadb0b478be0b592d50951664ceba1e9b3d",
+ "chksum_sha256": "a3238f9f1ce1f1c09a9c515c59d403088f814d2b7b5c1c65b9954544e5c44236",
"format": 1
},
{
@@ -578,7 +578,7 @@
"name": "plugins/module_utils/elbv2.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d15f3ae29632e8d16bc76ca410c25f5449f350aba78dae643febc8a763f99f04",
+ "chksum_sha256": "8092136fb3d8803ccf2d9474d7aa916b425283e61c6826ca5038f9a490cceb35",
"format": 1
},
{
@@ -599,28 +599,28 @@
"name": "plugins/module_utils/iam.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1cd5d5532049e4afd2858a35480cd173f72c4ed9174c67bb26186e47fe183ba5",
+ "chksum_sha256": "b2a6130696f5323dab36892f695d485fe89d0f04667bd08cb1f5d0c70c5a1d9c",
"format": 1
},
{
"name": "plugins/module_utils/modules.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a2ee4448ed60b569db05ccd679fe94283764369e73c2065c2ffdd3d98fb00486",
+ "chksum_sha256": "84a3a62d80370c61e1cd95816c8f328b3b3dfc863a0c6785a0c51fcd3af8f4ee",
"format": 1
},
{
"name": "plugins/module_utils/policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "506ff6fc6d3505c88609520d9a41e26f9d976f003d0f06cfe4d9bba1cf86c65c",
+ "chksum_sha256": "e8f25c0113d98d8941a5fc13b756c05ba0f4e6368f3fccfa4810d80d67b53964",
"format": 1
},
{
"name": "plugins/module_utils/rds.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6471ccfddef1b9e14da5ddbba61b6da6aeea93c49544e4c13fc865b81f0e9164",
+ "chksum_sha256": "1623ea7cd2d4536e7a7497a822fa7846749a08a1524bf496ad1374885dce665f",
"format": 1
},
{
@@ -641,7 +641,7 @@
"name": "plugins/module_utils/s3.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fa3caa0404d81c7a15ece8d7088f08c69626a2e08dcabf187771c9d6e84868b2",
+ "chksum_sha256": "2f3ec99690f68c29457d4b04b0fb18755647fa409a87669f06360fe8e7b9aca7",
"format": 1
},
{
@@ -697,7 +697,7 @@
"name": "plugins/modules/autoscaling_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58117ad5dbf878ed764d0449c3dfae62e666815fa308eaebefc0bee7ca773a27",
+ "chksum_sha256": "f8c89e7496adb20dbc3f3cb1934efc033e944fb6657e52c3143338e7ea25dce8",
"format": 1
},
{
@@ -795,7 +795,7 @@
"name": "plugins/modules/cloudformation.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fa729a1ddb3196b4e6227c7eaa535edf45f6dc5094ed57b51140fad233ae87f6",
+ "chksum_sha256": "8db5349baa49448cc4fbad544af33a7c9cec6aa7894bb3ceddda30a465c3574c",
"format": 1
},
{
@@ -809,7 +809,7 @@
"name": "plugins/modules/cloudtrail.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4804d5364e1681a1c2926cc84872865eb62e20d57b7bca997de67ce709616af8",
+ "chksum_sha256": "e26c260292647f658be27b38b57aeba8802cd9891ac2984f931f704e05814838",
"format": 1
},
{
@@ -865,14 +865,14 @@
"name": "plugins/modules/ec2_ami.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0757b11eb9acef39eb674e47acd89a610fcead711a4c8de303b10bad7c900517",
+ "chksum_sha256": "74896198bfee77da5fc121f7d3e9cffcd33fcf331b94138980ab428b5bf8a879",
"format": 1
},
{
"name": "plugins/modules/ec2_ami_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b97d9c04ec5f504d8e1284eebd074fc4ce268700601ca09e9c57ce4f91e955c9",
+ "chksum_sha256": "7d59b2d8478ef918340be80a7c913381770764c9c1f3cb5416b0cb7a838626bd",
"format": 1
},
{
@@ -886,21 +886,21 @@
"name": "plugins/modules/ec2_eip_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5c9406d58cddcb5e4f4bee96273ab8f264a0b5750eb4498c81f0713680607520",
+ "chksum_sha256": "eaaf4c42294a5512b1dd47e5b5c0e98fdb90d2cfee3eff7fab479e0f59d5ac0d",
"format": 1
},
{
"name": "plugins/modules/ec2_eni.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "88d056f7fc85634b30f5c00501c1cc67d3a2c0dc39455398c5e14668079ee313",
+ "chksum_sha256": "de0e264d45e50aae330b18d2102cf75f847c1215a6a09e0ff20e1ca4b8196368",
"format": 1
},
{
"name": "plugins/modules/ec2_eni_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f1db2ca6dc313be56cce8a4970c29cd38536172745772e5ab9840b4817e753ae",
+ "chksum_sha256": "4e1bf14d5bc08404255eb0f8145f09ed3fb1f34221a4efe74979fbb099f6266a",
"format": 1
},
{
@@ -921,14 +921,14 @@
"name": "plugins/modules/ec2_instance.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b731a8ca8cc6cb9760700b68bb72bcaf4f31218569886e4237203c073b493aa7",
+ "chksum_sha256": "ffd0c2409add19494978e82ca7451aed82d416dc6fc3702a0dff629cbe6ece6e",
"format": 1
},
{
"name": "plugins/modules/ec2_instance_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "036e792937d3a55b5a43094e74c4c0c4b7b9982d3eded7b7d28f67491dd58cb0",
+ "chksum_sha256": "6688f94dfe4a62894c64f508b72a46bed2a54d05a9a3190214a3ffd9ebf0da00",
"format": 1
},
{
@@ -949,21 +949,21 @@
"name": "plugins/modules/ec2_metadata_facts.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d27460949e9c35fc706d1602aad4137fb2527461f355056f81d9f2b714cdfd15",
+ "chksum_sha256": "27929f05a4f472bb3bce739921f5327a263ae24e17920aacd910a615850c586e",
"format": 1
},
{
"name": "plugins/modules/ec2_security_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e86e40d3f1074e93d19a63fd8d369fabc85b94a067cb49d946feb57f71dadecb",
+ "chksum_sha256": "fc3698f0966e826e886f177c3f4058d6f1c4e82035e40815f8e61c720456fe5e",
"format": 1
},
{
"name": "plugins/modules/ec2_security_group_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3bdf6ad7f9a88f1e87fb3963dd4d5189713ad08cc338471179ff6b87e0f7e318",
+ "chksum_sha256": "50f4bbe475ec1bca18c34f5bb097b92f7933361caed1640bceb5e27a6739c361",
"format": 1
},
{
@@ -1012,7 +1012,7 @@
"name": "plugins/modules/ec2_vol.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d24fcfef21b2a977ba4ba8a96e4de3ae83981f9c3c756a56c4bcdc94ec9ce93f",
+ "chksum_sha256": "fe07a9bfb7925ba5004a9f2be27573324e0360bc29c0fd9dc0d6e01cfc12dfc9",
"format": 1
},
{
@@ -1103,7 +1103,7 @@
"name": "plugins/modules/ec2_vpc_route_table.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c7b8b59c5ef4102fe6a095b8d48fb3e2c83703e81f9c9f77184c0c0363683a10",
+ "chksum_sha256": "bb1617639fc00615a10cc55939a3ece5a2e85d14d51268d2e9b85be39bef5548",
"format": 1
},
{
@@ -1131,7 +1131,7 @@
"name": "plugins/modules/elb_application_lb.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2451345c585062e3c39a62da4291be7478e2db5a74f6f12f5783bce6fdc8961b",
+ "chksum_sha256": "ae5a975a37dfdf4facfbc5a94f34ddceeef89928faf95433b23d562c2079c872",
"format": 1
},
{
@@ -1145,7 +1145,7 @@
"name": "plugins/modules/elb_classic_lb.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "00664954f71d3ccd3441bda765c134589be5bc016cee802be92516bfedb7ec20",
+ "chksum_sha256": "ff5bd43133dc8176e0479501979e8b1bedbb7bf8507e206da31f954eb1ef7a5e",
"format": 1
},
{
@@ -1208,7 +1208,7 @@
"name": "plugins/modules/iam_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "664bb0af3653c14b6eb2abe5ed48fec6d0eed428ff714ffbea36b1f273462a4b",
+ "chksum_sha256": "00a602cd8eebb0eb8fdc305917b41f52f9ec5ba454e60bd213e65eb5c1cafc01",
"format": 1
},
{
@@ -1222,14 +1222,14 @@
"name": "plugins/modules/iam_role.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c173f8a6bd4e64f91a23d7dfef6f30e3a3fd4dffe797d8f906a102de930664fd",
+ "chksum_sha256": "9061d5324b63cdc58a842c5e1d21892f27ac20df0aff2c97222c99e2c7b53317",
"format": 1
},
{
"name": "plugins/modules/iam_role_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4d384be35ea61b426a5ee9758e356b43e30d0d7bd071e50f8b192f06345a2c56",
+ "chksum_sha256": "7a3f682a8f8830a0275233a55a6ef9a8e314a55ebea53e4e861f55711fb1ec9f",
"format": 1
},
{
@@ -1250,14 +1250,14 @@
"name": "plugins/modules/kms_key.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d446696fb60d8da18aaf6eaec2d84e5eeb7a18c3d0589ce2e89c7d8b631c8d74",
+ "chksum_sha256": "e25568d292ce5b021470ae5dd3150f29fec6260622c90ee2c9f17aed3819d1b5",
"format": 1
},
{
"name": "plugins/modules/kms_key_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c5811daba5959a939dec7fc5936eec3024c825c720d5ddbb13309b5c5c965477",
+ "chksum_sha256": "8d29447319868152716b94e4fa952d35faecd5a39f8b6cfcd7d67b69562f5bdf",
"format": 1
},
{
@@ -1278,7 +1278,7 @@
"name": "plugins/modules/lambda_event.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2276f30d87f6c569e145a641611ac50e57b12b048fe69f3bffd515f0d3b23150",
+ "chksum_sha256": "3dde0c0a4e704e0489fdc68ff49a54d05f1d47ee61fca5910acda707dc1a1c71",
"format": 1
},
{
@@ -1292,7 +1292,7 @@
"name": "plugins/modules/lambda_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4bd9b8c9cdc11c0ae73fe4a252ad023aa2503e5e2d845d4f8dabd984b9242347",
+ "chksum_sha256": "a916feabe54856fd2ca1c190cc7f5f10f3706673454c7eaf18c2ae721cfc020c",
"format": 1
},
{
@@ -1320,7 +1320,7 @@
"name": "plugins/modules/rds_cluster.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "54abecf06781a6e1cb9abdbe6f4ce63ba742ff30fadc4c691dc378f366f837ab",
+ "chksum_sha256": "52d143815e168f6a8902300aee11477f12fdd433b6b06b0d8f2db48e3665f0a9",
"format": 1
},
{
@@ -1331,6 +1331,20 @@
"format": 1
},
{
+ "name": "plugins/modules/rds_cluster_param_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72280419ed4b96ffdd83f9c71b5ae11c9b2f9c91d8cc724835504b2eab772486",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rds_cluster_param_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88366c308385ac90b32d303fcd723e31a91bd2c943c5b23d072d52613389943c",
+ "format": 1
+ },
+ {
"name": "plugins/modules/rds_cluster_snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -1338,6 +1352,13 @@
"format": 1
},
{
+ "name": "plugins/modules/rds_engine_versions_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "611b8cd17f5f48e66102525884a8478ae8de94782a8b64f58efc7e49d45d2526",
+ "format": 1
+ },
+ {
"name": "plugins/modules/rds_global_cluster_info.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -1348,7 +1369,7 @@
"name": "plugins/modules/rds_instance.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "504e06fb4396104e8862c84c168592fba08be53442290308b81851e118290d5c",
+ "chksum_sha256": "85e1518a78d5a4b6d1edd522ceb113fc68ffc9ca408f960a774e54da51535d5c",
"format": 1
},
{
@@ -1359,6 +1380,13 @@
"format": 1
},
{
+ "name": "plugins/modules/rds_instance_param_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8773ac163a1751321085fc0a480e55cf006b6d03207965f19a8a14df3f9aec3d",
+ "format": 1
+ },
+ {
"name": "plugins/modules/rds_instance_snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -1380,13 +1408,6 @@
"format": 1
},
{
- "name": "plugins/modules/rds_param_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "549686e39733802d5ae6a9a07b1ccd463224e5eb414a1afab495082b24bcd369",
- "format": 1
- },
- {
"name": "plugins/modules/rds_snapshot_info.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -1411,7 +1432,7 @@
"name": "plugins/modules/route53_health_check.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c34142ed7d3f7728d6072e8389ee64c6223af92c2bef3e95bccc7b4329578468",
+ "chksum_sha256": "9bf3e8ea9182d544cd23b5b1a9aad5c6539bf9d1f5c26856c323837ed015a63e",
"format": 1
},
{
@@ -1432,7 +1453,7 @@
"name": "plugins/modules/s3_bucket.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e2e0a82a49cd95a5171cbf36964814f605cd12c9d4cb96e643cadabb8e216c1b",
+ "chksum_sha256": "590ff2f40a99b6cecc72c33f0fe410880336145438abd28aa60e9370e4705e9c",
"format": 1
},
{
@@ -1446,14 +1467,14 @@
"name": "plugins/modules/s3_object.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "47610c1c778b26def1d5c3aef8c5f6d2089537445ae420d1da5f1afb4a0b8600",
+ "chksum_sha256": "e17e5c421f076a0748478720a844df908d2350c46b63c8c03769995352d03da0",
"format": 1
},
{
"name": "plugins/modules/s3_object_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9dd2dce52f773c41a7ff167eb16511f0a15f6d56c64258b0c1cd380cb7632604",
+ "chksum_sha256": "a6b0837b0dceb593bb88eba9d765ad0274bae582ec74e19da038d6eda590b4f0",
"format": 1
},
{
@@ -1775,7 +1796,7 @@
"name": "tests/integration/targets/aws_region_info/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fb318b5de7438166fae97aee9ef2c91a1627b7b812e4b33ad1ac43abe9fddc5c",
+ "chksum_sha256": "9acb5a901f9cb39c4eb5f270fb82fd1650c01cc68b4f085b7427f7398581d9ea",
"format": 1
},
{
@@ -1887,7 +1908,7 @@
"name": "tests/integration/targets/backup_selection/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "62babbcf4801df3634de64cbdbbcb9799e7196e68e79ae2467eef00f94d006e3",
+ "chksum_sha256": "06017040cb63c7219aceb5f1355d0e57f59b94fa2d988bae246fe84c841e62ed",
"format": 1
},
{
@@ -2216,7 +2237,7 @@
"name": "tests/integration/targets/cloudtrail/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bf758e82bdee23917b64cb50d3819fcb0e37a08b839651667d650233a2912e95",
+ "chksum_sha256": "a9cb2880678971b4cf7c219d1386406a8aff391baee0595e152bf2989be03e9c",
"format": 1
},
{
@@ -3385,7 +3406,7 @@
"name": "tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8090dd20b9fd152460ecb440752342981b5a7dd622b70f21d285c8d237fc8427",
+ "chksum_sha256": "070f55805c4bf94cff515299783013b40d0da550fdff8e3c4fcd746ec25bd687",
"format": 1
},
{
@@ -4568,7 +4589,7 @@
"name": "tests/integration/targets/ec2_vol/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "34570fb0003b114ab3f32a96dd54baed32237644756383a4cbce1586b464ab3e",
+ "chksum_sha256": "1949441e222e6b7fe0a3f344850ada6bb8e28baa7385a380b735956b038a8193",
"format": 1
},
{
@@ -5044,7 +5065,21 @@
"name": "tests/integration/targets/elb_application_lb/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "117345e107fdfcd7c61eb0efd221c8dfc89f50f16995fed04ce042a696a206a1",
+ "chksum_sha256": "96485e804f115e34eb4dcc22296b533132a1b5e7377aaf3ebdcd1a8a1ca61792",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/elb_application_lb/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/elb_application_lb/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d6e22eb282a5ef5a9b95f7ea5a0ab29337ae87205ec20f7a29fd732a4d15f17",
"format": 1
},
{
@@ -5055,10 +5090,17 @@
"format": 1
},
{
+ "name": "tests/integration/targets/elb_application_lb/tasks/alb_with_multiple_listener_certs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba5a7dafb52e116b621694ef5dced0a4e083eac0856be442281155eca97c8b69",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/elb_application_lb/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b67635615261ebad66e54c874a157697fa8b1077ba4162f449054365c06c8425",
+ "chksum_sha256": "da7642df21db7298f3bbbe8ec97dd5639a56ed480a581b9d18918ca5a193387d",
"format": 1
},
{
@@ -5464,7 +5506,7 @@
"name": "tests/integration/targets/iam_group/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "aae88f8768d56e9f0da979319e7693c61c02e58b0fc4b42ec6e7e191cf28eca5",
+ "chksum_sha256": "192edd49b90afe7d78f0d0e7d13270d444debd1b9c23325857750c449a3ffedc",
"format": 1
},
{
@@ -5555,7 +5597,7 @@
"name": "tests/integration/targets/iam_instance_profile/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9e42e5dde8164648b63372350df5fcec9ba73c739385f5c3c72c10558f65d201",
+ "chksum_sha256": "c5d3dc19fa0c29a44b027cad450447041b1dd0f38934cce127c3076541cb2e11",
"format": 1
},
{
@@ -5590,7 +5632,21 @@
"name": "tests/integration/targets/iam_managed_policy/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "93d9ea38d4e024e6d3586b12ade39769ebb91141c2a7bf2b2ad6fc378d6a14d4",
+ "chksum_sha256": "759855f441d8979bd29a3059e400e54345e2651877806a0439f6e2c3fbc66a5c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iam_managed_policy/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iam_managed_policy/files/deny-assume.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "252bc63ef45bb6343320a9afacb88299ac8badf8b2cfbb2ecfd0a443dc28fa2f",
"format": 1
},
{
@@ -5618,7 +5674,7 @@
"name": "tests/integration/targets/iam_managed_policy/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "84b8e44df62276a2a8fb0be783a6c5443f3253fdf7bbeb013cbf7164ed6af3f6",
+ "chksum_sha256": "088fc86bbbd03afe0818993ab660b74aa4314bdbcd8cba76f7536b16e4cb23e2",
"format": 1
},
{
@@ -5667,7 +5723,7 @@
"name": "tests/integration/targets/iam_password_policy/tasks/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "17535f416ceea8bc36cd1ba23ff7b303d806ed83ca9363496baa255d5f106908",
+ "chksum_sha256": "15bc89da02be6f6d5cc9e527498b75531c4eaadd1ab368e8f36c22af376cc80b",
"format": 1
},
{
@@ -5758,7 +5814,7 @@
"name": "tests/integration/targets/iam_policy/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2ab3fbd83a86f0eeb1c53097aa9a3c44140d66c347f3b3a16f2dd91d02d126e4",
+ "chksum_sha256": "bbc9206834079d8c2a0496d79a6e70bc4fd8502c49f774db3e1a568a1846e19c",
"format": 1
},
{
@@ -5793,7 +5849,7 @@
"name": "tests/integration/targets/iam_role/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "741abccb048f212947c96cdb810e5c2894714b4d588ef572336e9e2b8d11e427",
+ "chksum_sha256": "359c0e7cdd4d39463467cb765ffe11b91c2796301d83b75edc8068b18d3cd573",
"format": 1
},
{
@@ -5856,77 +5912,77 @@
"name": "tests/integration/targets/iam_role/tasks/boundary_policy.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "64d9dd8ca827b13ec32824934db94c3854a78710382bacc5d685906054b47dd7",
+ "chksum_sha256": "3a98327df29d30b67036b3934446f5ccfc2edd99ebbac8fde56fdb85304cc039",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/complex_role_creation.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0a52547e18b9336fd168fc9f511deecbfad4356d0c65c08753dec9f50bbef78f",
+ "chksum_sha256": "4cc4b530c2e5d2773cbfd98bb1de5cd809bbfbb44d7d4eac7b42b5806cdc490b",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/creation_deletion.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7ed84bbc84db3f546f63df4fdc0a359cf523817edb3a0a1ff7e758b54876b162",
+ "chksum_sha256": "f54eaf422f769b49d439f4148a31d4cd56c95ad77b5d3c1f3efb2f56a3a86a8a",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/description_update.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8e58de3daadf95cb8479641bf23a89402c6a0802a4ec4f45745aeba3972b1c44",
+ "chksum_sha256": "87b762ee83d77421b78e71fc0b4ec017fad520f06a4349b9d6fc26ca38f42120",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/inline_policy_update.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6c82ffe31cb45ba34076251d0d71871f7b9fa2d2849271a0dda2090c1bc3d32f",
+ "chksum_sha256": "67486baab98a84c0ad6bffd21d1b29ff7609900476736d297b7e1707976e5390",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a56f46cb51b1a365bfbb62c0f986f5146389797f5c7a8a491537f5dfdc8b9818",
+ "chksum_sha256": "b970fdbfad230b92fa7ccbe35fcdc2cc560933d46ba1850221a829fa9a2c4afa",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/max_session_update.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ec05f81e22efca10da58ecc5523fb0cd89209eca5eaa85941c14483b6f0c95ce",
+ "chksum_sha256": "6a0f8d80351117f8781bc7ec3adccc300011a99fa2baf3152350aa0a16469d36",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/parameter_checks.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "32204f0d4d1c5f399d358b30c46e3ded2884214fac5aea6f5dd5fd0fa4490b57",
+ "chksum_sha256": "e29eef33395e6aeb9d754dba6d640298589623de5818c37cb006aa067acbf6a5",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/policy_update.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eb74e9f499be8906682a08ea75178de84b1e4302f4eb300df77eb198ca6d9e4b",
+ "chksum_sha256": "650898eef42a40fcb7f80e46360efdd93783b829a22153adeced56e3b967a7fd",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/role_removal.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d31edc2bc4c61e150adbec443b48c558b83764952ee007919836c53f2332adee",
+ "chksum_sha256": "84266d9d96f9a8a08b1e5beba54fd39ba49f5f50eca1de5477b251d79eec7992",
"format": 1
},
{
"name": "tests/integration/targets/iam_role/tasks/tags_update.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d130601264f877ded1a81afcc4d695f4b4458db5d7017c464f6a910a4d7f1c74",
+ "chksum_sha256": "427ee5a026f085bc0a3b5fa681b4ceee7ff9b2ce63c3adb0e9b27369c198cfa1",
"format": 1
},
{
@@ -5996,7 +6052,7 @@
"name": "tests/integration/targets/iam_user/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fca44566391f40361845be32c7178bf6dc49804b7f19bbd2f02314b956ab5bdb",
+ "chksum_sha256": "d0e8932f7042782ce6c06198d484fd5874ef1f26e9facf38cb2ca95207ea21de",
"format": 1
},
{
@@ -6157,7 +6213,7 @@
"name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e372db3e66570a3ec99cf82144c0ae3477065dd9589d17d4ca1d6d7c1dd877b1",
+ "chksum_sha256": "1268f6613b4fc6de091ce44d0414bd956904ae61f02a1166786e2646670d3cfc",
"format": 1
},
{
@@ -6626,14 +6682,14 @@
"name": "tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c782911da94cb4988bffc549f08e427fbc1bdf39f5a2afeb9cb00e000c8d627b",
+ "chksum_sha256": "076e4ad88c1c0c494e423b1385033afc295cb3aaee3eedc469704b0d7ab9c23d",
"format": 1
},
{
"name": "tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b6307eef0b11b17182fcf930a523e3a08f7a86714433abc5a697cca4ca98c287",
+ "chksum_sha256": "4de0a962cdebc773e46d8e98b0f85711ce8c1110967c7f866214c262e3956da2",
"format": 1
},
{
@@ -6773,7 +6829,7 @@
"name": "tests/integration/targets/lambda/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "158dacd3059d5f761f02bd8a6fa025c5ca3c4746097b6059702e9ec87e85971c",
+ "chksum_sha256": "e2acedb2e10b0a305fcf9a89a30d8b19ad270bfdad146054db1150944b3d141d",
"format": 1
},
{
@@ -6857,7 +6913,7 @@
"name": "tests/integration/targets/lambda_alias/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fad37b015cecc39aab3e080d572a96bd81d379e0f86093c4c50df66b1c26fd8d",
+ "chksum_sha256": "efc4da58e3aa37859b3f645e5a20ee9aec39026bbf08ab2599ce9d0d6e13b9ea",
"format": 1
},
{
@@ -6934,21 +6990,21 @@
"name": "tests/integration/targets/lambda_event/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8f2e0d97fcaed9c7b72de4dbcb3ccbfeccc6e736939061367f0e2d3e5548fae1",
+ "chksum_sha256": "a87cffe1267dd514f3f1dce793f135efd867852cbd7382872d15d65f6d7b4257",
"format": 1
},
{
"name": "tests/integration/targets/lambda_event/tasks/setup.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "38be23da5e06f42ee93238b265d6322edb3837645719284600ecc7126afadc0f",
+ "chksum_sha256": "563e40aaeb7ae433c3f4533c44e935dc2da9c0f1e7d5db6a1a5c19d0b409e823",
"format": 1
},
{
"name": "tests/integration/targets/lambda_event/tasks/teardown.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8a9a5e92990bbfe9a6cc8b54d63e69160bc627575da74f51bf399df9da663eb3",
+ "chksum_sha256": "aa57c6b258d8832221ecabed655bd4b830a557f4277641fda317885aab400dd7",
"format": 1
},
{
@@ -7067,7 +7123,7 @@
"name": "tests/integration/targets/lambda_policy/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0b8f7ec6930a42885e71c8544d31274966ad12a8ebb932f1d6238f3821d4096e",
+ "chksum_sha256": "abac3a6782941145983eee6fe69701d90e755c4e6e13b5862f557e3539690431",
"format": 1
},
{
@@ -7897,6 +7953,48 @@
"format": 1
},
{
+ "name": "tests/integration/targets/rds_cluster_param_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rds_cluster_param_group/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rds_cluster_param_group/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78700a32b519a47837c515e07f0866af6c17054c7a6bd92be13e658c96a5ee03",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rds_cluster_param_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rds_cluster_param_group/tasks/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "341c36bba5f85c189827ac74d41378957f5a042e39238ddb8bffb54f2a4b9bf1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rds_cluster_param_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81a91f0a74f0df45dacfdf9852dd53b3d4f91a4a5eea810c0b8d3f4e78181371",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/rds_cluster_promote",
"ftype": "dir",
"chksum_type": null,
@@ -8222,7 +8320,7 @@
"name": "tests/integration/targets/rds_instance_complex/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9c1211c77b57aa2f1b14a77d59fab7b3f6ffb6ed87a0877266302eb20af08ff1",
+ "chksum_sha256": "002a8503cfa24dd65864d91bf7ffc3b8e4fede1384daff2fcc767009a55cdcca",
"format": 1
},
{
@@ -8264,7 +8362,7 @@
"name": "tests/integration/targets/rds_instance_complex/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b4095a8bfeb09d5b42f6b122a8cfd3d2c1c38e905091b7099b970503e07b9264",
+ "chksum_sha256": "12a546f097ea2e3c14cfb763420c9d26b66f860d6b121e3ba334508d715f3c33",
"format": 1
},
{
@@ -8292,7 +8390,7 @@
"name": "tests/integration/targets/rds_instance_modify/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "662a79a2f3a0652893a1670e9d458a497041a9674fa3e845fc8a96b1ae06d3d5",
+ "chksum_sha256": "6f40a4edf26c98d33dd5052243c68709f961ef09c72958f4876e6f8adbb31981",
"format": 1
},
{
@@ -8320,7 +8418,7 @@
"name": "tests/integration/targets/rds_instance_modify/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d08bb138ad6e81e3b4f0466d5dd9e2e874ab44bfaccd8d327fb3bc9a453bf3bf",
+ "chksum_sha256": "6ceab35775a16e513f60a097890e13a6441ef67dcca8f7758bf8b9e9a68e6db9",
"format": 1
},
{
@@ -8516,7 +8614,7 @@
"name": "tests/integration/targets/rds_instance_snapshot/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "41d6531cd5e27ef6d38ae9b088278434b723c8fb1f710070d3180763bbc373a3",
+ "chksum_sha256": "716af89f882b306fe95c431a069bb9ee3c99a668d2648ca67fc9b6b159cea257",
"format": 1
},
{
@@ -8747,7 +8845,7 @@
"name": "tests/integration/targets/rds_option_group/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "157966ee1883147d42f021d1a10db168210a43ab8aa1bf9c974ee79ad68e5958",
+ "chksum_sha256": "c2e08a6dae20976ef57a6695bc4eeb46b34b3ccb22471fb557a2dad53dfd8ccb",
"format": 1
},
{
@@ -8831,7 +8929,7 @@
"name": "tests/integration/targets/rds_param_group/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "08b35a90309cc90693f6094e7ad8b78147e027ce522e58b3ae417584da2cd067",
+ "chksum_sha256": "545cb6217e6089547cb19e9ce4b9db90e88e26c03ed0a9cbbe9ada2f1103cd6e",
"format": 1
},
{
@@ -9377,7 +9475,7 @@
"name": "tests/integration/targets/s3_bucket_info/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ea2a43f6b1fc35b0f6997300fd3d552d1eb48f562853a8b6c38f9a485c6eae8d",
+ "chksum_sha256": "3ceaf43cde3fa8ba5c6e88b03f68cf2018a284cd5e5d8353f3aab7080d52325b",
"format": 1
},
{
@@ -9430,6 +9528,20 @@
"format": 1
},
{
+ "name": "tests/integration/targets/s3_object/library",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/s3_object/library/test_s3_upload_multipart.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43d9e289c074629a7d5d3d1b4dc42c3d6c178f37328f4f5b58a7a01f7914edc0",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/s3_object/meta",
"ftype": "dir",
"chksum_type": null,
@@ -9451,10 +9563,17 @@
"format": 1
},
{
+ "name": "tests/integration/targets/s3_object/tasks/copy_multipart_upload.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54e55d5f565dd5cb2d27c1d6e643aff1292bf498b2333f0a76127c4e704a9ac7",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/s3_object/tasks/copy_object.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a935fc78dc9973eac71decd7308d4e827c27a7f30b29c7c416f9f676163b4ec7",
+ "chksum_sha256": "b37f07be2da85fa70b7bc64336cb07b6e7830f534d72568eb971ccb80b3832e8",
"format": 1
},
{
@@ -9482,7 +9601,7 @@
"name": "tests/integration/targets/s3_object/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "697d66379fe4c4b33f82b1f7ac363f1e25331480201701b23d28494165346043",
+ "chksum_sha256": "749b020d702b900613fb0af184c973a352075cde62642885b54ecec7aaec2b5e",
"format": 1
},
{
@@ -9937,7 +10056,7 @@
"name": "tests/integration/targets/sts_assume_role/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4ed33b9c99c2880e76ec8f9fba9339414484cc956cd06b69d22cc41d1603c7d8",
+ "chksum_sha256": "32acd11db8baad6e636a80d2e7c1cbf43020b1f684e7254e78c9f4e745ef63fe",
"format": 1
},
{
@@ -10025,6 +10144,13 @@
"format": 1
},
{
+ "name": "tests/sanity/ignore-2.18.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a20cbabc70cf2098b78b862d252444c4699d58d9c4b7a71fe66dd3768c75c6af",
+ "format": 1
+ },
+ {
"name": "tests/unit",
"ftype": "dir",
"chksum_type": null,
@@ -10112,7 +10238,7 @@
"name": "tests/unit/module_utils/botocore/test_is_boto3_error_code.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c714e748e66b64bdd3e00aa7428ecfbfa91b7d4e2cedd41ce2c76ebf308af9e6",
+ "chksum_sha256": "0b8f2e621125c0a9bff6422d8135378711aff4d63476211999de0b7038d6f88e",
"format": 1
},
{
@@ -10294,7 +10420,7 @@
"name": "tests/unit/module_utils/iam/test_iam_resource_transforms.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9ba95e45f5d2c2502a8ae6ea6dacc35e340f931a4d4d7fde2064ba0f89018ed0",
+ "chksum_sha256": "cdf2c63ba6eb4f5b50a406d5ccab09724cd5f08d7b4a50480bfe5352c9eba0ed",
"format": 1
},
{
@@ -10343,7 +10469,7 @@
"name": "tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "79fd79b924fb43e8f9b793bba30117beb2d210b536ec98f8c1d9fbeb6d68f560",
+ "chksum_sha256": "d50e7d1b4413e827cce2cd2c48e897ed34943d25da2fa23176017b6fa51f7206",
"format": 1
},
{
@@ -10403,13 +10529,6 @@
"format": 1
},
{
- "name": "tests/unit/module_utils/policy/test_sort_json_policy_dict.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "85396f02abd5a62aaaebcaf17aa26481065d363fb30c9f002e70e4f9013480db",
- "format": 1
- },
- {
"name": "tests/unit/module_utils/retries",
"ftype": "dir",
"chksum_type": null,
@@ -10518,7 +10637,7 @@
"name": "tests/unit/module_utils/test_elbv2.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b29a1b8fb6b96bc4bff39292b3b73a9d9416c64745c977f7bda6643e1d07a5bf",
+ "chksum_sha256": "bdaa8265c4c769c6f31f6c62e8c4aa81db0bf193f368199d712ca836eeb2ec08",
"format": 1
},
{
@@ -11558,6 +11677,13 @@
"format": 1
},
{
+ "name": "tests/unit/plugins/modules/test_lambda_event.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41efee03ff9ca344cf8eb91104baec65785cab74722dee324e7d377d8505b263",
+ "format": 1
+ },
+ {
"name": "tests/unit/plugins/modules/test_lambda_layer.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -11680,14 +11806,14 @@
"name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f60899f9e09f217d9c8963676ddad7d070ce9233e0f32c02b96ad1839ec3cd9f",
+ "chksum_sha256": "da8a721530a2d8c3c3e98ddc0f99b457215be51f4dac8b2a89f388a4ed0813c9",
"format": 1
},
{
"name": "CI.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "af0c6bdfd9336e31a336e71db052555ecd691c3edd8f094382a69fa372db41a4",
+ "chksum_sha256": "87f530f9758927edd7cd21f5628548e9680a8ab51dc32f034c4d5378a705b315",
"format": 1
},
{
@@ -11708,7 +11834,7 @@
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ca9ea646ad4dfc288a174b6f64ce7362a6fbc01fac7236be815b8a679e305cdb",
+ "chksum_sha256": "6513a5b89d2514593c661cc251029be394fdda83185f373f3ed2053d2b3f2f99",
"format": 1
},
{
@@ -11743,7 +11869,7 @@
"name": "tox.ini",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1201123003e01af03ffb7cf8669ea1fc7a8ddc0bff1a181c2484d0bd0087ed5e",
+ "chksum_sha256": "afbb20decd896a762321709bf0813adc283c10e9ae776355021f779dff5c54ce",
"format": 1
}
],
diff --git a/ansible_collections/amazon/aws/MANIFEST.json b/ansible_collections/amazon/aws/MANIFEST.json
index cd19577e3..a1dd8b884 100644
--- a/ansible_collections/amazon/aws/MANIFEST.json
+++ b/ansible_collections/amazon/aws/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "amazon",
"name": "aws",
- "version": "7.5.0",
+ "version": "8.0.0",
"authors": [
"Ansible (https://github.com/ansible)"
],
@@ -17,7 +17,7 @@
"license_file": "COPYING",
"dependencies": {},
"repository": "https://github.com/ansible-collections/amazon.aws",
- "documentation": "https://ansible-collections.github.io/amazon.aws/branch/stable-7/collections/amazon/aws/index.html",
+ "documentation": "https://ansible-collections.github.io/amazon.aws/branch/main/collections/amazon/aws/index.html",
"homepage": "https://github.com/ansible-collections/amazon.aws",
"issues": "https://github.com/ansible-collections/amazon.aws/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc"
},
@@ -25,7 +25,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "34bdeb6686c662a0a524953b80ae953b67d328542ba5d43bc1e4058c45ed4136",
+ "chksum_sha256": "3973d6f227d778fb935a07725724715ec807dc1b82e133e1f9fd73b95968b735",
"format": 1
},
"format": 1
diff --git a/ansible_collections/amazon/aws/README.md b/ansible_collections/amazon/aws/README.md
index d5e751b91..2fa2b74f7 100644
--- a/ansible_collections/amazon/aws/README.md
+++ b/ansible_collections/amazon/aws/README.md
@@ -5,7 +5,7 @@ AWS related modules and plugins supported by the Ansible community are in the [c
## Ansible version compatibility
-Tested with the Ansible Core >= 2.13.0 versions, and the current development version of Ansible. Ansible Core versions prior to 2.13.0 are not supported.
+Tested with Ansible Core versions >= 2.15.0 and the current development version of Ansible. Ansible Core versions prior to 2.15.0 are not supported.
## Python version compatibility
@@ -43,7 +43,7 @@ All support for the original AWS SDK `boto` was removed in release 4.0.0.
## Included content
<!--start collection content-->
-See the complete list of collection content in the [Plugin Index](https://ansible-collections.github.io/amazon.aws/branch/stable-7/collections/amazon/aws/index.html#plugin-index).
+See the complete list of collection content in the [Plugin Index](https://ansible-collections.github.io/amazon.aws/branch/stable-8/collections/amazon/aws/index.html#plugin-index).
<!--end collection content-->
@@ -107,9 +107,9 @@ You can either call modules by their Fully Qualified Collection Name (FQCN), suc
## Contributing to this collection
We welcome community contributions to this collection. If you find problems, please open an issue or create a PR against the [Amazon AWS collection repository](https://github.com/ansible-collections/amazon.aws).
-See [CONTRIBUTING.md](https://github.com/ansible-collections/amazon.aws/blob/stable-7/CONTRIBUTING.md) for more details.
+See [CONTRIBUTING.md](https://github.com/ansible-collections/amazon.aws/blob/stable-8/CONTRIBUTING.md) for more details.
-This collection is tested using GitHub Actions. To know more about testing, refer to [CI.md](https://github.com/ansible-collections/amazon.aws/blob/stable-7/CI.md).
+This collection is tested using GitHub Actions. To know more about testing, refer to [CI.md](https://github.com/ansible-collections/amazon.aws/blob/stable-8/CI.md).
You can also join us on:
@@ -124,7 +124,7 @@ You can also join us on:
## Release notes
-See the [rendered changelog](https://ansible-collections.github.io/amazon.aws/branch/stable-7/collections/amazon/aws/docsite/CHANGELOG.html) or the [raw generated changelog](https://github.com/ansible-collections/amazon.aws/tree/stable-7/CHANGELOG.rst).
+See the [rendered changelog](https://ansible-collections.github.io/amazon.aws/branch/stable-8/collections/amazon/aws/docsite/CHANGELOG.html) or the [raw generated changelog](https://github.com/ansible-collections/amazon.aws/tree/stable-8/CHANGELOG.rst).
## Roadmap
diff --git a/ansible_collections/amazon/aws/changelogs/changelog.yaml b/ansible_collections/amazon/aws/changelogs/changelog.yaml
index 587c55a28..546b1fe22 100644
--- a/ansible_collections/amazon/aws/changelogs/changelog.yaml
+++ b/ansible_collections/amazon/aws/changelogs/changelog.yaml
@@ -2422,6 +2422,75 @@ releases:
- ec2_region.yml
- release-summary.yml
release_date: '2023-10-03'
+ 6.5.1:
+ changes:
+ bugfixes:
+ - ec2_instance - retry API call if we get ``InvalidInstanceID.NotFound`` error
+ (https://github.com/ansible-collections/amazon.aws/pull/1650).
+ - ec2_vpc_subnet - cleanly handle failure when subnet isn't created in time
+ (https://github.com/ansible-collections/amazon.aws/pull/1848).
+ - s3_object - Fix typo that caused false deprecation warning when setting ``overwrite=latest``
+ (https://github.com/ansible-collections/amazon.aws/pull/1847).
+ - s3_object - fixed ``NoSuchTagSet`` error when S3 endpoint doesn't support
+ tags (https://github.com/ansible-collections/amazon.aws/issues/1607).
+ - s3_object - when doing a put and specifying ``Content-Type`` in metadata,
+ this module (since 6.0.0) erroneously set the ``Content-Type`` to ``None``,
+ causing the put to fail. The fix now correctly honours the specified ``Content-Type``
+ (https://github.com/ansible-collections/amazon.aws/issues/1881).
+ minor_changes:
+ - ec2_vpc_subnet - use ``wait_timeout`` to also control maximum time to wait
+ for initial creation of subnets (https://github.com/ansible-collections/amazon.aws/pull/1848).
+ release_summary: This release includes several bugfixes.
+ fragments:
+ - 1607-NoSuchTagSet.yml
+ - 1650-fix-invalidinstanceid-notfound.yml
+ - 1799-s3_object-bucket.yml
+ - 1847-s3_object-fix-false-deprecation-warning.yml
+ - 1848-ec2_vpc_subnet-wait-creation.yml
+ - 1881-allow-s3_object-to-specify-content-type-in-metadata.yml
+ - release_summary.yml
+ release_date: '2023-12-07'
+ 6.5.2:
+ changes:
+ bugfixes:
+ - plugins/inventory/aws_ec2 - Fix failure when retrieving information for more
+ than 40 instances with use_ssm_inventory (https://github.com/ansible-collections/amazon.aws/issues/1713).
+ release_summary: This release includes a bugfix for the ``amazon.aws.aws_ec2``
+ inventory plugin when retrieving information for more than 40 instances with
+ ``use_ssm_inventory``.
+ fragments:
+ - 20240129-aws_ec2-inventory-bugfix.yml
+ - release_summary.yml
+ release_date: '2024-02-06'
+ 6.5.3:
+ changes:
+ bugfixes:
+ - cloudwatchevent_rule - Fix to avoid adding quotes to JSON input for provided
+ input_template (https://github.com/ansible-collections/amazon.aws/pull/1883).
+ - lookup/secretsmanager_secret - fix the issue where, when the nested secret is missing
+ and on_missing is set to warn, the lookup raised an error instead of
+ a warning message (https://github.com/ansible-collections/amazon.aws/issues/1781).
+ release_summary: This release includes bugfixes for the ``cloudwatchevent_rule``
+ module and the ``secretsmanager_secret`` lookup plugin.
+ fragments:
+ - 1883-cloudwatchevent_rule-fix-json-input-handling-for-input_template.yml
+ - 20240212-lookup-secretsmanager_secret-fix-issue-when-nested-is-missing-and-on_missing-is-set-to-warn.yaml
+ - release_summary.yml
+ - remove_rendundant_workflow.yml
+ release_date: '2024-03-06'
+ 6.5.4:
+ changes:
+ bugfixes:
+ - cloudwatchlogs_log_group_info - Implement exponential backoff when making
+ API calls to prevent throttling exceptions (https://github.com/ansible-collections/amazon.aws/issues/2011).
+ - plugin_utils.inventory - Ensure templated options in lookup plugins are converted
+ (https://github.com/ansible-collections/amazon.aws/issues/1955).
+ release_summary: This release includes bugfixes for the ``cloudwatchlogs_log_group_info``
+ module and the inventory plugins.
+ fragments:
+ - 20240314-cloudwatchlogs_log_group_info-fix-throttling-exceptions.yml
+ - 6.5.4-release-summary.yml
+ release_date: '2024-04-03'
7.0.0:
changes:
breaking_changes:
@@ -2802,3 +2871,158 @@ releases:
- 20240314-s3_object-copy-mode-with-metadata.yml
- 20240321-iam-user-info.yml
release_date: '2024-04-03'
+ 7.6.0:
+ changes:
+ bugfixes:
+ - iam_managed_policy - fixes a bug that causes ``ParamValidationError`` when attempting
+ to delete a policy that's attached to a role or a user (https://github.com/ansible-collections/amazon.aws/issues/2067).
+ - iam_role_info - fixes a bug in handling paths missing the ``/`` prefix and/or
+ suffix (https://github.com/ansible-collections/amazon.aws/issues/2065).
+ - s3_object - fix an idempotency issue when copying an object uploaded using multipart
+ upload (https://github.com/ansible-collections/amazon.aws/issues/2016).
+ deprecated_features:
+ - cloudformation - the ``template`` parameter has been deprecated and will be
+ removed in a release after 2026-05-01. The ``template_body`` parameter can
+ be used in conjunction with the lookup plugin (https://github.com/ansible-collections/amazon.aws/pull/2048).
+ - module_utils.botocore - the ``boto3`` parameter for ``get_aws_connection_info()``
+ will be removed in a release after 2025-05-01. The ``boto3`` parameter has
+ been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+ - module_utils.botocore - the ``boto3`` parameter for ``get_aws_region()`` will
+ be removed in a release after 2025-05-01. The ``boto3`` parameter has been
+ ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+ - module_utils.ec2 - the ``boto3`` parameter for ``get_ec2_security_group_ids_from_names()``
+ will be removed in a release after 2025-05-01. The ``boto3`` parameter has
+ been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+ minor_changes:
+ - ec2_instance - add support for ``host`` option in placement.tenancy (https://github.com/ansible-collections/amazon.aws/pull/2026).
+ - ec2_vol - Ensure volume state is not one of ``deleted`` or ``deleting`` when
+ trying to delete a volume, to guarantee idempotency (https://github.com/ansible-collections/amazon.aws/pull/2052).
+ release_summary: This release brings several bugfixes, minor changes and some
+ new rds modules (``rds_cluster_param_group``, ``rds_cluster_param_group_info``
+ and ``rds_engine_versions_info``). It also introduces a deprecation for the
+ ``cloudformation`` module.
+ fragments:
+ - 20240322-s3_object-fix-idempotency-copy-issue-with-multipart-uploaded-object.yml
+ - 20240403-ec2_securty_group-sanity.yml
+ - 20240411-ec2_vol.yml
+ - 2026-ec2_instance-add-support-for-placement-tenancy-host.yml
+ - 2065-iam_role_info.yml
+ - 2067-iam_managed_policy-delete.yml
+ - cloudformation-template.yml
+ - release_summary.yml
+ - sanity-boto3.yml
+ modules:
+ - description: Manage RDS cluster parameter groups
+ name: rds_cluster_param_group
+ namespace: ''
+ - description: Describes the properties of a specific RDS cluster parameter group.
+ name: rds_cluster_param_group_info
+ namespace: ''
+ - description: Describes the properties of specific versions of DB engines.
+ name: rds_engine_versions_info
+ namespace: ''
+ release_date: '2024-05-07'
+ 8.0.0:
+ changes:
+ breaking_changes:
+ - amazon.aws collection - Support for ansible-core < 2.15 has been dropped (https://github.com/ansible-collections/amazon.aws/pull/2093).
+ - iam_role - ``iam_role.assume_role_policy_document`` is no longer converted
+ from CamelCase to snake_case (https://github.com/ansible-collections/amazon.aws/pull/2040).
+ - iam_role_info - ``iam_role.assume_role_policy_document`` is no longer converted
+ from CamelCase to snake_case (https://github.com/ansible-collections/amazon.aws/pull/2040).
+ - kms_key - the ``policies`` return value has been renamed to ``key_policies``;
+ the contents have not been changed (https://github.com/ansible-collections/amazon.aws/pull/2040).
+ - kms_key_info - the ``policies`` return value has been renamed to ``key_policies``;
+ the contents have not been changed (https://github.com/ansible-collections/amazon.aws/pull/2040).
+ - lambda_event - ``batch_size`` no longer defaults to 100. According to the
+ boto3 API (https://boto3.amazonaws.com/v1/documentation/api/1.26.78/reference/services/lambda.html#Lambda.Client.create_event_source_mapping),
+ ``batch_size`` defaults to 10 for SQS sources and to 100 for stream sources
+ (https://github.com/ansible-collections/amazon.aws/pull/2025).
+ bugfixes:
+ - elb_classic_lb - fixes bug where ``proxy_protocol`` not being set or being
+ set to ``None`` may result in unexpected behaviour or errors (https://github.com/ansible-collections/amazon.aws/pull/2049).
+ - lambda_event - Fix handling of ``batch_size`` values greater than 10 by enabling support
+ for setting ``maximum_batching_window_in_seconds`` (https://github.com/ansible-collections/amazon.aws/pull/2025).
+ - lambda_event - Retrieve function ARN using AWS API (get_function) instead
+ of building it with AWS account information (https://github.com/ansible-collections/amazon.aws/issues/1859).
+ deprecated_features:
+ - aws_ec2 inventory plugin - removal of the previously deprecated ``include_extra_api_calls``
+ option has been assigned to release 9.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2040).
+ - cloudformation - the ``template`` parameter has been deprecated and will be
+ removed in a release after 2026-05-01. The ``template_body`` parameter can
+ be used in conjunction with the lookup plugin (https://github.com/ansible-collections/amazon.aws/pull/2048).
+ - iam_policy - removal of the previously deprecated ``policies`` return key
+ has been assigned to release 9.0.0. Use the ``policy_names`` return key instead
+ (https://github.com/ansible-collections/amazon.aws/pull/2040).
+ - module_utils.botocore - the ``boto3`` parameter for ``get_aws_connection_info()``
+ will be removed in a release after 2025-05-01. The ``boto3`` parameter has
+ been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+ - module_utils.botocore - the ``boto3`` parameter for ``get_aws_region()`` will
+ be removed in a release after 2025-05-01. The ``boto3`` parameter has been
+ ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+ - module_utils.ec2 - the ``boto3`` parameter for ``get_ec2_security_group_ids_from_names()``
+ will be removed in a release after 2025-05-01. The ``boto3`` parameter has
+ been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+ - rds_param_group - the ``rds_param_group`` module has been renamed to ``rds_instance_param_group``.
+ The usage of the module has not changed. The rds_param_group alias will be
+ removed in version 10.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2058).
+ minor_changes:
+ - autoscaling_group - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+ - cloudformation - apply automatic retries when paginating through stack events
+ without a filter (https://github.com/ansible-collections/amazon.aws/pull/2049).
+ - cloudtrail - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+ - ec2_instance - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+ - ec2_vol - Ensure volume state is not one of ``deleted`` or ``deleting`` when
+ trying to delete a volume, to guarantee idempotency (https://github.com/ansible-collections/amazon.aws/pull/2052).
+ - ec2_vol - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+ - elb_classic_lb - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+ - kms_key - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+ - lambda_event - Add support for setting the ``maximum_batching_window_in_seconds``
+ option (https://github.com/ansible-collections/amazon.aws/pull/2025).
+ - module_utils/botocore - support sets and tuples of errors as well as lists
+ (https://github.com/ansible-collections/amazon.aws/pull/1829).
+ - module_utils/elbv2 - Add support for adding a listener with multiple certificates
+ during ALB creation. Allows the elb_application_lb module to handle this
+ use case (https://github.com/ansible-collections/amazon.aws/pull/1950).
+ - module_utils/elbv2 - Add the possibility to update ``SslPolicy``, ``Certificates``
+ and ``AlpnPolicy`` for TLS listeners (https://github.com/ansible-collections/amazon.aws/issues/1198).
+ - rds_instance - Allow passing empty list to ``enable_cloudwatch_logs_exports``
+ in order to remove all existing exports (https://github.com/ansible-collections/amazon.aws/pull/1917).
+ - s3_bucket - refactor s3_bucket module code for improved readability and maintainability
+ (https://github.com/ansible-collections/amazon.aws/pull/2057).
+ - s3_object - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+ release_summary: This major release brings several new features, bug fixes,
+ and deprecated features. It also includes the removal of some previously deprecated
+ functionality from ``iam_role``, ``iam_role_info`` and ``module_utils.policy``.
+ We have also removed support for ``ansible-core<2.15``.
+ removed_features:
+ - iam_role - the ``iam_role.assume_role_policy_document_raw`` return value has
+ been deprecated. ``iam_role.assume_role_policy_document`` now returns the
+ same format as ``iam_role.assume_role_policy_document_raw`` (https://github.com/ansible-collections/amazon.aws/pull/2040).
+ - iam_role_info - the ``iam_role.assume_role_policy_document_raw`` return value
+ has been deprecated. ``iam_role.assume_role_policy_document`` now returns
+ the same format as ``iam_role.assume_role_policy_document_raw`` (https://github.com/ansible-collections/amazon.aws/pull/2040).
+ - module_utils.policy - the previously deprecated ``sort_json_policy_dict()``
+ function has been removed, consider using ``compare_policies()`` instead (https://github.com/ansible-collections/amazon.aws/pull/2052).
+ fragments:
+ - 1829-is_boto3_error-tuple.yml
+ - 1950-elb_application_lb-multiple-listener-certificates-for-alb.yml
+ - 20231127-module_utils-elbv2-update.yaml
+ - 20231211-rds_instance_cloudwatch.yml
+ - 20240208-lambda_event-fix-validate-params.yml
+ - 20240227-zombies.yml
+ - 20240325-lambda_event-bugfix.yml
+ - 20240411-remove-deprecated-sort_json_policy_dict.yml
+ - 20240502-docs_cleanup.yml
+ - 20240516-rds_param_group.yml
+ - 20250513-update-docs.yml
+ - 2057-s3_bucket-refactor.yml
+ - 8.0.0-deprecation-removal.yml
+ - 8.0.0-dev0.yml
+ - 8.0.0-increase-ansible-core-version.yml
+ - cloudformation-template.yml
+ - release_summary.yml
+ - return_block_update_ec2_1.yml
+ - sanity-boto3.yml
+ - sanity-simple.yml
+ release_date: '2024-05-16'
diff --git a/ansible_collections/amazon/aws/docs/docsite/links.yml b/ansible_collections/amazon/aws/docs/docsite/links.yml
index 6bdcc680b..e50089d15 100644
--- a/ansible_collections/amazon/aws/docs/docsite/links.yml
+++ b/ansible_collections/amazon/aws/docs/docsite/links.yml
@@ -7,7 +7,7 @@
# functionality for your collection.
edit_on_github:
repository: ansible-collections/amazon.aws
- branch: stable-7
+ branch: stable-8
# If your collection root (the directory containing galaxy.yml) does not coincide with your
# repository's root, you have to specify the path to the collection root here. For example,
# if the collection root is in a subdirectory ansible_collections/community/REPO_NAME
diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst b/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst
index 219d962b4..f867fc9e4 100644
--- a/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst
+++ b/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst
@@ -4,6 +4,104 @@ amazon.aws Release Notes
.. contents:: Topics
+v8.0.0
+======
+
+Release Summary
+---------------
+
+This major release brings several new features, bug fixes, and deprecated features. It also includes the removal of some previously deprecated functionality from ``iam_role``, ``iam_role_info`` and ``module_utils.policy``. We have also removed support for ``ansible-core<2.15``.
+
+Minor Changes
+-------------
+
+- autoscaling_group - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- cloudformation - apply automatic retries when paginating through stack events without a filter (https://github.com/ansible-collections/amazon.aws/pull/2049).
+- cloudtrail - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- ec2_instance - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- ec2_vol - Ensure volume state is not one of ``deleted`` or ``deleting`` when trying to delete a volume, to guarantee idempotency (https://github.com/ansible-collections/amazon.aws/pull/2052).
+- ec2_vol - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- elb_classic_lb - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- kms_key - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+- lambda_event - Add support for setting the ``maximum_batching_window_in_seconds`` option (https://github.com/ansible-collections/amazon.aws/pull/2025).
+- module_utils/botocore - support sets and tuples of errors as well as lists (https://github.com/ansible-collections/amazon.aws/pull/1829).
+- module_utils/elbv2 - Add support for adding a listener with multiple certificates during ALB creation. Allows the elb_application_lb module to handle this use case (https://github.com/ansible-collections/amazon.aws/pull/1950).
+- module_utils/elbv2 - Add the possibility to update ``SslPolicy``, ``Certificates`` and ``AlpnPolicy`` for TLS listeners (https://github.com/ansible-collections/amazon.aws/issues/1198).
+- rds_instance - Allow passing empty list to ``enable_cloudwatch_logs_exports`` in order to remove all existing exports (https://github.com/ansible-collections/amazon.aws/pull/1917).
+- s3_bucket - refactor s3_bucket module code for improved readability and maintainability (https://github.com/ansible-collections/amazon.aws/pull/2057).
+- s3_object - removed unused code (https://github.com/ansible-collections/amazon.aws/pull/1996).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- amazon.aws collection - Support for ansible-core < 2.15 has been dropped (https://github.com/ansible-collections/amazon.aws/pull/2093).
+- iam_role - ``iam_role.assume_role_policy_document`` is no longer converted from CamelCase to snake_case (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- iam_role_info - ``iam_role.assume_role_policy_document`` is no longer converted from CamelCase to snake_case (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- kms_key - the ``policies`` return value has been renamed to ``key_policies``; the contents have not been changed (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- kms_key_info - the ``policies`` return value has been renamed to ``key_policies``; the contents have not been changed (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- lambda_event - ``batch_size`` no longer defaults to 100. According to the boto3 API (https://boto3.amazonaws.com/v1/documentation/api/1.26.78/reference/services/lambda.html#Lambda.Client.create_event_source_mapping), ``batch_size`` defaults to 10 for SQS sources and to 100 for stream sources (https://github.com/ansible-collections/amazon.aws/pull/2025).
+
+Deprecated Features
+-------------------
+
+- aws_ec2 inventory plugin - removal of the previously deprecated ``include_extra_api_calls`` option has been assigned to release 9.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- cloudformation - the ``template`` parameter has been deprecated and will be removed in a release after 2026-05-01. The ``template_body`` parameter can be used in conjunction with the lookup plugin (https://github.com/ansible-collections/amazon.aws/pull/2048).
+- iam_policy - removal of the previously deprecated ``policies`` return key has been assigned to release 9.0.0. Use the ``policy_names`` return key instead (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- module_utils.botocore - the ``boto3`` parameter for ``get_aws_connection_info()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- module_utils.botocore - the ``boto3`` parameter for ``get_aws_region()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- module_utils.ec2 - the ``boto3`` parameter for ``get_ec2_security_group_ids_from_names()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- rds_param_group - the ``rds_param_group`` module has been renamed to ``rds_instance_param_group``. The usage of the module has not changed. The rds_param_group alias will be removed in version 10.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2058).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- iam_role - the ``iam_role.assume_role_policy_document_raw`` return value has been deprecated. ``iam_role.assume_role_policy_document`` now returns the same format as ``iam_role.assume_role_policy_document_raw`` (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- iam_role_info - the ``iam_role.assume_role_policy_document_raw`` return value has been deprecated. ``iam_role.assume_role_policy_document`` now returns the same format as ``iam_role.assume_role_policy_document_raw`` (https://github.com/ansible-collections/amazon.aws/pull/2040).
+- module_utils.policy - the previously deprecated ``sort_json_policy_dict()`` function has been removed, consider using ``compare_policies()`` instead (https://github.com/ansible-collections/amazon.aws/pull/2052).
+
+Bugfixes
+--------
+
+- elb_classic_lb - fixes bug where ``proxy_protocol`` not being set or being set to ``None`` may result in unexpected behaviour or errors (https://github.com/ansible-collections/amazon.aws/pull/2049).
+- lambda_event - Fix handling of ``batch_size`` values greater than 10 by enabling support for setting ``maximum_batching_window_in_seconds`` (https://github.com/ansible-collections/amazon.aws/pull/2025).
+- lambda_event - Retrieve function ARN using AWS API (get_function) instead of building it with AWS account information (https://github.com/ansible-collections/amazon.aws/issues/1859).
+
+v7.6.0
+======
+
+Release Summary
+---------------
+
+This release brings several bugfixes, minor changes and some new rds modules (``rds_cluster_param_group``, ``rds_cluster_param_group_info`` and ``rds_engine_versions_info``). It also introduces a deprecation for the ``cloudformation`` module.
+
+Minor Changes
+-------------
+
+- ec2_instance - add support for ``host`` option in placement.tenancy (https://github.com/ansible-collections/amazon.aws/pull/2026).
+- ec2_vol - Ensure volume state is not one of ``deleted`` or ``deleting`` when trying to delete a volume, to guarantee idempotency (https://github.com/ansible-collections/amazon.aws/pull/2052).
+
+Deprecated Features
+-------------------
+
+- cloudformation - the ``template`` parameter has been deprecated and will be removed in a release after 2026-05-01. The ``template_body`` parameter can be used in conjunction with the lookup plugin (https://github.com/ansible-collections/amazon.aws/pull/2048).
+- module_utils.botocore - the ``boto3`` parameter for ``get_aws_connection_info()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- module_utils.botocore - the ``boto3`` parameter for ``get_aws_region()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+- module_utils.ec2 - the ``boto3`` parameter for ``get_ec2_security_group_ids_from_names()`` will be removed in a release after 2025-05-01. The ``boto3`` parameter has been ignored since release 4.0.0 (https://github.com/ansible-collections/amazon.aws/pull/2047).
+
+Bugfixes
+--------
+
+- iam_managed_policy - fixes a bug that causes ``ParamValidationError`` when attempting to delete a policy that's attached to a role or a user (https://github.com/ansible-collections/amazon.aws/issues/2067).
+- iam_role_info - fixes a bug in handling paths missing the ``/`` prefix and/or suffix (https://github.com/ansible-collections/amazon.aws/issues/2065).
+- s3_object - fix an idempotency issue when copying an object uploaded using multipart upload (https://github.com/ansible-collections/amazon.aws/issues/2016).
+
+New Modules
+-----------
+
+- rds_cluster_param_group - Manage RDS cluster parameter groups
+- rds_cluster_param_group_info - Describes the properties of a specific RDS cluster parameter group.
+- rds_engine_versions_info - Describes the properties of specific versions of DB engines.
+
v7.5.0
======
@@ -244,6 +342,69 @@ New Modules
- ec2_import_image_info - Gather information about import virtual machine tasks
- rds_global_cluster_info - Obtain information about Aurora global database clusters
+v6.5.4
+======
+
+Release Summary
+---------------
+
+This release includes bugfixes for the ``cloudwatchlogs_log_group_info`` module and the inventory plugins.
+
+Bugfixes
+--------
+
+- cloudwatchlogs_log_group_info - Implement exponential backoff when making API calls to prevent throttling exceptions (https://github.com/ansible-collections/amazon.aws/issues/2011).
+- plugin_utils.inventory - Ensure templated options in lookup plugins are converted (https://github.com/ansible-collections/amazon.aws/issues/1955).
+
+v6.5.3
+======
+
+Release Summary
+---------------
+
+This release includes bugfixes for the ``cloudwatchevent_rule`` module and the ``secretsmanager_secret`` lookup plugin.
+
+Bugfixes
+--------
+
+- cloudwatchevent_rule - Fix to avoid adding quotes to JSON input for provided input_template (https://github.com/ansible-collections/amazon.aws/pull/1883).
+- lookup/secretsmanager_secret - fix the issue where, when the nested secret is missing and on_missing is set to warn, the lookup raised an error instead of a warning message (https://github.com/ansible-collections/amazon.aws/issues/1781).
+
+v6.5.2
+======
+
+Release Summary
+---------------
+
+This release includes a bugfix for the ``amazon.aws.aws_ec2`` inventory plugin when retrieving information for more than 40 instances with ``use_ssm_inventory``.
+
+Bugfixes
+--------
+
+- plugins/inventory/aws_ec2 - Fix failure when retrieving information for more than 40 instances with use_ssm_inventory (https://github.com/ansible-collections/amazon.aws/issues/1713).
+
+v6.5.1
+======
+
+Release Summary
+---------------
+
+This release includes several bugfixes.
+
+Minor Changes
+-------------
+
+- ec2_vpc_subnet - use ``wait_timeout`` to also control maximum time to wait for initial creation of subnets (https://github.com/ansible-collections/amazon.aws/pull/1848).
+
+Bugfixes
+--------
+
+- ec2_instance - retry API call if we get ``InvalidInstanceID.NotFound`` error (https://github.com/ansible-collections/amazon.aws/pull/1650).
+- ec2_vpc_subnet - cleanly handle failure when subnet isn't created in time (https://github.com/ansible-collections/amazon.aws/pull/1848).
+- s3_object - Fix typo that caused false deprecation warning when setting ``overwrite=latest`` (https://github.com/ansible-collections/amazon.aws/pull/1847).
+- s3_object - fixed ``NoSuchTagSet`` error when S3 endpoint doesn't support tags (https://github.com/ansible-collections/amazon.aws/issues/1607).
+- s3_object - when doing a put and specifying ``Content-Type`` in metadata, this module (since 6.0.0) erroneously set the ``Content-Type`` to ``None``, causing the put to fail. The fix now correctly honours the specified ``Content-Type`` (https://github.com/ansible-collections/amazon.aws/issues/1881).
+
v6.5.0
======
diff --git a/ansible_collections/amazon/aws/meta/runtime.yml b/ansible_collections/amazon/aws/meta/runtime.yml
index 37e524c9d..94614615f 100644
--- a/ansible_collections/amazon/aws/meta/runtime.yml
+++ b/ansible_collections/amazon/aws/meta/runtime.yml
@@ -1,5 +1,5 @@
---
-requires_ansible: ">=2.13.0"
+requires_ansible: ">=2.15.0"
action_groups:
aws:
- autoscaling_group
@@ -98,10 +98,14 @@ action_groups:
- lambda_policy
- rds_cluster
- rds_cluster_info
- - rds_global_cluster_info
+ - rds_cluster_param_group
+ - rds_cluster_param_group_info
- rds_cluster_snapshot
+ - rds_engine_versions_info
+ - rds_global_cluster_info
- rds_instance
- rds_instance_info
+ - rds_instance_param_group
- rds_instance_snapshot
- rds_option_group
- rds_option_group_info
@@ -155,6 +159,13 @@ plugin_routing:
execute_lambda:
# Deprecation for this alias should not *start* prior to 2024-09-01
redirect: amazon.aws.lambda_execute
+ rds_param_group:
+ redirect: amazon.aws.rds_instance_param_group
+ deprecation:
+ removal_version: 10.0.0
+ warning_text: >-
+ rds_param_group has been renamed to rds_instance_param_group.
+ Please update your tasks.
lookup:
aws_ssm:
# Deprecation for this alias should not *start* prior to 2024-09-01
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
index 8b9796b7f..bf0bc50b1 100644
--- a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
@@ -633,17 +633,17 @@ class InventoryModule(AWSInventoryBase):
"""
instances = []
ids_to_ignore = []
- for filter in exclude_filters:
+ for filter_dict in exclude_filters:
for i in self._get_instances_by_region(
regions,
- ansible_dict_to_boto3_filter_list(filter),
+ ansible_dict_to_boto3_filter_list(filter_dict),
strict_permissions,
):
ids_to_ignore.append(i["InstanceId"])
- for filter in include_filters:
+ for filter_dict in include_filters:
for i in self._get_instances_by_region(
regions,
- ansible_dict_to_boto3_filter_list(filter),
+ ansible_dict_to_boto3_filter_list(filter_dict),
strict_permissions,
):
if i["InstanceId"] not in ids_to_ignore:
@@ -805,8 +805,8 @@ class InventoryModule(AWSInventoryBase):
if self.get_option("include_extra_api_calls"):
self.display.deprecate(
- "The include_extra_api_calls option has been deprecated and will be removed in release 6.0.0.",
- date="2024-09-01",
+ "The include_extra_api_calls option has been deprecated and will be removed in release 9.0.0.",
+ version="9.0.0",
collection_name="amazon.aws",
)
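
As context for the renamed loop variable above, here is a hedged, stand-alone sketch of the filter conversion the inventory plugin performs on each include/exclude entry; the filter dictionary below is illustrative only, and the helper is assumed to live in module_utils.transformation as in the current tree.

from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list

# Each include_filters / exclude_filters entry is an Ansible-style dict that
# is converted into the boto3 Filters structure before describe_instances.
filter_dict = {"instance-state-name": ["running"], "tag:Environment": "staging"}
print(ansible_dict_to_boto3_filter_list(filter_dict))
# Roughly: [{'Name': 'instance-state-name', 'Values': ['running']},
#           {'Name': 'tag:Environment', 'Values': ['staging']}]
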
diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py b/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py
index 35f05c94e..c03f14450 100644
--- a/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py
+++ b/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py
@@ -49,7 +49,7 @@ except ImportError:
class LookupModule(LookupBase):
- def lookup_constant(self, name):
+ def lookup_constant(self, name): # pylint: disable=too-many-return-statements
if name == "MINIMUM_BOTOCORE_VERSION":
return botocore_utils.MINIMUM_BOTOCORE_VERSION
if name == "MINIMUM_BOTO3_VERSION":
diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
index c01f583f0..d5ced781b 100644
--- a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
+++ b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py
@@ -44,13 +44,10 @@ _raw:
import json
+import ansible.module_utils.six.moves.urllib.error
+import ansible.module_utils.urls
from ansible.errors import AnsibleLookupError
from ansible.module_utils._text import to_native
-from ansible.module_utils.six.moves.urllib.error import HTTPError
-from ansible.module_utils.six.moves.urllib.error import URLError
-from ansible.module_utils.urls import ConnectionError
-from ansible.module_utils.urls import SSLValidationError
-from ansible.module_utils.urls import open_url
from ansible.plugins.lookup import LookupBase
@@ -64,19 +61,19 @@ class LookupModule(LookupBase):
ip_prefix_label = "ip_prefix"
try:
- resp = open_url("https://ip-ranges.amazonaws.com/ip-ranges.json")
+ resp = ansible.module_utils.urls.open_url("https://ip-ranges.amazonaws.com/ip-ranges.json")
amazon_response = json.load(resp)[prefixes_label]
except getattr(json.decoder, "JSONDecodeError", ValueError) as e:
# on Python 3+, json.decoder.JSONDecodeError is raised for bad
# JSON. On 2.x it's a ValueError
raise AnsibleLookupError(f"Could not decode AWS IP ranges: {to_native(e)}")
- except HTTPError as e:
+ except ansible.module_utils.six.moves.urllib.error.HTTPError as e:
raise AnsibleLookupError(f"Received HTTP error while pulling IP ranges: {to_native(e)}")
- except SSLValidationError as e:
+ except ansible.module_utils.urls.SSLValidationError as e:
raise AnsibleLookupError(f"Error validating the server's certificate for: {to_native(e)}")
- except URLError as e:
+ except ansible.module_utils.six.moves.urllib.error.URLError as e:
raise AnsibleLookupError(f"Failed look up IP range service: {to_native(e)}")
- except ConnectionError as e:
+ except ansible.module_utils.urls.ConnectionError as e:
raise AnsibleLookupError(f"Error connecting to IP range service: {to_native(e)}")
if "region" in kwargs:
diff --git a/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py b/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py
index 06ad10be5..254182f30 100644
--- a/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py
+++ b/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py
@@ -182,9 +182,9 @@ class LookupModule(AWSLookupBase):
secrets = {}
for term in terms:
try:
- for object in _list_secrets(client, term):
- if "SecretList" in object:
- for secret_obj in object["SecretList"]:
+ for secret_wrapper in _list_secrets(client, term):
+ if "SecretList" in secret_wrapper:
+ for secret_obj in secret_wrapper["SecretList"]:
secrets.update(
{
secret_obj["Name"]: self.get_secret_value(
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
index ab3a9f073..4febe8743 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/acm.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/acm.py
@@ -40,7 +40,7 @@ def acm_catch_boto_exception(func):
return func(*args, **kwargs)
except is_boto3_error_code(ignore_error_codes):
return None
- except (BotoCoreError, ClientError) as e:
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
if not module:
raise
module.fail_json_aws(e, msg=error)
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
index 858e4e593..d5ad7ea83 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py
@@ -202,7 +202,14 @@ def _aws_region(params):
return None
-def get_aws_region(module, boto3=None):
+def get_aws_region(module, boto3=None): # pylint: disable=redefined-outer-name
+ if boto3 is not None:
+ module.deprecate(
+ "get_aws_region(): the boto3 parameter will be removed in a release after 2025-05-01. "
+ "The parameter has been ignored since release 4.0.0.",
+ date="2025-05-01",
+ collection_name="amazon.aws",
+ )
try:
return _aws_region(module.params)
except AnsibleBotocoreError as e:
@@ -266,7 +273,14 @@ def _aws_connection_info(params):
return region, endpoint_url, boto_params
-def get_aws_connection_info(module, boto3=None):
+def get_aws_connection_info(module, boto3=None): # pylint: disable=redefined-outer-name
+ if boto3 is not None:
+ module.deprecate(
+ "get_aws_connection_info(): the boto3 parameter will be removed in a release after 2025-05-01. "
+ "The parameter has been ignored since release 4.0.0.",
+ date="2025-05-01",
+ collection_name="amazon.aws",
+ )
try:
return _aws_connection_info(module.params)
except AnsibleBotocoreError as e:
@@ -335,7 +349,7 @@ def is_boto3_error_code(code, e=None):
import sys
dummy, e, dummy = sys.exc_info()
- if not isinstance(code, list):
+ if not isinstance(code, (list, tuple, set)):
code = [code]
if isinstance(e, ClientError) and e.response["Error"]["Code"] in code:
return ClientError
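
A hedged sketch of the widened matcher: is_boto3_error_code() now accepts a tuple or set of error codes as well as a list or a single string. The IAM call and the chosen codes are illustrative, not part of the change itself.

from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code


def find_role(client, name):
    try:
        return client.get_role(RoleName=name)["Role"]
    except is_boto3_error_code(("NoSuchEntity", "AccessDenied")):
        # Both codes are matched in one clause now that tuples/sets are accepted;
        # any other ClientError keeps propagating to the caller.
        return None
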
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/common.py b/ansible_collections/amazon/aws/plugins/module_utils/common.py
index 41ba80231..e802a8d80 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/common.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/common.py
@@ -4,7 +4,7 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
AMAZON_AWS_COLLECTION_NAME = "amazon.aws"
-AMAZON_AWS_COLLECTION_VERSION = "7.5.0"
+AMAZON_AWS_COLLECTION_VERSION = "8.0.0"
_collection_info_context = {
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
index afe8208f5..f3aa9f3f1 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py
@@ -39,6 +39,7 @@ up in this module because "that's where the AWS code was" (originally).
import re
+import ansible.module_utils.common.warnings as ansible_warnings
from ansible.module_utils.ansible_release import __version__
# Used to live here, moved into ansible.module_utils.common.dict_transformations
@@ -72,7 +73,6 @@ from .modules import aws_argument_spec as ec2_argument_spec # pylint: disable=u
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.policy
from .policy import _py3cmp as py3cmp # pylint: disable=unused-import
from .policy import compare_policies # pylint: disable=unused-import
-from .policy import sort_json_policy_dict # pylint: disable=unused-import
# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.retries
from .retries import AWSRetry # pylint: disable=unused-import
@@ -99,12 +99,22 @@ def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id
a try block
"""
- def get_sg_name(sg, boto3=None):
+ def get_sg_name(sg):
return str(sg["GroupName"])
- def get_sg_id(sg, boto3=None):
+ def get_sg_id(sg):
return str(sg["GroupId"])
+ if boto3 is not None:
+ ansible_warnings.deprecate(
+ (
+ "The boto3 parameter for get_ec2_security_group_ids_from_names() has been deprecated."
+ "The parameter has been ignored since release 4.0.0."
+ ),
+ date="2025-05-01",
+ collection_name="amazon.aws",
+ )
+
sec_group_id_list = []
if isinstance(sec_group_list, string_types):
@@ -124,7 +134,7 @@ def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id
else:
all_sec_groups = ec2_connection.describe_security_groups()["SecurityGroups"]
- unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
+ unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg)) for all_sg in all_sec_groups)
sec_group_name_list = list(set(sec_group_list) - set(unmatched))
if len(unmatched) > 0:
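
A short usage sketch under the updated signature: callers now pass only the group list, the EC2 client and, optionally, a VPC id, while supplying the old boto3 flag merely triggers the deprecation warning added above. The group names and ids below are placeholders.

import boto3

from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names

ec2_client = boto3.client("ec2")
# Names and ids may be mixed: ids are passed through, names are resolved via describe_security_groups.
group_ids = get_ec2_security_group_ids_from_names(
    ["web-servers", "sg-0123456789abcdef0"],
    ec2_client,
    vpc_id="vpc-0123456789abcdef0",
)
print(group_ids)
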
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
index 758eb9a33..3da2114c7 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py
@@ -449,7 +449,7 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2):
if module.params.get("security_groups") is not None:
try:
self.security_groups = AWSRetry.jittered_backoff()(get_ec2_security_group_ids_from_names)(
- module.params.get("security_groups"), self.connection_ec2, boto3=True
+ module.params.get("security_groups"), self.connection_ec2
)
except ValueError as e:
self.module.fail_json(msg=str(e), exception=traceback.format_exc())
@@ -775,6 +775,9 @@ class ELBListeners:
dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None)
for listener_dict in listeners
]
+ # AlpnPolicy is provided as a str in the module input, but the API expects a list.
+ # Transform the single item into a one-element list.
+ listeners = self._ensure_listeners_alpn_policy(listeners)
self.listeners = self._ensure_listeners_default_action_has_arn(listeners)
self.current_listeners = self._get_elb_listeners()
self.purge_listeners = module.params.get("purge_listeners")
@@ -805,6 +808,16 @@ class ELBListeners:
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
+ @staticmethod
+ def _ensure_listeners_alpn_policy(listeners):
+ result = []
+ for listener in listeners:
+ update_listener = deepcopy(listener)
+ if "AlpnPolicy" in listener:
+ update_listener["AlpnPolicy"] = [update_listener["AlpnPolicy"]]
+ result.append(update_listener)
+ return result
+
def _ensure_listeners_default_action_has_arn(self, listeners):
"""
If a listener DefaultAction has been passed with a Target Group Name instead of ARN, lookup the ARN and
@@ -863,7 +876,8 @@ class ELBListeners:
return listeners_to_add, listeners_to_modify, listeners_to_delete
- def _compare_listener(self, current_listener, new_listener):
+ @staticmethod
+ def _compare_listener(current_listener, new_listener):
"""
Compare two listeners.
@@ -882,43 +896,53 @@ class ELBListeners:
if current_listener["Protocol"] != new_listener["Protocol"]:
modified_listener["Protocol"] = new_listener["Protocol"]
- # If Protocol is HTTPS, check additional attributes
- if current_listener["Protocol"] == "HTTPS" and new_listener["Protocol"] == "HTTPS":
- # Cert
- if current_listener["SslPolicy"] != new_listener["SslPolicy"]:
- modified_listener["SslPolicy"] = new_listener["SslPolicy"]
- if (
- current_listener["Certificates"][0]["CertificateArn"]
- != new_listener["Certificates"][0]["CertificateArn"]
+ # If Protocol is HTTPS or TLS, check additional attributes
+ # SslPolicy
+ new_ssl_policy = new_listener.get("SslPolicy")
+ if new_ssl_policy and new_listener["Protocol"] in ("HTTPS", "TLS"):
+ current_ssl_policy = current_listener.get("SslPolicy")
+ if not current_ssl_policy or (current_ssl_policy and current_ssl_policy != new_ssl_policy):
+ modified_listener["SslPolicy"] = new_ssl_policy
+
+ # Certificates
+ new_certificates = new_listener.get("Certificates")
+ if new_certificates and new_listener["Protocol"] in ("HTTPS", "TLS"):
+ current_certificates = current_listener.get("Certificates")
+ if not current_certificates or (
+ current_certificates
+ and current_certificates[0]["CertificateArn"] != new_certificates[0]["CertificateArn"]
):
- modified_listener["Certificates"] = []
- modified_listener["Certificates"].append({})
- modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0][
- "CertificateArn"
- ]
- elif current_listener["Protocol"] != "HTTPS" and new_listener["Protocol"] == "HTTPS":
- modified_listener["SslPolicy"] = new_listener["SslPolicy"]
- modified_listener["Certificates"] = []
- modified_listener["Certificates"].append({})
- modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0]["CertificateArn"]
+ modified_listener["Certificates"] = [{"CertificateArn": new_certificates[0]["CertificateArn"]}]
# Default action
# If the lengths of the actions are the same, we'll have to verify that the
# contents of those actions are the same
- if len(current_listener["DefaultActions"]) == len(new_listener["DefaultActions"]):
- current_actions_sorted = _sort_actions(current_listener["DefaultActions"])
- new_actions_sorted = _sort_actions(new_listener["DefaultActions"])
-
- new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
-
- if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [
- _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret
- ]:
- modified_listener["DefaultActions"] = new_listener["DefaultActions"]
- # If the action lengths are different, then replace with the new actions
- else:
- modified_listener["DefaultActions"] = new_listener["DefaultActions"]
+ current_default_actions = current_listener.get("DefaultActions")
+ new_default_actions = new_listener.get("DefaultActions")
+ if new_default_actions:
+ if current_default_actions and len(current_default_actions) == len(new_default_actions):
+ current_actions_sorted = _sort_actions(current_default_actions)
+ new_actions_sorted = _sort_actions(new_default_actions)
+
+ new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted]
+
+ if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [
+ _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret
+ ]:
+ modified_listener["DefaultActions"] = new_default_actions
+ # If the action lengths are different, then replace with the new actions
+ else:
+ modified_listener["DefaultActions"] = new_default_actions
+
+ new_alpn_policy = new_listener.get("AlpnPolicy")
+ if new_alpn_policy:
+ if current_listener["Protocol"] == "TLS" and new_listener["Protocol"] == "TLS":
+ current_alpn_policy = current_listener.get("AlpnPolicy")
+ if not current_alpn_policy or current_alpn_policy[0] != new_alpn_policy[0]:
+ modified_listener["AlpnPolicy"] = new_alpn_policy
+ elif current_listener["Protocol"] != "TLS" and new_listener["Protocol"] == "TLS":
+ modified_listener["AlpnPolicy"] = new_alpn_policy
if modified_listener:
return modified_listener
@@ -946,7 +970,23 @@ class ELBListener:
# Rules is not a valid parameter for create_listener
if "Rules" in self.listener:
self.listener.pop("Rules")
- AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener)
+
+ # Handle multiple certs: pass only the first cert to create_listener, then attach the remaining certs with add_listener_certificates
+ listener_certificates = self.listener.get("Certificates", [])
+ first_certificate, other_certs = [], []
+ if len(listener_certificates) > 0:
+ first_certificate, other_certs = listener_certificates[0], listener_certificates[1:]
+ self.listener["Certificates"] = [first_certificate]
+ # create listener
+ create_listener_result = AWSRetry.jittered_backoff()(self.connection.create_listener)(
+ LoadBalancerArn=self.elb_arn, **self.listener
+ )
+ # only one cert can be specified per call to add_listener_certificates
+ for cert in other_certs:
+ AWSRetry.jittered_backoff()(self.connection.add_listener_certificates)(
+ ListenerArn=create_listener_result["Listeners"][0]["ListenerArn"], Certificates=[cert]
+ )
+
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
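
A stand-alone illustration (not the module_utils API itself) of the two behaviours added above: a scalar AlpnPolicy is wrapped into the one-element list the ELBv2 API expects, and only the first certificate is sent to create_listener while the rest would be attached one at a time via add_listener_certificates. The listener content and ARNs are hypothetical.

from copy import deepcopy

listener = {
    "Protocol": "TLS",
    "Port": 443,
    "AlpnPolicy": "HTTP2Preferred",
    "Certificates": [
        {"CertificateArn": "arn:aws:acm:us-east-1:123456789012:certificate/primary"},
        {"CertificateArn": "arn:aws:acm:us-east-1:123456789012:certificate/secondary"},
    ],
}

normalized = deepcopy(listener)
# The module accepts a plain string; the API expects a list.
normalized["AlpnPolicy"] = [normalized["AlpnPolicy"]]

# create_listener only receives the first certificate ...
first_cert, other_certs = normalized["Certificates"][0], normalized["Certificates"][1:]
normalized["Certificates"] = [first_cert]
# ... and each remaining certificate would then be attached separately:
# connection.add_listener_certificates(ListenerArn=..., Certificates=[cert])
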
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
index 56920d53e..155a63152 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/iam.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -49,14 +49,14 @@ def detach_iam_group_policy(client, arn, group):
@IAMErrorHandler.deletion_error_handler("detach role policy")
@AWSRetry.jittered_backoff()
def detach_iam_role_policy(client, arn, role):
- client.detach_group_policy(PolicyArn=arn, RoleName=role)
+ client.detach_role_policy(PolicyArn=arn, RoleName=role)
return True
@IAMErrorHandler.deletion_error_handler("detach user policy")
@AWSRetry.jittered_backoff()
def detach_iam_user_policy(client, arn, user):
- client.detach_group_policy(PolicyArn=arn, UserName=user)
+ client.detach_user_policy(PolicyArn=arn, UserName=user)
return True
@@ -446,8 +446,6 @@ def normalize_iam_access_keys(access_keys: BotoResourceList) -> AnsibleAWSResour
def normalize_iam_instance_profile(profile: BotoResource) -> AnsibleAWSResource:
"""
Converts a boto3 format IAM instance profile into "Ansible" format
-
- _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE.
"""
transforms = {"Roles": _normalize_iam_roles}
transformed_profile = boto3_resource_to_ansible_dict(profile, nested_transforms=transforms)
@@ -458,10 +456,10 @@ def normalize_iam_role(role: BotoResource, _v7_compat: bool = False) -> AnsibleA
"""
Converts a boto3 format IAM instance role into "Ansible" format
- _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE.
+ _v7_compat is deprecated and will be removed in a release after 2026-05-01. DO NOT USE.
"""
transforms = {"InstanceProfiles": _normalize_iam_instance_profiles}
- ignore_list = [] if _v7_compat else ["AssumeRolePolicyDocument"]
+ ignore_list = ["AssumeRolePolicyDocument"]
transformed_role = boto3_resource_to_ansible_dict(role, nested_transforms=transforms, ignore_list=ignore_list)
if _v7_compat and role.get("AssumeRolePolicyDocument"):
transformed_role["assume_role_policy_document_raw"] = role["AssumeRolePolicyDocument"]
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/modules.py b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
index 8a2ff3c0b..82a81811d 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/modules.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/modules.py
@@ -84,11 +84,11 @@ class AnsibleAWSModule:
def __init__(self, **kwargs):
local_settings = {}
- for key in AnsibleAWSModule.default_settings:
+ for key, default_value in AnsibleAWSModule.default_settings.items():
try:
local_settings[key] = kwargs.pop(key)
except KeyError:
- local_settings[key] = AnsibleAWSModule.default_settings[key]
+ local_settings[key] = default_value
self.settings = local_settings
if local_settings["default_args"]:
@@ -192,21 +192,21 @@ class AnsibleAWSModule:
return self._module.md5(*args, **kwargs)
def client(self, service, retry_decorator=None, **extra_params):
- region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self)
kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs)
kw_args.update(extra_params)
conn = boto3_conn(self, conn_type="client", resource=service, **kw_args)
return conn if retry_decorator is None else RetryingBotoClientWrapper(conn, retry_decorator)
def resource(self, service, **extra_params):
- region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self)
kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs)
kw_args.update(extra_params)
return boto3_conn(self, conn_type="resource", resource=service, **kw_args)
@property
def region(self):
- return get_aws_region(self, True)
+ return get_aws_region(self)
def fail_json_aws(self, exception, msg=None, **kwargs):
"""call fail_json with processed exception
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/policy.py b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
index 60b096f84..61b5edc1c 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/policy.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/policy.py
@@ -30,7 +30,6 @@
from functools import cmp_to_key
-import ansible.module_utils.common.warnings as ansible_warnings
from ansible.module_utils._text import to_text
from ansible.module_utils.six import binary_type
from ansible.module_utils.six import string_types
@@ -151,59 +150,3 @@ def compare_policies(current_policy, new_policy, default_version="2008-10-17"):
new_policy.setdefault("Version", default_version)
return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
-
-
-def sort_json_policy_dict(policy_dict):
- """
- DEPRECATED - will be removed in amazon.aws 8.0.0
-
- Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
- different orders will return true
- Args:
- policy_dict (dict): Dict representing IAM JSON policy.
- Basic Usage:
- >>> my_iam_policy = {'Principle': {'AWS':["31","7","14","101"]}
- >>> sort_json_policy_dict(my_iam_policy)
- Returns:
- Dict: Will return a copy of the policy as a Dict but any List will be sorted
- {
- 'Principle': {
- 'AWS': [ '7', '14', '31', '101' ]
- }
- }
- """
-
- ansible_warnings.deprecate(
- (
- "amazon.aws.module_utils.policy.sort_json_policy_dict has been deprecated, consider using "
- "amazon.aws.module_utils.policy.compare_policies instead"
- ),
- version="8.0.0",
- collection_name="amazon.aws",
- )
-
- def value_is_list(my_list):
- checked_list = []
- for item in my_list:
- if isinstance(item, dict):
- checked_list.append(sort_json_policy_dict(item))
- elif isinstance(item, list):
- checked_list.append(value_is_list(item))
- else:
- checked_list.append(item)
-
- # Sort list. If it's a list of dictionaries, sort by tuple of key-value
- # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
- checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
- return checked_list
-
- ordered_policy_dict = {}
- for key, value in policy_dict.items():
- if isinstance(value, dict):
- ordered_policy_dict[key] = sort_json_policy_dict(value)
- elif isinstance(value, list):
- ordered_policy_dict[key] = value_is_list(value)
- else:
- ordered_policy_dict[key] = value
-
- return ordered_policy_dict
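
The removed helper's deprecation message points at compare_policies(); a minimal usage sketch of the suggested replacement, assuming two in-memory policy documents (compare_policies() returns True when the documents differ, ignoring list ordering):

from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies

current_policy = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": ["s3:GetObject", "s3:PutObject"], "Resource": "*"}],
}
desired_policy = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": ["s3:PutObject", "s3:GetObject"], "Resource": "*"}],
}

# compare_policies() answers "do these differ?"; here the Action lists only
# differ in ordering, so no update would be required.
if compare_policies(current_policy, desired_policy):
    print("policies differ, an update is required")
else:
    print("policies are equivalent")
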
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
index 85cde2e4e..20e0ae5e0 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/rds.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/rds.py
@@ -5,6 +5,9 @@
from collections import namedtuple
from time import sleep
+from typing import Any
+from typing import Dict
+from typing import List
try:
from botocore.exceptions import BotoCoreError
@@ -16,6 +19,8 @@ except ImportError:
from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+from .botocore import is_boto3_error_code
+from .core import AnsibleAWSModule
from .retries import AWSRetry
from .tagging import ansible_dict_to_boto3_tag_list
from .tagging import boto3_tag_list_to_ansible_dict
@@ -440,3 +445,39 @@ def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove)
params = {"DBInstanceIdentifier": instance_id, "RoleArn": role["role_arn"], "FeatureName": role["feature_name"]}
_result, changed = call_method(client, module, method_name="add_role_to_db_instance", parameters=params)
return changed
+
+
+@AWSRetry.jittered_backoff()
+def describe_db_cluster_parameter_groups(
+ module: AnsibleAWSModule, connection: Any, group_name: str
+) -> List[Dict[str, Any]]:
+ result = []
+ try:
+ params = {}
+ if group_name is not None:
+ params["DBClusterParameterGroupName"] = group_name
+ paginator = connection.get_paginator("describe_db_cluster_parameter_groups")
+ result = paginator.paginate(**params).build_full_result()["DBClusterParameterGroups"]
+ except is_boto3_error_code("DBParameterGroupNotFound"):
+ pass
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't access parameter groups information")
+ return result
+
+
+@AWSRetry.jittered_backoff()
+def describe_db_cluster_parameters(
+ module: AnsibleAWSModule, connection: Any, group_name: str, source: str = "all"
+) -> List[Dict[str, Any]]:
+ result = []
+ try:
+ paginator = connection.get_paginator("describe_db_cluster_parameters")
+ params = {"DBClusterParameterGroupName": group_name}
+ if source != "all":
+ params["Source"] = source
+ result = paginator.paginate(**params).build_full_result()["Parameters"]
+ except is_boto3_error_code("DBParameterGroupNotFound"):
+ pass
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Couldn't access RDS cluster parameters information")
+ return result
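
A rough usage sketch for the two helpers added above, assuming it runs inside a module where an AnsibleAWSModule instance and an RDS client (for example from module.client("rds")) already exist; the wrapper function and its name are illustrative only:

from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameter_groups
from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameters


def summarize_cluster_parameter_groups(module, connection, group_name=None):
    # Both helpers swallow DBParameterGroupNotFound, so a missing group simply
    # yields an empty result rather than a failure.
    summary = {}
    for group in describe_db_cluster_parameter_groups(module, connection, group_name):
        name = group["DBClusterParameterGroupName"]
        # source="user" restricts the listing to user-modified parameters.
        summary[name] = describe_db_cluster_parameters(module, connection, name, source="user")
    return summary
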
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
index 73297ffc7..961f36f22 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/s3.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/s3.py
@@ -58,7 +58,7 @@ def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
if not HAS_MD5:
return None
- if "-" in etag:
+ if etag is not None and "-" in etag:
# Multi-part ETag; a hash of the hashes of each part.
parts = int(etag[1:-1].split("-")[1])
try:
@@ -73,7 +73,7 @@ def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None)
if not HAS_MD5:
return None
- if "-" in etag:
+ if etag is not None and "-" in etag:
# Multi-part ETag; a hash of the hashes of each part.
parts = int(etag[1:-1].split("-")[1])
try:
diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
index fcd89b467..520bf9320 100644
--- a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py
@@ -668,25 +668,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleA
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
-ASG_ATTRIBUTES = (
- "AvailabilityZones",
- "DefaultCooldown",
- "DesiredCapacity",
- "HealthCheckGracePeriod",
- "HealthCheckType",
- "LaunchConfigurationName",
- "LoadBalancerNames",
- "MaxInstanceLifetime",
- "MaxSize",
- "MinSize",
- "AutoScalingGroupName",
- "PlacementGroup",
- "TerminationPolicies",
- "VPCZoneIdentifier",
-)
-
-INSTANCE_ATTRIBUTES = ("instance_id", "health_status", "lifecycle_state", "launch_config_name")
-
backoff_params = dict(retries=10, delay=3, backoff=1.5)
@@ -1109,7 +1090,7 @@ def wait_for_target_group(asg_connection, group_name):
def suspend_processes(ec2_connection, as_group):
- suspend_processes = set(module.params.get("suspend_processes"))
+ processes_to_suspend = set(module.params.get("suspend_processes"))
try:
suspended_processes = set([p["ProcessName"] for p in as_group["SuspendedProcesses"]])
@@ -1117,15 +1098,15 @@ def suspend_processes(ec2_connection, as_group):
# New ASG being created, no suspended_processes defined yet
suspended_processes = set()
- if suspend_processes == suspended_processes:
+ if processes_to_suspend == suspended_processes:
return False
- resume_processes = list(suspended_processes - suspend_processes)
+ resume_processes = list(suspended_processes - processes_to_suspend)
if resume_processes:
resume_asg_processes(ec2_connection, module.params.get("name"), resume_processes)
- if suspend_processes:
- suspend_asg_processes(ec2_connection, module.params.get("name"), list(suspend_processes))
+ if processes_to_suspend:
+ suspend_asg_processes(ec2_connection, module.params.get("name"), list(processes_to_suspend))
return True
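
The renamed variable makes the set arithmetic in suspend_processes() easier to follow: whatever is currently suspended but no longer requested gets resumed, then the requested set is suspended. A tiny standalone illustration with made-up process names:

# Requested via the module's suspend_processes parameter.
processes_to_suspend = {"Launch", "Terminate"}
# Currently suspended on the ASG.
suspended_processes = {"Terminate", "ReplaceUnhealthy"}

if processes_to_suspend != suspended_processes:
    resume_processes = list(suspended_processes - processes_to_suspend)
    print("resume:", resume_processes)             # ['ReplaceUnhealthy']
    print("suspend:", list(processes_to_suspend))  # ['Launch', 'Terminate'] (order may vary)
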
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
index ae2e78068..49392fde0 100644
--- a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py
@@ -57,6 +57,8 @@ options:
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template),
I(template_body) nor I(template_url) are specified, the previous template will be reused.
+ - The I(template) parameter has been deprecated and will be removed in a release after
+ 2026-05-01. It is recommended to use I(template_body) with the lookup plugin.
type: path
notification_arns:
description:
@@ -172,7 +174,9 @@ EXAMPLES = r"""
state: "present"
region: "us-east-1"
disable_rollback: true
- template: "files/cloudformation-example.json"
+ # The template parameter has been deprecated, use template_body with lookup instead.
+ # template: "files/cloudformation-example.json"
+ template_body: "{{ lookup('file', 'cloudformation-example.json') }}"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
@@ -188,7 +192,9 @@ EXAMPLES = r"""
state: "present"
region: "us-east-1"
disable_rollback: true
- template: "roles/cloudformation/files/cloudformation-example.json"
+ # The template parameter has been deprecated, use template_body with lookup instead.
+ # template: "roles/cloudformation/files/cloudformation-example.json"
+ template_body: "{{ lookup('file', 'cloudformation-example.json') }}"
role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
- name: delete a stack
@@ -339,9 +345,17 @@ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleA
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
-# Set a default, mostly for our integration tests. This will be overridden in
-# the main() loop to match the parameters we're passed
-retry_decorator = AWSRetry.jittered_backoff()
+
+@AWSRetry.jittered_backoff()
+def _search_events(cfn, stack_name, events_limit, token_filter):
+ pg = cfn.get_paginator("describe_stack_events").paginate(
+ StackName=stack_name,
+ PaginationConfig={"MaxItems": events_limit},
+ )
+ if token_filter is None:
+ return list(pg.search("StackEvents[*]"))
+
+ return list(pg.search(f"StackEvents[?ClientRequestToken == '{token_filter}']"))
def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
@@ -349,13 +363,7 @@ def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
ret = {"events": [], "log": []}
try:
- pg = cfn.get_paginator("describe_stack_events").paginate(
- StackName=stack_name, PaginationConfig={"MaxItems": events_limit}
- )
- if token_filter is not None:
- events = list(retry_decorator(pg.search)(f"StackEvents[?ClientRequestToken == '{token_filter}']"))
- else:
- events = list(pg.search("StackEvents[*]"))
+ events = _search_events(cfn, stack_name, events_limit, token_filter)
except is_boto3_error_message("does not exist"):
ret["log"].append("Stack does not exist.")
return ret
@@ -640,7 +648,13 @@ def main():
stack_name=dict(required=True),
template_parameters=dict(required=False, type="dict", default={}),
state=dict(default="present", choices=["present", "absent"]),
- template=dict(default=None, required=False, type="path"),
+ template=dict(
+ default=None,
+ required=False,
+ type="path",
+ removed_at_date="2026-05-01",
+ removed_from_collection="amazon.aws",
+ ),
notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False),
stack_policy_body=dict(default=None, required=False, type="json"),
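
The new _search_events() helper wraps the whole paginated lookup in a single retry decorator instead of retrying only the JMESPath search. A minimal boto3 sketch of the same lookup, assuming a placeholder stack name and client request token and leaving the retry decorator out:

import boto3

cfn = boto3.client("cloudformation")


def search_stack_events(stack_name, events_limit, token_filter=None):
    pages = cfn.get_paginator("describe_stack_events").paginate(
        StackName=stack_name,
        PaginationConfig={"MaxItems": events_limit},
    )
    if token_filter is None:
        # No filter: return every recorded event for the stack.
        return list(pages.search("StackEvents[*]"))
    # The JMESPath expression narrows the result to one client request token.
    return list(pages.search(f"StackEvents[?ClientRequestToken == '{token_filter}']"))


events = search_stack_events("my-example-stack", events_limit=200)
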
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
index 597d43f1b..6d9017f67 100644
--- a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py
@@ -334,19 +334,6 @@ def tag_trail(module, client, tags, trail_arn, curr_tags=None, purge_tags=True):
return True
-def get_tag_list(keys, tags):
- """
- Returns a list of dicts with tags to act on
- keys : set of keys to get the values for
- tags : the dict of tags to turn into a list
- """
- tag_list = []
- for k in keys:
- tag_list.append({"Key": k, "Value": tags[k]})
-
- return tag_list
-
-
def set_logging(module, client, name, action):
"""
Starts or stops logging based on given state
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
index 00ead5ce5..ec6663146 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py
@@ -339,6 +339,11 @@ description:
returned: when AMI is created or already exists
type: str
sample: "nat-server"
+enhanced_networking:
+ description: Specifies whether enhanced networking with ENA is enabled.
+ returned: when AMI is created or already exists
+ type: bool
+ sample: true
hypervisor:
description: Type of hypervisor.
returned: when AMI is created or already exists
@@ -349,11 +354,26 @@ image_id:
returned: when AMI is created or already exists
type: str
sample: "ami-1234abcd"
+image_owner_alias:
+ description: The owner alias (amazon | aws-marketplace).
+ returned: when AMI is created or already exists
+ type: str
+ sample: "amazon"
+image_type:
+ description: Type of image.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "machine"
is_public:
description: Whether image is public.
returned: when AMI is created or already exists
type: bool
sample: false
+kernel_id:
+ description: The kernel associated with the image, if any. Only applicable for machine images.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "aki-88aa75e1"
launch_permission:
description: Permissions allowing other accounts to access the AMI.
returned: when AMI is created or already exists
@@ -379,6 +399,16 @@ platform:
description: Platform of image.
returned: when AMI is created or already exists
type: str
+ sample: "Windows"
+product_codes:
+ description: Any product codes associated with the AMI.
+ returned: when AMI is created or already exists
+ type: list
+ sample: []
+ramdisk_id:
+ description: The RAM disk associated with the image, if any. Only applicable for machine images.
+ returned: when AMI is created or already exists
+ type: str
sample: null
root_device_name:
description: Root device name of image.
@@ -390,11 +420,24 @@ root_device_type:
returned: when AMI is created or already exists
type: str
sample: "ebs"
+sriov_net_support:
+ description: Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled.
+ returned: when AMI is created or already exists
+ type: str
+ sample: "simple"
state:
description: State of image.
returned: when AMI is created or already exists
type: str
sample: "available"
+state_reason:
+ description: The reason for the state change.
+ returned: when AMI is created or already exists
+ type: dict
+ sample: {
+ 'Code': 'string',
+ 'Message': 'string'
+ }
tags:
description: A dictionary of tags assigned to image.
returned: when AMI is created or already exists
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
index 2929a0292..906c141e1 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py
@@ -112,7 +112,6 @@ images:
sample: '2017-10-16T19:22:13.000Z'
description:
description: The description of the AMI.
- returned: always
type: str
sample: ''
ena_support:
@@ -163,6 +162,11 @@ images:
returned: always
type: str
sample: '123456789012'
+ platform_details:
+ description: Platform of image.
+ returned: always
+ type: str
+ sample: "Windows"
public:
description: Whether the image has public launch permissions.
returned: always
@@ -180,7 +184,6 @@ images:
sample: ebs
sriov_net_support:
description: Whether enhanced networking is enabled.
- returned: always
type: str
sample: simple
state:
@@ -192,6 +195,11 @@ images:
description: Any tags assigned to the image.
returned: always
type: dict
+ usage_operation:
+ description: The operation of the Amazon EC2 instance and the billing code that is associated with the AMI.
+ returned: always
+ type: str
+ sample: "RunInstances"
virtualization_type:
description: The type of virtualization of the AMI.
returned: always
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
index c00dc515c..8e775582b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py
@@ -79,19 +79,58 @@ addresses:
description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP.
returned: on success
type: list
- sample: [{
- "allocation_id": "eipalloc-64de1b01",
- "association_id": "eipassoc-0fe9ce90d6e983e97",
- "domain": "vpc",
- "instance_id": "i-01020cfeb25b0c84f",
- "network_interface_id": "eni-02fdeadfd4beef9323b",
- "network_interface_owner_id": "0123456789",
- "private_ip_address": "10.0.0.1",
- "public_ip": "54.81.104.1",
- "tags": {
+ elements: dict
+ contains:
+ "allocation_id":
+ description: The ID representing the allocation of the address.
+ returned: always
+ type: str
+ sample: "eipalloc-64de1b01"
+ "association_id":
+ description: The ID representing the association of the address with an instance.
+ type: str
+ sample: "eipassoc-0fe9ce90d6e983e97"
+ "domain":
+ description: The network (vpc).
+ type: str
+ returned: always
+ sample: "vpc"
+ "instance_id":
+ description: The ID of the instance that the address is associated with (if any).
+ returned: if any instance is associated
+ type: str
+ sample: "i-01020cfeb25b0c84f"
+ "network_border_group":
+ description: The name of the unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses.
+ returned: if any instance is associated
+ type: str
+ sample: "us-east-1"
+ "network_interface_id":
+ description: The ID of the network interface.
+ returned: if any instance is associated
+ type: str
+ sample: "eni-02fdeadfd4beef9323b"
+ "network_interface_owner_id":
+ description: The ID of the AWS account that owns the network interface.
+ returned: if any instance is associated
+ type: str
+ sample: "0123456789"
+ "private_ip_address":
+ description: The private IP address associated with the Elastic IP address.
+ returned: always
+ type: str
+ sample: "10.0.0.1"
+ "public_ip":
+ description: The Elastic IP address.
+ returned: if any instance is associated
+ type: str
+ sample: "54.81.104.1"
+ "tags":
+ description: Any tags assigned to the Elastic IP address.
+ type: dict
+ sample: {
"Name": "test-vm-54.81.104.1"
}
- }]
"""
try:
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
index bf8e76a2b..794ed45a9 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py
@@ -217,15 +217,25 @@ interface:
returned: when state != absent
type: complex
contains:
+ attachment:
+ description: The network interface attachment.
+ type: dict
+ sample: {
+ "attach_time": "2024-04-25T20:57:20+00:00",
+ "attachment_id": "eni-attach-0ddce58b341a1846f",
+ "delete_on_termination": true,
+ "device_index": 0,
+ "instance_id": "i-032cb1cceb29250d2",
+ "status": "attached"
+ }
description:
description: interface description
type: str
sample: Firewall network interface
groups:
- description: list of security groups
- type: list
- elements: dict
- sample: [ { "sg-f8a8a9da": "default" } ]
+ description: dict of security groups
+ type: dict
+ sample: { "sg-f8a8a9da": "default" }
id:
description: network interface id
type: str
@@ -368,10 +378,7 @@ def correct_ip_count(connection, ip_count, module, eni_id):
for ip in eni["PrivateIpAddresses"]:
private_addresses.add(ip["PrivateIpAddress"])
- if len(private_addresses) == ip_count:
- return True
- else:
- return False
+ return bool(len(private_addresses) == ip_count)
def wait_for(function_pointer, *args):
@@ -395,7 +402,7 @@ def create_eni(connection, vpc_id, module):
private_ip_address = module.params.get("private_ip_address")
description = module.params.get("description")
security_groups = get_ec2_security_group_ids_from_names(
- module.params.get("security_groups"), connection, vpc_id=vpc_id, boto3=True
+ module.params.get("security_groups"), connection, vpc_id=vpc_id
)
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
@@ -510,7 +517,7 @@ def modify_eni(connection, module, eni):
)
changed = True
if len(security_groups) > 0:
- groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni["VpcId"], boto3=True)
+ groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni["VpcId"])
if sorted(get_sec_group_list(eni["Groups"])) != sorted(groups):
if not module.check_mode:
connection.modify_network_interface_attribute(
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
index 5ef36b258..ca0a4bb22 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py
@@ -73,6 +73,7 @@ network_interfaces:
device_index: 1,
instance_id: "i-15b8d3cadbafa1234",
instance_owner_id: "123456789012",
+ "network_card_index": 0,
status: "attached"
}
availability_zone:
@@ -147,7 +148,6 @@ network_interfaces:
sample: []
requester_id:
description: The ID of the entity that launched the ENI.
- returned: always
type: str
sample: "AIDA12345EXAMPLE54321"
requester_managed:
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
index 06089e4fe..c09cce97b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py
@@ -359,10 +359,12 @@ options:
type: int
required: false
tenancy:
- description: Type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+ description:
+ - Type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+ - Support for I(tenancy=host) was added in amazon.aws 7.6.0.
type: str
required: false
- choices: ['dedicated', 'default']
+ choices: ['dedicated', 'default', 'host']
license_specifications:
description:
- The license specifications to be used for the instance.
@@ -671,16 +673,67 @@ instances:
returned: always
type: str
sample: vol-12345678
+ capacity_reservation_specification:
+ description: Information about the Capacity Reservation targeting option.
+ type: complex
+ contains:
+ capacity_reservation_preference:
+ description: Describes the Capacity Reservation preferences.
+ type: str
+ sample: open
client_token:
description: The idempotency token you provided when you launched the instance, if applicable.
returned: always
type: str
sample: mytoken
+ cpu_options:
+ description: The CPU options for the instance.
+ type: complex
+ contains:
+ core_count:
+ description: The number of CPU cores for the instance.
+ type: int
+ sample: 1
+ threads_per_core:
+ description: The number of threads per CPU core.
+ type: int
+ sample: 2
+ amd_sev_snp:
+ description: Indicates whether the instance is enabled for AMD SEV-SNP.
+ type: str
+ sample: enabled
+ current_instance_boot_mode:
+ description: The boot mode that is used to boot the instance at launch or start.
+ type: str
+ sample: legacy-bios
ebs_optimized:
description: Indicates whether the instance is optimized for EBS I/O.
returned: always
type: bool
sample: false
+ ena_support:
+ description: Specifies whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ enclave_options:
+ description: Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
+ type: dict
+ contains:
+ enabled:
+ description: If this parameter is set to true, the instance is enabled for Amazon Web Services Nitro Enclaves.
+ returned: always
+ type: bool
+ sample: false
+ hibernation_options:
+ description: Indicates whether the instance is enabled for hibernation.
+ type: dict
+ contains:
+ configured:
+ description: If true, your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.
+ returned: always
+ type: bool
+ sample: false
hypervisor:
description: The hypervisor type of the instance.
returned: always
@@ -737,6 +790,35 @@ instances:
returned: always
type: str
sample: arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-0123456789
+ metadata_options:
+ description: The metadata options for the instance.
+ returned: always
+ type: complex
+ contains:
+ http_endpoint:
+ description: Indicates whether the HTTP metadata endpoint on your instances is enabled or disabled.
+ type: str
+ sample: enabled
+ http_protocol_ipv6:
+ description: Indicates whether the IPv6 endpoint for the instance metadata service is enabled or disabled.
+ type: str
+ sample: disabled
+ http_put_response_hop_limit:
+ description: The maximum number of hops that the metadata token can travel.
+ type: int
+ sample: 1
+ http_tokens:
+ description: Indicates whether IMDSv2 is required.
+ type: str
+ sample: optional
+ instance_metadata_tags:
+ description: Indicates whether access to instance tags from the instance metadata is enabled or disabled.
+ type: str
+ sample: disabled
+ state:
+ description: The state of the metadata option changes.
+ type: str
+ sample: applied
monitoring:
description: The monitoring for the instance.
returned: always
@@ -750,7 +832,8 @@ instances:
network_interfaces:
description: One or more network interfaces for the instance.
returned: always
- type: complex
+ type: list
+ elements: dict
contains:
association:
description: The association information for an Elastic IPv4 associated with the network interface.
@@ -797,6 +880,11 @@ instances:
returned: always
type: int
sample: 0
+ network_card_index:
+ description: The index of the network card.
+ returned: always
+ type: int
+ sample: 0
status:
description: The attachment state.
returned: always
@@ -823,6 +911,11 @@ instances:
returned: always
type: str
sample: mygroup
+ interface_type:
+ description: The type of network interface.
+ returned: always
+ type: str
+ sample: interface
ipv6_addresses:
description: One or more IPv6 addresses associated with the network interface.
returned: always
@@ -849,6 +942,11 @@ instances:
returned: always
type: str
sample: 01234567890
+ private_dns_name:
+ description: The private DNS hostname assigned to the instance.
+ type: str
+ returned: always
+ sample: ip-10-1-0-156.ec2.internal
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
@@ -862,7 +960,6 @@ instances:
contains:
association:
description: The association information for an Elastic IP address (IPv4) associated with the network interface.
- returned: always
type: complex
contains:
ip_owner_id:
@@ -885,6 +982,11 @@ instances:
returned: always
type: bool
sample: true
+ private_dns_name:
+ description: The private DNS hostname assigned to the instance.
+ type: str
+ returned: always
+ sample: ip-10-1-0-156.ec2.internal
private_ip_address:
description: The private IPv4 address of the network interface.
returned: always
@@ -926,7 +1028,6 @@ instances:
type: str
group_id:
description: The ID of the placement group the instance is in (for cluster compute instances).
- returned: always
type: str
sample: "pg-01234566"
group_name:
@@ -936,16 +1037,13 @@ instances:
sample: "my-placement-group"
host_id:
description: The ID of the Dedicated Host on which the instance resides.
- returned: always
type: str
host_resource_group_arn:
description: The ARN of the host resource group in which the instance is in.
- returned: always
type: str
sample: "arn:aws:resource-groups:us-east-1:123456789012:group/MyResourceGroup"
partition_number:
description: The number of the partition the instance is in.
- returned: always
type: int
sample: 1
tenancy:
@@ -959,11 +1057,32 @@ instances:
type: str
version_added: 7.1.0
sample:
+ platform_details:
+ description: The platform details value for the instance.
+ returned: always
+ type: str
+ sample: Linux/UNIX
private_dns_name:
description: The private DNS name.
returned: always
type: str
sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_dns_name_options:
+ description: The options for the instance hostname.
+ type: dict
+ contains:
+ enable_resource_name_dns_a_record:
+ description: Indicates whether to respond to DNS queries for instance hostnames with DNS A records.
+ type: bool
+ sample: false
+ enable_resource_name_dns_aaaa_record:
+ description: Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.
+ type: bool
+ sample: false
+ hostname_type:
+ description: The type of hostname to assign to an instance.
+ type: str
+ sample: ip-name
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
@@ -1021,7 +1140,7 @@ instances:
returned: always
type: str
sample: my-security-group
- network.source_dest_check:
+ source_dest_check:
description: Indicates whether source/destination checking is enabled.
returned: always
type: bool
@@ -1458,7 +1577,7 @@ def build_top_level_options(params):
return spec
-def build_instance_tags(params, propagate_tags_to_volumes=True):
+def build_instance_tags(params):
tags = params.get("tags") or {}
if params.get("name") is not None:
tags["Name"] = params.get("name")
@@ -1930,7 +2049,7 @@ def change_instance_state(filters, desired_module_state):
if inst["State"]["Name"] in ("pending", "running"):
unchanged.add(inst["InstanceId"])
continue
- elif inst["State"]["Name"] == "stopping":
+ if inst["State"]["Name"] == "stopping":
await_instances([inst["InstanceId"]], desired_module_state="stopped", force_wait=True)
if module.check_mode:
@@ -2029,63 +2148,60 @@ def handle_existing(existing_matches, state, filters):
return result
-def enforce_count(existing_matches, module, desired_module_state):
+def enforce_count(existing_matches, desired_module_state):
exact_count = module.params.get("exact_count")
- try:
- current_count = len(existing_matches)
- if current_count == exact_count:
- module.exit_json(
- changed=False,
- instances=[pretty_instance(i) for i in existing_matches],
- instance_ids=[i["InstanceId"] for i in existing_matches],
- msg=f"{exact_count} instances already running, nothing to do.",
- )
+ current_count = len(existing_matches)
+ if current_count == exact_count:
+ return dict(
+ changed=False,
+ instances=[pretty_instance(i) for i in existing_matches],
+ instance_ids=[i["InstanceId"] for i in existing_matches],
+ msg=f"{exact_count} instances already running, nothing to do.",
+ )
- elif current_count < exact_count:
- # launch instances
- try:
- ensure_present(
- existing_matches=existing_matches,
- desired_module_state=desired_module_state,
- current_count=current_count,
- )
- except botocore.exceptions.ClientError as e:
- module.fail_json(e, msg="Unable to launch instances")
- elif current_count > exact_count:
- to_terminate = current_count - exact_count
- # sort the instances from least recent to most recent based on launch time
- existing_matches = sorted(existing_matches, key=lambda inst: inst["LaunchTime"])
- # get the instance ids of instances with the count tag on them
- all_instance_ids = [x["InstanceId"] for x in existing_matches]
- terminate_ids = all_instance_ids[0:to_terminate]
- if module.check_mode:
- module.exit_json(
- changed=True,
- terminated_ids=terminate_ids,
- instance_ids=all_instance_ids,
- msg=f"Would have terminated following instances if not in check mode {terminate_ids}",
- )
- # terminate instances
- try:
- client.terminate_instances(aws_retry=True, InstanceIds=terminate_ids)
- await_instances(terminate_ids, desired_module_state="terminated", force_wait=True)
- except is_boto3_error_code("InvalidInstanceID.NotFound"):
- pass
- except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
- module.fail_json(e, msg="Unable to terminate instances")
- # include data for all matched instances in addition to the list of terminations
- # allowing for recovery of metadata from the destructive operation
- module.exit_json(
- changed=True,
- msg="Successfully terminated instances.",
- terminated_ids=terminate_ids,
- instance_ids=all_instance_ids,
- instances=existing_matches,
- )
+ if current_count < exact_count:
+ # launch instances
+ return ensure_present(
+ existing_matches=existing_matches,
+ desired_module_state=desired_module_state,
+ current_count=current_count,
+ )
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to enforce instance count")
+ to_terminate = current_count - exact_count
+ # sort the instances from least recent to most recent based on launch time
+ existing_matches = sorted(existing_matches, key=lambda inst: inst["LaunchTime"])
+ # get the instance ids of instances with the count tag on them
+ all_instance_ids = [x["InstanceId"] for x in existing_matches]
+ terminate_ids = all_instance_ids[0:to_terminate]
+ if module.check_mode:
+ return dict(
+ changed=True,
+ terminated_ids=terminate_ids,
+ instance_ids=all_instance_ids,
+ msg=f"Would have terminated following instances if not in check mode {terminate_ids}",
+ )
+ # terminate instances
+ try:
+ client.terminate_instances(aws_retry=True, InstanceIds=terminate_ids)
+ await_instances(terminate_ids, desired_module_state="terminated", force_wait=True)
+ except is_boto3_error_code("InvalidInstanceID.NotFound"):
+ pass
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ ) as e: # pylint: disable=duplicate-except
+ module.fail_json(e, msg="Unable to terminate instances")
+
+ # include data for all matched instances in addition to the list of terminations
+ # allowing for recovery of metadata from the destructive operation
+ return dict(
+ changed=True,
+ msg="Successfully terminated instances.",
+ terminated_ids=terminate_ids,
+ instance_ids=all_instance_ids,
+ instances=existing_matches,
+ )
def ensure_present(existing_matches, desired_module_state, current_count=None):
@@ -2100,7 +2216,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
if module.check_mode:
if existing_matches:
instance_ids = [x["InstanceId"] for x in existing_matches]
- module.exit_json(
+ return dict(
changed=True,
instance_ids=instance_ids,
instances=existing_matches,
@@ -2108,7 +2224,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
msg="Would have launched instances if not in check_mode.",
)
else:
- module.exit_json(
+ return dict(
changed=True,
spec=instance_spec,
msg="Would have launched instances if not in check_mode.",
@@ -2144,14 +2260,14 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
all_instance_ids = [x["InstanceId"] for x in existing_matches] + instance_ids
if not module.params.get("wait"):
if existing_matches:
- module.exit_json(
+ return dict(
changed=True,
changed_ids=instance_ids,
instance_ids=all_instance_ids,
spec=instance_spec,
)
else:
- module.exit_json(
+ return dict(
changed=True,
instance_ids=instance_ids,
spec=instance_spec,
@@ -2161,7 +2277,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
if existing_matches:
all_instances = existing_matches + instances
- module.exit_json(
+ return dict(
changed=True,
changed_ids=instance_ids,
instance_ids=all_instance_ids,
@@ -2169,7 +2285,7 @@ def ensure_present(existing_matches, desired_module_state, current_count=None):
spec=instance_spec,
)
else:
- module.exit_json(
+ return dict(
changed=True,
instance_ids=instance_ids,
instances=[pretty_instance(i) for i in instances],
@@ -2307,7 +2423,7 @@ def main():
host_id=dict(type="str"),
host_resource_group_arn=dict(type="str"),
partition_number=dict(type="int"),
- tenancy=dict(type="str", choices=["dedicated", "default"]),
+ tenancy=dict(type="str", choices=["dedicated", "default", "host"]),
),
),
instance_initiated_shutdown_behavior=dict(type="str", choices=["stop", "terminate"]),
@@ -2396,7 +2512,7 @@ def main():
changed=False,
)
elif module.params.get("exact_count"):
- enforce_count(existing_matches, module, desired_module_state=state)
+ result = enforce_count(existing_matches, desired_module_state=state)
elif existing_matches and not module.params.get("count"):
for match in existing_matches:
warn_if_public_ip_assignment_changed(match)
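
enforce_count() now returns a result dict instead of calling module.exit_json() directly, and main() picks that result up in the last hunk above. When there are more matching instances than exact_count, the oldest ones are terminated first; a small standalone sketch of that selection with made-up instance data:

from datetime import datetime, timezone

exact_count = 2
existing_matches = [
    {"InstanceId": "i-newest", "LaunchTime": datetime(2024, 3, 1, tzinfo=timezone.utc)},
    {"InstanceId": "i-oldest", "LaunchTime": datetime(2024, 1, 1, tzinfo=timezone.utc)},
    {"InstanceId": "i-middle", "LaunchTime": datetime(2024, 2, 1, tzinfo=timezone.utc)},
]

to_terminate = len(existing_matches) - exact_count
# Sort from least recent to most recent launch, then take the head of the list.
existing_matches = sorted(existing_matches, key=lambda inst: inst["LaunchTime"])
terminate_ids = [inst["InstanceId"] for inst in existing_matches][:to_terminate]
print(terminate_ids)  # ['i-oldest']; the two newest instances are kept
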
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
index 1caea9365..af12729eb 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py
@@ -161,6 +161,14 @@ instances:
returned: always
type: str
sample: vol-12345678
+ capacity_reservation_specification:
+ description: Information about the Capacity Reservation targeting option.
+ type: complex
+ contains:
+ capacity_reservation_preference:
+ description: Describes the Capacity Reservation preferences.
+ type: str
+ sample: open
cpu_options:
description: The CPU options set for the instance.
returned: always
@@ -181,11 +189,38 @@ instances:
returned: always
type: str
sample: mytoken
+ current_instance_boot_mode:
+ description: The boot mode that is used to boot the instance at launch or start.
+ type: str
+ sample: legacy-bios
ebs_optimized:
description: Indicates whether the instance is optimized for EBS I/O.
returned: always
type: bool
sample: false
+ ena_support:
+ description: Specifies whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ enclave_options:
+ description: Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
+ type: dict
+ contains:
+ enabled:
+ description: If this parameter is set to true, the instance is enabled for Amazon Web Services Nitro Enclaves.
+ returned: always
+ type: bool
+ sample: false
+ hibernation_options:
+ description: Indicates whether the instance is enabled for hibernation.
+ type: dict
+ contains:
+ configured:
+ description: If true, your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.
+ returned: always
+ type: bool
+ sample: false
hypervisor:
description: The hypervisor type of the instance.
returned: always
@@ -193,7 +228,6 @@ instances:
sample: xen
iam_instance_profile:
description: The IAM instance profile associated with the instance, if applicable.
- returned: always
type: complex
contains:
arn:
@@ -231,6 +265,44 @@ instances:
returned: always
type: str
sample: "2017-03-23T22:51:24+00:00"
+ maintenance_options:
+ description: Provides information on the recovery and maintenance options of your instance.
+ returned: always
+ type: dict
+ contains:
+ auto_recovery:
+ description: Provides information on the current automatic recovery behavior of your instance.
+ type: str
+ sample: default
+ metadata_options:
+ description: The metadata options for the instance.
+ returned: always
+ type: complex
+ contains:
+ http_endpoint:
+ description: Indicates whether the HTTP metadata endpoint on your instances is enabled or disabled.
+ type: str
+ sample: enabled
+ http_protocol_ipv6:
+ description: Indicates whether the IPv6 endpoint for the instance metadata service is enabled or disabled.
+ type: str
+ sample: disabled
+ http_put_response_hop_limit:
+ description: The maximum number of hops that the metadata token can travel.
+ type: int
+ sample: 1
+ http_tokens:
+ description: Indicates whether IMDSv2 is required.
+ type: str
+ sample: optional
+ instance_metadata_tags:
+ description: Indicates whether access to instance tags from the instance metadata is enabled or disabled.
+ type: str
+ sample: disabled
+ state:
+ description: The state of the metadata option changes.
+ type: str
+ sample: applied
monitoring:
description: The monitoring for the instance.
returned: always
@@ -291,6 +363,11 @@ instances:
returned: always
type: int
sample: 0
+ network_card_index:
+ description: The index of the network card.
+ returned: always
+ type: int
+ sample: 0
status:
description: The attachment state.
returned: always
@@ -317,6 +394,11 @@ instances:
returned: always
type: str
sample: mygroup
+ interface_type:
+ description: The type of network interface.
+ returned: always
+ type: str
+ sample: interface
ipv6_addresses:
description: One or more IPv6 addresses associated with the network interface.
returned: always
@@ -343,6 +425,11 @@ instances:
returned: always
type: str
sample: 01234567890
+ private_dns_name:
+ description: The private DNS hostname assigned to the instance.
+ type: str
+ returned: always
+ sample: ip-10-1-0-156.ec2.internal
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
@@ -356,7 +443,6 @@ instances:
contains:
association:
description: The association information for an Elastic IP address (IPv4) associated with the network interface.
- returned: always
type: complex
contains:
ip_owner_id:
@@ -379,6 +465,11 @@ instances:
returned: always
type: bool
sample: true
+ private_dns_name:
+ description: The private DNS hostname assigned to the instance.
+ type: str
+ returned: always
+ sample: ip-10-1-0-156.ec2.internal
private_ip_address:
description: The private IPv4 address of the network interface.
returned: always
@@ -424,11 +515,32 @@ instances:
returned: always
type: str
sample: default
+ platform_details:
+ description: The platform details value for the instance.
+ returned: always
+ type: str
+ sample: Linux/UNIX
private_dns_name:
description: The private DNS name.
returned: always
type: str
sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_dns_name_options:
+ description: The options for the instance hostname.
+ type: dict
+ contains:
+ enable_resource_name_dns_a_record:
+ description: Indicates whether to respond to DNS queries for instance hostnames with DNS A records.
+ type: bool
+ sample: false
+ enable_resource_name_dns_aaaa_record:
+ description: Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.
+ type: bool
+ sample: false
+ hostname_type:
+ description: The type of hostname to assign to an instance.
+ type: str
+ sample: ip-name
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
index 26ecaad0a..83fdd4417 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py
@@ -450,6 +450,8 @@ socket.setdefaulttimeout(5)
# The ec2_metadata_facts module is a special case, while we generally dropped support for Python < 3.6
# this module doesn't depend on the SDK and still has valid use cases for folks working with older
# OSes.
+
+# pylint: disable=consider-using-f-string
try:
json_decode_error = json.JSONDecodeError
except AttributeError:
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
index 9d16f339f..44afa7bff 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py
@@ -413,8 +413,8 @@ EXAMPLES = r"""
"""
RETURN = r"""
-group_name:
- description: Security group name
+description:
+ description: Description of security group
sample: My Security Group
type: str
returned: on create/update
@@ -423,11 +423,132 @@ group_id:
sample: sg-abcd1234
type: str
returned: on create/update
-description:
- description: Description of security group
+group_name:
+ description: Security group name
sample: My Security Group
type: str
returned: on create/update
+ip_permissions:
+ description: The inbound rules associated with the security group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ from_port:
+ description: If the protocol is TCP or UDP, this is the start of the port range.
+ type: int
+ sample: 80
+ ip_protocol:
+ description: The IP protocol name or number.
+ returned: always
+ type: str
+ ip_ranges:
+ description: The IPv4 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ip:
+ description: The IPv4 CIDR range.
+ returned: always
+ type: str
+ ipv6_ranges:
+ description: The IPv6 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ipv6:
+ description: The IPv6 CIDR range.
+ returned: always
+ type: str
+ prefix_list_ids:
+ description: The prefix list IDs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ prefix_list_id:
+ description: The ID of the prefix.
+ returned: always
+ type: str
+ to_port:
+ description: If the protocol is TCP or UDP, this is the end of the port range.
+ type: int
+ sample: 80
+ user_id_group_pairs:
+ description: The security group and AWS account ID pairs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The security group ID of the pair.
+ returned: always
+ type: str
+ user_id:
+ description: The user ID of the pair.
+ returned: always
+ type: str
+ip_permissions_egress:
+ description: The outbound rules associated with the security group.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ip_protocol:
+ description: The IP protocol name or number.
+ returned: always
+ type: str
+ ip_ranges:
+ description: The IPv4 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ip:
+ description: The IPv4 CIDR range.
+ returned: always
+ type: str
+ ipv6_ranges:
+ description: The IPv6 ranges.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ cidr_ipv6:
+ description: The IPv6 CIDR range.
+ returned: always
+ type: str
+ prefix_list_ids:
+ description: The prefix list IDs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ prefix_list_id:
+ description: The ID of the prefix.
+ returned: always
+ type: str
+ user_id_group_pairs:
+ description: The security group and AWS account ID pairs.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The security group ID of the pair.
+ returned: always
+ type: str
+ user_id:
+ description: The user ID of the pair.
+ returned: always
+ type: str
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
tags:
description: Tags associated with the security group
sample:
@@ -440,35 +561,6 @@ vpc_id:
sample: vpc-abcd1234
type: str
returned: on create/update
-ip_permissions:
- description: Inbound rules associated with the security group.
- sample:
- - from_port: 8182
- ip_protocol: tcp
- ip_ranges:
- - cidr_ip: "198.51.100.1/32"
- ipv6_ranges: []
- prefix_list_ids: []
- to_port: 8182
- user_id_group_pairs: []
- type: list
- returned: on create/update
-ip_permissions_egress:
- description: Outbound rules associated with the security group.
- sample:
- - ip_protocol: -1
- ip_ranges:
- - cidr_ip: "0.0.0.0/0"
- ipv6_ranges: []
- prefix_list_ids: []
- user_id_group_pairs: []
- type: list
- returned: on create/update
-owner_id:
- description: AWS Account ID of the security group
- sample: 123456789012
- type: int
- returned: on create/update
"""
import itertools
@@ -532,7 +624,7 @@ def rule_cmp(a, b):
# equal protocols can interchange `(-1, -1)` and `(None, None)`
if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)):
continue
- elif getattr(a, prop) != getattr(b, prop):
+ if getattr(a, prop) != getattr(b, prop):
return False
elif getattr(a, prop) != getattr(b, prop):
return False
@@ -1296,8 +1388,7 @@ def flatten_nested_targets(module, rules):
date="2024-12-01",
collection_name="amazon.aws",
)
- for t in _flatten(target):
- yield t
+ yield from _flatten(target)
elif isinstance(target, string_types):
yield target
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
index 8b7a04ba1..fe1002f2c 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py
@@ -107,6 +107,10 @@ security_groups:
type: list
elements: dict
contains:
+ from_port:
+ description: If the protocol is TCP or UDP, this is the start of the port range.
+ type: int
+ sample: 80
ip_protocol:
description: The IP protocol name or number.
returned: always
@@ -141,6 +145,10 @@ security_groups:
description: The ID of the prefix.
returned: always
type: str
+ to_port:
+ description: If the protocol is TCP or UDP, this is the end of the port range.
+ type: int
+ sample: 80
user_id_group_pairs:
description: The security group and AWS account ID pairs.
returned: always
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
index 6fa2ca47b..de63d3703 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py
@@ -329,22 +329,6 @@ def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True):
return vol
-def get_volumes(module, ec2_conn):
- instance = module.params.get("instance")
-
- find_params = dict()
- if instance:
- find_params["Filters"] = ansible_dict_to_boto3_filter_list({"attachment.instance-id": instance})
-
- vols = []
- try:
- vols_response = ec2_conn.describe_volumes(aws_retry=True, **find_params)
- vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get("Volumes", [])]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error while getting EBS volumes")
- return vols
-
-
def delete_volume(module, ec2_conn, volume_id=None):
changed = False
if volume_id:
@@ -858,7 +842,7 @@ def main():
elif state == "absent":
if not name and not param_id:
module.fail_json("A volume name or id is required for deletion")
- if volume:
+ if volume and volume.get("state") not in ("deleting", "deleted"):
if module.check_mode:
module.exit_json(changed=True, msg="Would have deleted volume if not in check mode.")
detach_volume(module, ec2_conn, volume_dict=volume)
diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
index 34f12e789..1d41b89ea 100644
--- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
+++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py
@@ -843,7 +843,8 @@ def ensure_route_table_present(connection, module):
if changed:
# pause to allow route table routes/subnets/associations to be updated before exiting with final state
sleep(5)
- module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table))
+
+ return dict(changed=changed, route_table=get_route_table_info(connection, module, route_table))
def main():
diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
index ac3bb3642..25ebd8c84 100644
--- a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
+++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py
@@ -236,7 +236,7 @@ EXAMPLES = r"""
Port: 80 # Required. The port on which the load balancer is listening.
# The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
SslPolicy: ELBSecurityPolicy-2015-05
- Certificates: # The ARN of the certificate (only one certficate ARN should be provided)
+ Certificates: # The ARN of the certificate
- CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
DefaultActions:
- Type: forward # Required.
@@ -260,7 +260,7 @@ EXAMPLES = r"""
Port: 80 # Required. The port on which the load balancer is listening.
# The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
SslPolicy: ELBSecurityPolicy-2015-05
- Certificates: # The ARN of the certificate (only one certficate ARN should be provided)
+ Certificates: # The ARN of the certificate
- CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
DefaultActions:
- Type: forward # Required.
@@ -330,6 +330,29 @@ EXAMPLES = r"""
Type: forward
state: present
+# Create an ALB with a listener having multiple listener certificates
+- amazon.aws.elb_application_lb:
+ name: myalb
+ security_groups:
+ - sg-12345678
+ - my-sec-group
+ subnets:
+ - subnet-012345678
+ - subnet-abcdef000
+ listeners:
+ - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
+ Port: 80 # Required. The port on which the load balancer is listening.
+ # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
+ SslPolicy: ELBSecurityPolicy-2015-05
+ Certificates: # The ARN of the certificate (first certificate in the list will be set as default certificate)
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/test.domain.com
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/secondtest.domain.com
+ - CertificateArn: arn:aws:iam::123456789012:server-certificate/thirdtest.domain.com
+ DefaultActions:
+ - Type: forward # Required.
+ TargetGroupName: # Required. The name of the target group
+ state: present
+
# Remove an ALB
- amazon.aws.elb_application_lb:
name: myalb
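
The new example above relies on a listener accepting several certificates, with the first entry acting as the default and the rest served via SNI. A hedged boto3 sketch of how that split looks at the API level (all ARNs are placeholders; this is not the collection's own code path):

import boto3

elbv2 = boto3.client("elbv2")

certs = [
    "arn:aws:acm:us-east-1:123456789012:certificate/default-cert",
    "arn:aws:acm:us-east-1:123456789012:certificate/extra-cert-1",
    "arn:aws:acm:us-east-1:123456789012:certificate/extra-cert-2",
]

# The first certificate becomes the listener's default certificate...
listener = elbv2.create_listener(
    LoadBalancerArn="arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/myalb/1234567890abcdef",
    Protocol="HTTPS",
    Port=443,
    SslPolicy="ELBSecurityPolicy-2015-05",
    Certificates=[{"CertificateArn": certs[0]}],
    DefaultActions=[
        {
            "Type": "forward",
            "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/mytg/1234567890abcdef",
        }
    ],
)["Listeners"][0]

# ...and the remaining ones are attached as additional (SNI) certificates.
elbv2.add_listener_certificates(
    ListenerArn=listener["ListenerArn"],
    Certificates=[{"CertificateArn": arn} for arn in certs[1:]],
)
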
diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
index 4008b8029..60134f0e3 100644
--- a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
+++ b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py
@@ -1412,7 +1412,7 @@ class ElbManager:
if not self.health_check:
return False
- """Set health check values on ELB as needed"""
+ # Set health check values on ELB as needed
health_check_config = self._format_healthcheck()
if self.elb and health_check_config == self.elb["HealthCheck"]:
@@ -1490,14 +1490,6 @@ class ElbManager:
def _policy_name(self, policy_type):
return f"ec2-elb-lb-{policy_type}"
- def _get_listener_policies(self):
- """Get a list of listener policies mapped to the LoadBalancerPort"""
- if not self.elb:
- return {}
- listener_descriptions = self.elb.get("ListenerDescriptions", [])
- policies = {l["LoadBalancerPort"]: l["PolicyNames"] for l in listener_descriptions}
- return policies
-
def _set_listener_policies(self, port, policies):
self.changed = True
if self.check_mode:
@@ -1705,7 +1697,7 @@ class ElbManager:
proxy_protocol = listener.get("proxy_protocol", None)
# Only look at the listeners for which proxy_protocol is defined
if proxy_protocol is None:
- next
+ continue
instance_port = listener.get("instance_port")
if proxy_ports.get(instance_port, None) is not None:
if proxy_ports[instance_port] != proxy_protocol:
@@ -1725,10 +1717,10 @@ class ElbManager:
if any(proxy_ports.values()):
changed |= self._set_proxy_protocol_policy(proxy_policy_name)
- for port in proxy_ports:
+ for port, port_policy in proxy_ports.items():
current_policies = set(backend_policies.get(port, []))
new_policies = list(current_policies - proxy_policies)
- if proxy_ports[port]:
+ if port_policy:
new_policies.append(proxy_policy_name)
changed |= self._set_backend_policy(port, new_policies)
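
Two small but real fixes in this hunk: a bare `next` is only an expression naming the builtin, so it never skipped the listener as intended, and iterating with `.items()` avoids repeated `proxy_ports[port]` lookups. A tiny standalone demonstration of the first point:

values = [None, "HTTP:80", None, "TCP:25"]

kept = []
for value in values:
    if value is None:
        next  # no-op: just references the builtin and discards it
    kept.append(value)
print(kept)  # [None, 'HTTP:80', None, 'TCP:25'] -- nothing was skipped

kept = []
for value in values:
    if value is None:
        continue  # actually moves on to the next iteration
    kept.append(value)
print(kept)  # ['HTTP:80', 'TCP:25']
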
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
index fb2d98e08..0a654dec5 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py
@@ -340,7 +340,7 @@ def main():
"The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are"
" returned for now."
),
- date="2024-08-01",
+ version="9.0.0",
collection_name="amazon.aws",
)
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_role.py b/ansible_collections/amazon/aws/plugins/modules/iam_role.py
index a7da38c31..3262a7a92 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_role.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_role.py
@@ -174,8 +174,8 @@ iam_role:
description:
- the policy that grants an entity permission to assume the role
- |
- note: the case of keys in this dictionary are currently converted from CamelCase to
- snake_case. In a release after 2023-12-01 this behaviour will change
+        Note: the case of keys in this dictionary is no longer converted from CamelCase to
+ snake_case. This behaviour changed in release 8.0.0.
type: dict
returned: always
sample: {
@@ -192,23 +192,14 @@ iam_role:
'version': '2012-10-17'
}
assume_role_policy_document_raw:
- description: the policy that grants an entity permission to assume the role
+ description:
+ - |
+ Note: this return value has been deprecated and will be removed in a release after
+ 2026-05-01. assume_role_policy_document and assume_role_policy_document_raw now use
+ the same format.
type: dict
returned: always
version_added: 5.3.0
- sample: {
- 'Statement': [
- {
- 'Action': 'sts:AssumeRole',
- 'Effect': 'Allow',
- 'Principal': {
- 'Service': 'ec2.amazonaws.com'
- },
- 'Sid': ''
- }
- ],
- 'Version': '2012-10-17'
- }
attached_policies:
description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
@@ -504,7 +495,7 @@ def create_or_update_role(module, client):
role["AttachedPolicies"] = list_iam_role_attached_policies(client, role_name)
camel_role = normalize_iam_role(role, _v7_compat=True)
- module.exit_json(changed=changed, iam_role=camel_role, **camel_role)
+ module.exit_json(changed=changed, iam_role=camel_role)
def create_instance_profiles(client, check_mode, role_name, path):
@@ -658,17 +649,10 @@ def main():
)
module.deprecate(
- "All return values other than iam_role and changed have been deprecated and "
- "will be removed in a release after 2023-12-01.",
- date="2023-12-01",
- collection_name="amazon.aws",
- )
- module.deprecate(
- "In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document "
- "will no longer be converted from CamelCase to snake_case. The "
- "iam_role.assume_role_policy_document_raw return value already returns the "
- "policy document in this future format.",
- date="2023-12-01",
+ "In a release after 2026-05-01 iam_role.assume_role_policy_document_raw "
+ "will no longer be returned. Since release 8.0.0 assume_role_policy_document "
+        "has been returned in the same format as iam_role.assume_role_policy_document_raw.",
+ date="2026-05-01",
collection_name="amazon.aws",
)
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py
index e77689878..fb4a06466 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py
@@ -67,12 +67,16 @@ iam_roles:
description:
- The policy that grants an entity permission to assume the role
- |
- Note: the case of keys in this dictionary are currently converted from CamelCase to
- snake_case. In a release after 2023-12-01 this behaviour will change.
+      Note: the case of keys in this dictionary is no longer converted from CamelCase to
+ snake_case. This behaviour changed in release 8.0.0.
returned: always
type: dict
assume_role_policy_document_raw:
- description: The policy document describing what can assume the role.
+ description:
+ - |
+ Note: this return value has been deprecated and will be removed in a release after
+ 2026-05-01. assume_role_policy_document and assume_role_policy_document_raw now use
+ the same format.
returned: always
type: dict
version_added: 5.3.0
@@ -208,11 +212,10 @@ def main():
path_prefix = module.params["path_prefix"]
module.deprecate(
- "In a release after 2023-12-01 the contents of assume_role_policy_document "
- "will no longer be converted from CamelCase to snake_case. The "
- ".assume_role_policy_document_raw return value already returns the "
- "policy document in this future format.",
- date="2023-12-01",
+ "In a release after 2026-05-01 iam_role.assume_role_policy_document_raw "
+ "will no longer be returned. Since release 8.0.0 assume_role_policy_document "
+        "has been returned in the same format as iam_role.assume_role_policy_document_raw.",
+ date="2026-05-01",
collection_name="amazon.aws",
)
@@ -226,10 +229,10 @@ def main():
if validation_error:
_prefix = "/" if not path_prefix.startswith("/") else ""
_suffix = "/" if not path_prefix.endswith("/") else ""
- path_prefix = "{_prefix}{path_prefix}{_suffix}"
+ path_prefix = f"{_prefix}{path_prefix}{_suffix}"
module.deprecate(
"In a release after 2026-05-01 paths must begin and end with /. "
- "path_prefix has been modified to '{path_prefix}'",
+ f"path_prefix has been modified to '{path_prefix}'",
date="2026-05-01",
collection_name="amazon.aws",
)
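
The two changes above add missing `f` prefixes: without them Python treats the braces as literal text, so the module previously assigned (and warned about) the placeholder string itself rather than the normalized prefix. A tiny illustration:

path_prefix = "custom/path"
_prefix = "/" if not path_prefix.startswith("/") else ""
_suffix = "/" if not path_prefix.endswith("/") else ""

broken = "{_prefix}{path_prefix}{_suffix}"  # literal braces, no interpolation
fixed = f"{_prefix}{path_prefix}{_suffix}"  # interpolated
print(broken)  # {_prefix}{path_prefix}{_suffix}
print(fixed)   # /custom/path/
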
diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key.py b/ansible_collections/amazon/aws/plugins/modules/kms_key.py
index 82f73b370..47e52978d 100644
--- a/ansible_collections/amazon/aws/plugins/modules/kms_key.py
+++ b/ansible_collections/amazon/aws/plugins/modules/kms_key.py
@@ -156,6 +156,7 @@ notes:
This can cause issues when running duplicate tasks in succession or using the M(amazon.aws.kms_key_info) module to fetch key metadata
shortly after modifying keys.
For this reason, it is recommended to use the return data from this module (M(amazon.aws.kms_key)) to fetch a key's metadata.
+ - The C(policies) return key was removed in amazon.aws release 8.0.0.
"""
EXAMPLES = r"""
@@ -281,41 +282,6 @@ aliases:
sample:
- aws/acm
- aws/ebs
-policies:
- description: List of policy documents for the key. Empty when access is denied even if there are policies.
- type: list
- returned: always
- elements: str
- sample:
- Version: "2012-10-17"
- Id: "auto-ebs-2"
- Statement:
- - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
- Effect: "Allow"
- Principal:
- AWS: "*"
- Action:
- - "kms:Encrypt"
- - "kms:Decrypt"
- - "kms:ReEncrypt*"
- - "kms:GenerateDataKey*"
- - "kms:CreateGrant"
- - "kms:DescribeKey"
- Resource: "*"
- Condition:
- StringEquals:
- kms:CallerAccount: "123456789012"
- kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- - Sid: "Allow direct access to key metadata to the account"
- Effect: "Allow"
- Principal:
- AWS: "arn:aws:iam::123456789012:root"
- Action:
- - "kms:Describe*"
- - "kms:Get*"
- - "kms:List*"
- - "kms:RevokeGrant"
- Resource: "*"
key_policies:
description: List of policy documents for the key. Empty when access is denied even if there are policies.
type: list
@@ -435,14 +401,6 @@ multi_region:
sample: False
"""
-# these mappings are used to go from simple labels to the actual 'Sid' values returned
-# by get_policy. They seem to be magic values.
-statement_label = {
- "role": "Allow use of the key",
- "role grant": "Allow attachment of persistent resources",
- "admin": "Allow access for Key Administrators",
-}
-
import json
try:
@@ -462,12 +420,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_
@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
-def get_iam_roles_with_backoff(connection):
- paginator = connection.get_paginator("list_roles")
- return paginator.paginate().build_full_result()
-
-
-@AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0)
def get_kms_keys_with_backoff(connection):
paginator = connection.get_paginator("list_keys")
return paginator.paginate().build_full_result()
@@ -598,20 +550,11 @@ def get_key_details(connection, module, key_id):
module.fail_json_aws(e, msg="Failed to obtain key grants")
tags = get_kms_tags(connection, module, key_id)
result["tags"] = boto3_tag_list_to_ansible_dict(tags, "TagKey", "TagValue")
- result["policies"] = get_kms_policies(connection, module, key_id)
- result["key_policies"] = [json.loads(policy) for policy in result["policies"]]
+ policies = get_kms_policies(connection, module, key_id)
+ result["key_policies"] = [json.loads(policy) for policy in policies]
return result
-def get_kms_facts(connection, module):
- try:
- keys = get_kms_keys_with_backoff(connection)["Keys"]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to obtain keys")
-
- return [get_key_details(connection, module, key["KeyId"]) for key in keys]
-
-
def convert_grant_params(grant, key):
grant_params = dict(KeyId=key["key_arn"], GranteePrincipal=grant["grantee_principal"])
if grant.get("operations"):
@@ -947,13 +890,6 @@ def delete_key(connection, module, key_metadata):
return result
-def get_arn_from_role_name(iam, rolename):
- ret = iam.get_role(RoleName=rolename)
- if ret.get("Role") and ret["Role"].get("Arn"):
- return ret["Role"]["Arn"]
- raise Exception(f"could not find arn for name {rolename}.")
-
-
def canonicalize_alias_name(alias):
if alias is None:
return None
@@ -1037,15 +973,6 @@ def main():
kms = module.client("kms")
- module.deprecate(
- (
- "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned"
- " for now."
- ),
- date="2024-05-01",
- collection_name="amazon.aws",
- )
-
key_metadata = fetch_key_metadata(kms, module, module.params.get("key_id"), module.params.get("alias"))
validate_params(module, key_metadata)
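
With the deprecated C(policies) return key (raw JSON strings) removed above, only the decoded C(key_policies) list remains. A minimal sketch of the string-to-dict conversion the module keeps (the sample policy document is illustrative only):

import json

# kms:GetKeyPolicy returns each policy document as a JSON string.
policies = ['{"Version": "2012-10-17", "Id": "key-default-1", "Statement": []}']

# The module now exposes only the decoded dictionaries under key_policies.
key_policies = [json.loads(policy) for policy in policies]
print(key_policies[0]["Id"])  # key-default-1
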
diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
index 4ba249940..6f0eb2f4b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py
@@ -49,6 +49,8 @@ options:
description: Whether to get full details (tags, grants etc.) of keys pending deletion.
default: False
type: bool
+notes:
+ - The C(policies) return key was removed in amazon.aws release 8.0.0.
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
@@ -154,41 +156,6 @@ kms_keys:
sample:
Name: myKey
Purpose: protecting_stuff
- policies:
- description: List of policy documents for the key. Empty when access is denied even if there are policies.
- type: list
- returned: always
- elements: str
- sample:
- Version: "2012-10-17"
- Id: "auto-ebs-2"
- Statement:
- - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
- Effect: "Allow"
- Principal:
- AWS: "*"
- Action:
- - "kms:Encrypt"
- - "kms:Decrypt"
- - "kms:ReEncrypt*"
- - "kms:GenerateDataKey*"
- - "kms:CreateGrant"
- - "kms:DescribeKey"
- Resource: "*"
- Condition:
- StringEquals:
- kms:CallerAccount: "123456789012"
- kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- - Sid: "Allow direct access to key metadata to the account"
- Effect: "Allow"
- Principal:
- AWS: "arn:aws:iam::123456789012:root"
- Action:
- - "kms:Describe*"
- - "kms:Get*"
- - "kms:List*"
- - "kms:RevokeGrant"
- Resource: "*"
key_policies:
description: List of policy documents for the key. Empty when access is denied even if there are policies.
type: list
@@ -480,8 +447,8 @@ def get_key_details(connection, module, key_id, tokens=None):
result = camel_dict_to_snake_dict(result)
result["tags"] = boto3_tag_list_to_ansible_dict(tags, "TagKey", "TagValue")
- result["policies"] = get_kms_policies(connection, module, key_id)
- result["key_policies"] = [json.loads(policy) for policy in result["policies"]]
+ policies = get_kms_policies(connection, module, key_id)
+ result["key_policies"] = [json.loads(policy) for policy in policies]
return result
@@ -523,15 +490,6 @@ def main():
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to connect to AWS")
- module.deprecate(
- (
- "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned"
- " for now."
- ),
- date="2024-05-01",
- collection_name="amazon.aws",
- )
-
all_keys = get_kms_info(connection, module)
filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params["filters"])]
ret_params = dict(kms_keys=filtered_keys)
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py
index c916ae8e8..424ad5abe 100644
--- a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py
@@ -54,22 +54,28 @@ options:
type: str
source_params:
description:
- - Sub-parameters required for event source.
+ - Sub-parameters required for event source.
suboptions:
source_arn:
description:
- - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source.
+ - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source.
type: str
required: true
enabled:
description:
- - Indicates whether AWS Lambda should begin polling or readin from the event source.
+          - Indicates whether AWS Lambda should begin polling or reading from the event source.
default: true
type: bool
batch_size:
description:
- - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
- default: 100
+ - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
+ - Amazon Kinesis - Default V(100). Max V(10000).
+ - Amazon DynamoDB Streams - Default V(100). Max V(10000).
+ - Amazon Simple Queue Service - Default V(10). For standard queues the max is V(10000). For FIFO queues the max is V(10).
+ - Amazon Managed Streaming for Apache Kafka - Default V(100). Max V(10000).
+          - Self-managed Apache Kafka - Default V(100). Max V(10000).
+ - Amazon MQ (ActiveMQ and RabbitMQ) - Default V(100). Max V(10000).
+ - DocumentDB - Default V(100). Max V(10000).
type: int
starting_position:
description:
@@ -84,6 +90,15 @@ options:
elements: str
choices: [ReportBatchItemFailures]
version_added: 5.5.0
+ maximum_batching_window_in_seconds:
+ description:
+ - The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.
+          - You can configure O(source_params.maximum_batching_window_in_seconds) to any value from V(0) seconds to V(300) seconds, in increments of one second.
+ - For streams and Amazon SQS event sources, when O(source_params.batch_size) is set to a value greater than V(10),
+ O(source_params.maximum_batching_window_in_seconds) defaults to V(1).
+ - O(source_params.maximum_batching_window_in_seconds) is not supported by FIFO queues.
+ type: int
+ version_added: 8.0.0
required: true
type: dict
extends_documentation_fragment:
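
For context, the new O(source_params.maximum_batching_window_in_seconds) suboption feeds the C(MaximumBatchingWindowInSeconds) parameter of Lambda's event source mapping API (see the api_params handling further down in this diff). A hedged boto3 sketch of the equivalent direct call, with placeholder ARNs:

import boto3

lambda_client = boto3.client("lambda")

# Collect up to 100 Kinesis records per invocation, but wait at most 5 seconds
# before invoking even if the batch is not full.
lambda_client.create_event_source_mapping(
    FunctionName="arn:aws:lambda:us-east-1:123456789012:function:myFunction",
    EventSourceArn="arn:aws:kinesis:us-east-1:123456789012:stream/my-stream",
    Enabled=True,
    BatchSize=100,
    MaximumBatchingWindowInSeconds=5,
    StartingPosition="LATEST",
)
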
@@ -135,9 +150,11 @@ lambda_stream_events:
type: list
"""
+import copy
import re
try:
+ from botocore.exceptions import BotoCoreError
from botocore.exceptions import ClientError
from botocore.exceptions import MissingParametersError
from botocore.exceptions import ParamValidationError
@@ -146,9 +163,9 @@ except ImportError:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto3_conn
-from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_aws_connection_info
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
# ---------------------------------------------------------------------------------------------------
#
@@ -157,122 +174,47 @@ from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleA
# ---------------------------------------------------------------------------------------------------
-class AWSConnection:
- """
- Create the connection object and client objects as required.
- """
-
- def __init__(self, ansible_obj, resources, use_boto3=True):
- try:
- self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
-
- self.resource_client = dict()
- if not resources:
- resources = ["lambda"]
-
- resources.append("iam")
-
- for resource in resources:
- aws_connect_kwargs.update(
- dict(region=self.region, endpoint=self.endpoint, conn_type="client", resource=resource)
- )
- self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
-
- # if region is not provided, then get default profile/session region
- if not self.region:
- self.region = self.resource_client["lambda"].meta.region_name
-
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- ansible_obj.fail_json(msg=f"Unable to connect, authorize or access resource: {e}")
-
- # set account ID
- try:
- self.account_id = self.resource_client["iam"].get_user()["User"]["Arn"].split(":")[4]
- except (ClientError, ValueError, KeyError, IndexError):
- self.account_id = ""
-
- def client(self, resource="lambda"):
- return self.resource_client[resource]
-
-
-def pc(key):
- """
- Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
-
- :param key:
- :return:
- """
-
- return "".join([token.capitalize() for token in key.split("_")])
-
-
-def ordered_obj(obj):
- """
- Order object for comparison purposes
-
- :param obj:
- :return:
- """
-
- if isinstance(obj, dict):
- return sorted((k, ordered_obj(v)) for k, v in obj.items())
- if isinstance(obj, list):
- return sorted(ordered_obj(x) for x in obj)
- else:
- return obj
-
-
-def set_api_sub_params(params):
- """
- Sets module sub-parameters to those expected by the boto3 API.
-
- :param params:
- :return:
- """
-
- api_params = dict()
-
- for param in params.keys():
- param_value = params.get(param, None)
- if param_value:
- api_params[pc(param)] = param_value
-
- return api_params
-
-
-def validate_params(module, aws):
+def validate_params(module, client):
"""
Performs basic parameter validation.
- :param module:
- :param aws:
+ :param module: The AnsibleAWSModule object
+ :param client: The client used to perform requests to AWS
:return:
"""
function_name = module.params["lambda_function_arn"]
+ qualifier = get_qualifier(module)
# validate function name
if not re.search(r"^[\w\-:]+$", function_name):
module.fail_json(
msg=f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens.",
)
- if len(function_name) > 64 and not function_name.startswith("arn:aws:lambda:"):
- module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit')
- elif len(function_name) > 140 and function_name.startswith("arn:aws:lambda:"):
- module.fail_json(msg=f'ARN "{function_name}" exceeds 140 character limit')
-
- # check if 'function_name' needs to be expanded in full ARN format
- if not module.params["lambda_function_arn"].startswith("arn:aws:lambda:"):
- function_name = module.params["lambda_function_arn"]
- module.params["lambda_function_arn"] = f"arn:aws:lambda:{aws.region}:{aws.account_id}:function:{function_name}"
-
- qualifier = get_qualifier(module)
- if qualifier:
- function_arn = module.params["lambda_function_arn"]
- module.params["lambda_function_arn"] = f"{function_arn}:{qualifier}"
+    # lambda_function_arn contains only the function name (not the full ARN)
+ if not function_name.startswith("arn:aws:lambda:"):
+ if len(function_name) > 64:
+ module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit')
+ try:
+ params = {"FunctionName": function_name}
+ if qualifier:
+ params["Qualifier"] = qualifier
+ response = client.get_function(**params)
+ module.params["lambda_function_arn"] = response["Configuration"]["FunctionArn"]
+ except is_boto3_error_code("ResourceNotFoundException"):
+ msg = f"An error occurred: The function '{function_name}' does not exist."
+ if qualifier:
+ msg = f"An error occurred: The function '{function_name}' (qualifier={qualifier}) does not exist."
+ module.fail_json(msg=msg)
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json(msg=f"An error occurred while trying to describe function '{function_name}': {e}")
+ else:
+ if len(function_name) > 140:
+ module.fail_json(msg=f'ARN "{function_name}" exceeds 140 character limit')
- return
+ if qualifier:
+ module.params["lambda_function_arn"] = f"{function_name}:{qualifier}"
def get_qualifier(module):
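
The rewritten validation above no longer assembles the ARN from the region and account id; it asks the Lambda API instead. A compact standalone sketch of that lookup (error handling omitted; resolve_function_arn is a hypothetical helper, not the module's own code):

import boto3

def resolve_function_arn(name, qualifier=None):
    """Resolve a bare Lambda function name (optionally qualified) to its full ARN."""
    client = boto3.client("lambda")
    params = {"FunctionName": name}
    if qualifier:
        params["Qualifier"] = qualifier
    return client.get_function(**params)["Configuration"]["FunctionArn"]

# e.g. resolve_function_arn("myFunction", qualifier="PROD")
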
@@ -302,7 +244,38 @@ def get_qualifier(module):
# ---------------------------------------------------------------------------------------------------
-def lambda_event_stream(module, aws):
+def set_default_values(module, source_params):
+ _source_params_cpy = copy.deepcopy(source_params)
+
+ if module.params["event_source"].lower() == "sqs":
+ # Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
+ _source_params_cpy.setdefault("batch_size", 10)
+
+ if source_params["source_arn"].endswith(".fifo"):
+ if _source_params_cpy["batch_size"] > 10:
+ module.fail_json(msg="For FIFO queues the maximum batch_size is 10.")
+ if _source_params_cpy.get("maximum_batching_window_in_seconds"):
+ module.fail_json(
+ msg="maximum_batching_window_in_seconds is not supported by Amazon SQS FIFO event sources."
+ )
+ else:
+            if _source_params_cpy["batch_size"] > 10000:
+                module.fail_json(msg="For standard queues the maximum batch_size is 10000.")
+
+ elif module.params["event_source"].lower() == "stream":
+ # Default 100.
+ _source_params_cpy.setdefault("batch_size", 100)
+
+ if not (100 <= _source_params_cpy["batch_size"] <= 10000):
+ module.fail_json(msg="batch_size for streams must be between 100 and 10000")
+
+ if _source_params_cpy["batch_size"] > 10 and not _source_params_cpy.get("maximum_batching_window_in_seconds"):
+ _source_params_cpy["maximum_batching_window_in_seconds"] = 1
+
+ return _source_params_cpy
+
+
+def lambda_event_stream(module, client):
"""
Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.
:param module:
@@ -310,7 +283,6 @@ def lambda_event_stream(module, aws):
:return:
"""
- client = aws.client("lambda")
facts = dict()
changed = False
current_state = "absent"
@@ -327,15 +299,8 @@ def lambda_event_stream(module, aws):
else:
module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
- # check if optional sub-parameters are valid, if present
- batch_size = source_params.get("batch_size")
- if batch_size:
- try:
- source_params["batch_size"] = int(batch_size)
- except ValueError:
- module.fail_json(
- msg=f"Source parameter 'batch_size' must be an integer, found: {source_params['batch_size']}"
- )
+ if state == "present":
+ source_params = set_default_values(module, source_params)
# optional boolean value needs special treatment as not present does not imply False
source_param_enabled = module.boolean(source_params.get("enabled", "True"))
@@ -351,18 +316,21 @@ def lambda_event_stream(module, aws):
if state == "present":
if current_state == "absent":
starting_position = source_params.get("starting_position")
- if starting_position:
+ event_source = module.params.get("event_source")
+ if event_source == "stream":
+ if not starting_position:
+ module.fail_json(
+ msg="Source parameter 'starting_position' is required for stream event notification."
+ )
api_params.update(StartingPosition=starting_position)
- elif module.params.get("event_source") == "sqs":
- # starting position is not required for SQS
- pass
- else:
- module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
-
- if source_arn:
- api_params.update(Enabled=source_param_enabled)
+
+ api_params.update(Enabled=source_param_enabled)
if source_params.get("batch_size"):
api_params.update(BatchSize=source_params.get("batch_size"))
+ if source_params.get("maximum_batching_window_in_seconds"):
+ api_params.update(
+ MaximumBatchingWindowInSeconds=source_params.get("maximum_batching_window_in_seconds")
+ )
if source_params.get("function_response_types"):
api_params.update(FunctionResponseTypes=source_params.get("function_response_types"))
@@ -375,9 +343,8 @@ def lambda_event_stream(module, aws):
else:
# current_state is 'present'
- api_params = dict(FunctionName=module.params["lambda_function_arn"])
current_mapping = facts[0]
- api_params.update(UUID=current_mapping["UUID"])
+ api_params = dict(FunctionName=module.params["lambda_function_arn"], UUID=current_mapping["UUID"])
mapping_changed = False
# check if anything changed
@@ -426,7 +393,18 @@ def main():
state=dict(required=False, default="present", choices=["present", "absent"]),
lambda_function_arn=dict(required=True, aliases=["function_name", "function_arn"]),
event_source=dict(required=False, default="stream", choices=source_choices),
- source_params=dict(type="dict", required=True),
+ source_params=dict(
+ type="dict",
+ required=True,
+ options=dict(
+ source_arn=dict(type="str", required=True),
+ enabled=dict(type="bool", default=True),
+ batch_size=dict(type="int"),
+ starting_position=dict(type="str", choices=["TRIM_HORIZON", "LATEST"]),
+ function_response_types=dict(type="list", elements="str", choices=["ReportBatchItemFailures"]),
+ maximum_batching_window_in_seconds=dict(type="int"),
+ ),
+ ),
alias=dict(required=False, default=None),
version=dict(type="int", required=False, default=0),
)
@@ -438,12 +416,15 @@ def main():
required_together=[],
)
- aws = AWSConnection(module, ["lambda"])
+ try:
+ client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff())
+ except (ClientError, BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to connect to AWS")
- validate_params(module, aws)
+ validate_params(module, client)
if module.params["event_source"].lower() in ("stream", "sqs"):
- results = lambda_event_stream(module, aws)
+ results = lambda_event_stream(module, client)
else:
module.fail_json(msg="Please select `stream` or `sqs` as the event type")
diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py
index 83ba4feaa..fbd443bb7 100644
--- a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py
@@ -95,7 +95,7 @@ functions:
elements: str
architectures:
description: The architectures supported by the function.
- returned: successful run where botocore >= 1.21.51
+ returned: success
type: list
elements: str
sample: ['arm64']
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
index 0e5634e59..30a7145e7 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py
@@ -170,7 +170,6 @@ options:
- For the full list of DB instance classes and availability for your engine visit
U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html).
- This setting is required to create a Multi-AZ DB cluster.
- - I(db_cluster_instance_class) require botocore >= 1.23.44.
type: str
version_added: 5.5.0
enable_iam_database_authentication:
@@ -182,7 +181,6 @@ options:
description:
- The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.
- This setting is required to create a Multi-AZ DB cluster.
- - I(allocated_storage) require botocore >= 1.23.44.
type: int
version_added: 5.5.0
storage_type:
@@ -190,7 +188,6 @@ options:
- Specifies the storage type to be associated with the DB cluster.
- This setting is required to create a Multi-AZ DB cluster.
- When specified, a value for the I(iops) parameter is required.
- - I(storage_type) require botocore >= 1.23.44.
- Defaults to C(io1).
type: str
choices:
@@ -201,7 +198,6 @@ options:
- The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.
- This setting is required to create a Multi-AZ DB cluster
- Must be a multiple between .5 and 50 of the storage amount for the DB cluster.
- - I(iops) require botocore >= 1.23.44.
type: int
version_added: 5.5.0
engine:
@@ -1174,7 +1170,7 @@ def ensure_present(cluster, parameters, method_name, method_options_name):
return changed
-def handle_remove_from_global_db(module, cluster):
+def handle_remove_from_global_db(cluster):
global_cluster_id = module.params.get("global_cluster_identifier")
db_cluster_id = module.params.get("db_cluster_identifier")
db_cluster_arn = cluster["DBClusterArn"]
@@ -1361,7 +1357,7 @@ def main():
if method_name == "delete_db_cluster":
if cluster and module.params.get("remove_from_global_db"):
if cluster["Engine"] in ["aurora", "aurora-mysql", "aurora-postgresql"]:
- changed = handle_remove_from_global_db(module, cluster)
+ changed = handle_remove_from_global_db(cluster)
call_method(client, module, method_name, eval(method_options_name)(parameters))
changed = True
@@ -1377,7 +1373,7 @@ def main():
if cluster["Engine"] in ["aurora", "aurora-mysql", "aurora-postgresql"]:
if changed:
wait_for_cluster_status(client, module, cluster_id, "cluster_available")
- changed |= handle_remove_from_global_db(module, cluster)
+ changed |= handle_remove_from_global_db(cluster)
result = camel_dict_to_snake_dict(get_cluster(cluster_id))
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group.py
new file mode 100644
index 000000000..dc94bca1a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: rds_cluster_param_group
+version_added: 7.6.0
+short_description: Manage RDS cluster parameter groups
+description:
+ - Creates, modifies, and deletes RDS cluster parameter groups.
+options:
+ state:
+ description:
+ - Specifies whether the RDS cluster parameter group should be present or absent.
+ default: present
+ choices: [ 'present' , 'absent' ]
+ type: str
+ name:
+ description:
+ - The name of the RDS cluster parameter group to create, modify or delete.
+ required: true
+ type: str
+ description:
+ description:
+ - The description for the RDS cluster parameter group.
+ - Required for O(state=present).
+ type: str
+ db_parameter_group_family:
+ description:
+ - The RDS cluster parameter group family name.
+ - An RDS cluster parameter group can be associated with one and only one RDS cluster parameter group family,
+        and can be applied only to an RDS cluster running a database engine and engine version compatible with that RDS cluster parameter group family.
+      - Please use the M(amazon.aws.rds_engine_versions_info) module to list all of the available parameter group families for a DB engine.
+      - The RDS cluster parameter group family is immutable and can't be changed when updating an RDS cluster parameter group.
+        See U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbclusterparametergroup.html).
+ - Required for O(state=present).
+ type: str
+ parameters:
+ description:
+ - A list of parameters to update.
+ type: list
+ elements: dict
+ suboptions:
+ parameter_name:
+ description: Specifies the name of the parameter.
+ type: str
+ required: true
+ parameter_value:
+ description:
+ - Specifies the value of the parameter.
+ type: str
+ required: true
+ apply_method:
+ description:
+ - Indicates when to apply parameter updates.
+ choices:
+ - immediate
+ - pending-reboot
+ type: str
+ required: true
+author:
+ - "Aubin Bikouo (@abikouo)"
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: Add or change a parameter group, in this case setting authentication_timeout to 200
+ amazon.aws.rds_cluster_param_group:
+ state: present
+ name: test-cluster-group
+ description: 'My test RDS cluster group'
+ db_parameter_group_family: 'mysql5.6'
+ parameters:
+ - parameter_name: authentication_timeout
+ parameter_value: "200"
+ apply_method: immediate
+ tags:
+ Environment: production
+ Application: parrot
+
+- name: Remove an RDS cluster parameter group
+  amazon.aws.rds_cluster_param_group:
+ state: absent
+ name: test-cluster-group
+"""
+
+RETURN = r"""
+db_cluster_parameter_group:
+ description: dictionary containing all the RDS cluster parameter group information
+ returned: success
+ type: complex
+ contains:
+ db_cluster_parameter_group_arn:
+ description: The Amazon Resource Name (ARN) for the RDS cluster parameter group.
+ type: str
+ returned: when state is present
+ db_cluster_parameter_group_name:
+ description: The name of the RDS cluster parameter group.
+ type: str
+ returned: when state is present
+ db_parameter_group_family:
+ description: The name of the RDS parameter group family that this RDS cluster parameter group is compatible with.
+ type: str
+ returned: when state is present
+ description:
+ description: Provides the customer-specified description for this RDS cluster parameter group.
+ type: str
+ returned: when state is present
+ tags:
+ description: dictionary of tags
+ type: dict
+ returned: when state is present
+"""
+
+from itertools import zip_longest
+from typing import Any
+from typing import Dict
+from typing import List
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameter_groups
+from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+
+
+def modify_parameters(
+ module: AnsibleAWSModule, connection: Any, group_name: str, parameters: List[Dict[str, Any]]
+) -> bool:
+ current_params = describe_db_cluster_parameters(module, connection, group_name)
+ parameters = snake_dict_to_camel_dict(parameters, capitalize_first=True)
+ # compare current resource parameters with the value from module parameters
+ changed = False
+ for param in parameters:
+ found = False
+ for current_p in current_params:
+ if param.get("ParameterName") == current_p.get("ParameterName"):
+ found = True
+ if not current_p["IsModifiable"]:
+ module.fail_json(f"The parameter {param.get('ParameterName')} cannot be modified")
+ changed |= any((current_p.get(k) != v for k, v in param.items()))
+ if not found:
+ module.fail_json(msg=f"Could not find parameter with name: {param.get('ParameterName')}")
+ if changed:
+ if not module.check_mode:
+            # modify_db_cluster_parameter_group() accepts at most 20 parameters
+            # per request, so the parameter list is split into chunks of up to
+            # 20 items before being sent.
+ for chunk in zip_longest(*[iter(parameters)] * 20, fillvalue=None):
+ non_empty_chunk = [item for item in chunk if item]
+ try:
+ connection.modify_db_cluster_parameter_group(
+ aws_retry=True, DBClusterParameterGroupName=group_name, Parameters=non_empty_chunk
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't update RDS cluster parameters")
+ return changed
+
+
+def ensure_present(module: AnsibleAWSModule, connection: Any) -> None:
+ group_name = module.params["name"]
+ db_parameter_group_family = module.params["db_parameter_group_family"]
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ changed = False
+
+ response = describe_db_cluster_parameter_groups(module=module, connection=connection, group_name=group_name)
+ if not response:
+ # Create RDS cluster parameter group
+ params = dict(
+ DBClusterParameterGroupName=group_name,
+ DBParameterGroupFamily=db_parameter_group_family,
+ Description=module.params["description"],
+ )
+ if tags:
+ params["Tags"] = ansible_dict_to_boto3_tag_list(tags)
+ if module.check_mode:
+            module.exit_json(changed=True, msg="Would have created the RDS cluster parameter group if not in check mode.")
+ try:
+ response = connection.create_db_cluster_parameter_group(aws_retry=True, **params)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create parameter group")
+ else:
+ group = response[0]
+ if db_parameter_group_family != group["DBParameterGroupFamily"]:
+ module.warn(
+                "The RDS cluster parameter group family is immutable and can't be changed when updating an RDS cluster parameter group."
+ )
+
+ if tags:
+ existing_tags = get_tags(connection, module, group["DBClusterParameterGroupArn"])
+ changed = ensure_tags(
+ connection, module, group["DBClusterParameterGroupArn"], existing_tags, tags, purge_tags
+ )
+
+ parameters = module.params.get("parameters")
+ if parameters:
+ changed |= modify_parameters(module, connection, group_name, parameters)
+
+ response = describe_db_cluster_parameter_groups(module=module, connection=connection, group_name=group_name)
+ group = camel_dict_to_snake_dict(response[0])
+ group["tags"] = get_tags(connection, module, group["db_cluster_parameter_group_arn"])
+
+ module.exit_json(changed=changed, db_cluster_parameter_group=group)
+
+
+def ensure_absent(module: AnsibleAWSModule, connection: Any) -> None:
+ group = module.params["name"]
+ response = describe_db_cluster_parameter_groups(module=module, connection=connection, group_name=group)
+ if not response:
+ module.exit_json(changed=False, msg="The RDS cluster parameter group does not exist.")
+
+ if not module.check_mode:
+ try:
+ response = connection.delete_db_cluster_parameter_group(aws_retry=True, DBClusterParameterGroupName=group)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete RDS cluster parameter group")
+ module.exit_json(changed=True)
+
+
+def main() -> None:
+ argument_spec = dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(required=True),
+ db_parameter_group_family=dict(),
+ description=dict(),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ parameters=dict(
+ type="list",
+ elements="dict",
+ options=dict(
+ parameter_name=dict(required=True),
+ parameter_value=dict(required=True),
+ apply_method=dict(choices=["immediate", "pending-reboot"], required=True),
+ ),
+ ),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=[["state", "present", ["description", "db_parameter_group_family"]]],
+ supports_check_mode=True,
+ )
+
+ try:
+ connection = module.client("rds", retry_decorator=AWSRetry.jittered_backoff())
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+ if module.params.get("state") == "present":
+ ensure_present(module=module, connection=connection)
+ else:
+ ensure_absent(module=module, connection=connection)
+
+
+if __name__ == "__main__":
+ main()
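
The C(modify_parameters) helper in the new module above splits updates into batches because the API accepts at most 20 parameters per call. A standalone sketch of the C(zip_longest) grouper idiom it uses (plain strings stand in for the parameter dicts):

from itertools import zip_longest

parameters = [f"param-{i}" for i in range(45)]  # stand-ins for parameter dicts

# zip_longest over 20 references to the same iterator yields fixed-size chunks,
# padding the final chunk with None; the padding is filtered out before use.
for chunk in zip_longest(*[iter(parameters)] * 20, fillvalue=None):
    non_empty_chunk = [item for item in chunk if item]
    print(len(non_empty_chunk))  # prints 20, 20, 5
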
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group_info.py
new file mode 100644
index 000000000..bad0433a7
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_param_group_info.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024 Aubin Bikouo (@abikouo)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+module: rds_cluster_param_group_info
+version_added: 7.6.0
+short_description: Describes the properties of RDS cluster parameter groups.
+description:
+  - Obtain information about one specific RDS cluster parameter group or all RDS cluster parameter groups.
+options:
+ name:
+ description:
+ - The RDS cluster parameter group name.
+ type: str
+ include_parameters:
+ description:
+ - Specifies whether to include the detailed parameters of the RDS cluster parameter group.
+ - V(all) include all parameters.
+ - V(engine-default) include engine-default parameters.
+ - V(system) include system parameters.
+ - V(user) include user parameters.
+ type: str
+ choices:
+ - all
+ - engine-default
+ - system
+ - user
+author:
+ - Aubin Bikouo (@abikouo)
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: Describe a specific RDS cluster parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: myrdsclustergroup
+
+- name: Describe all RDS cluster parameter groups
+ amazon.aws.rds_cluster_param_group_info:
+
+- name: Describe a specific RDS cluster parameter group including user parameters
+ amazon.aws.rds_cluster_param_group_info:
+ name: myrdsclustergroup
+ include_parameters: user
+"""
+
+RETURN = r"""
+db_cluster_parameter_groups:
+ description: List of RDS cluster parameter groups.
+ returned: always
+ type: list
+ contains:
+ db_cluster_parameter_group_name:
+ description:
+ - The name of the RDS cluster parameter group.
+ type: str
+ db_parameter_group_family:
+ description:
+ - The name of the RDS parameter group family that this RDS cluster parameter group is compatible with.
+ type: str
+ description:
+ description:
+ - Provides the customer-specified description for this RDS cluster parameter group.
+ type: str
+ db_cluster_parameter_group_arn:
+ description:
+ - The Amazon Resource Name (ARN) for the RDS cluster parameter group.
+ type: str
+ db_parameters:
+ description:
+ - Provides a list of parameters for the RDS cluster parameter group.
+ returned: When O(include_parameters) is set
+ type: list
+ elements: dict
+ sample: [
+ {
+ "allowed_values": "1-600",
+ "apply_method": "pending-reboot",
+ "apply_type": "dynamic",
+ "data_type": "integer",
+ "description": "(s) Sets the maximum allowed time to complete client authentication.",
+ "is_modifiable": true,
+ "parameter_name": "authentication_timeout",
+ "parameter_value": "100",
+ "source": "user",
+ "supported_engine_modes": [
+ "provisioned"
+ ]
+ }
+ ]
+ tags:
+ description: A dictionary of key value pairs.
+ type: dict
+ sample: {
+ "Name": "rds-cluster-demo"
+ }
+"""
+
+from typing import Any
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameter_groups
+from ansible_collections.amazon.aws.plugins.module_utils.rds import describe_db_cluster_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+
+def describe_rds_cluster_parameter_group(connection: Any, module: AnsibleAWSModule) -> None:
+ group_name = module.params.get("name")
+ include_parameters = module.params.get("include_parameters")
+ results = []
+ response = describe_db_cluster_parameter_groups(module, connection, group_name)
+ if response:
+ for resource in response:
+ resource["tags"] = get_tags(connection, module, resource["DBClusterParameterGroupArn"])
+ if include_parameters is not None:
+ resource["db_parameters"] = describe_db_cluster_parameters(
+ module, connection, resource["DBClusterParameterGroupName"], include_parameters
+ )
+ results.append(camel_dict_to_snake_dict(resource, ignore_list=["tags"]))
+ module.exit_json(changed=False, db_cluster_parameter_groups=results)
+
+
+def main() -> None:
+ argument_spec = dict(
+ name=dict(),
+ include_parameters=dict(choices=["user", "all", "system", "engine-default"]),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS.")
+
+ describe_rds_cluster_parameter_group(client, module)
+
+
+if __name__ == "__main__":
+ main()
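
One detail in the new info module above worth calling out: tags are attached before the camel-to-snake conversion and excluded from it via C(ignore_list), so user-supplied tag keys keep their original case. A small illustration using the same ansible-core helper (sample data is illustrative only):

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

resource = {
    "DBClusterParameterGroupName": "myrdsclustergroup",
    "tags": {"CostCenter": "R&D"},  # injected by the module before conversion
}

# Keys listed in ignore_list are not recursed into, so the tag keys are preserved.
print(camel_dict_to_snake_dict(resource, ignore_list=["tags"]))
# {'db_cluster_parameter_group_name': 'myrdsclustergroup', 'tags': {'CostCenter': 'R&D'}}
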
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_engine_versions_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_engine_versions_info.py
new file mode 100644
index 000000000..c2391946c
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_engine_versions_info.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024 Aubin Bikouo (@abikouo)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+module: rds_engine_versions_info
+version_added: 7.6.0
+short_description: Describes the properties of specific versions of DB engines.
+description:
+  - Obtain information about specific versions of DB engines.
+options:
+ engine:
+ description:
+ - The database engine to return version details for.
+ type: str
+ choices:
+ - aurora-mysql
+ - aurora-postgresql
+ - custom-oracle-ee
+ - db2-ae
+ - db2-se
+ - mariadb
+ - mysql
+ - oracle-ee
+ - oracle-ee-cdb
+ - oracle-se2
+ - oracle-se2-cdb
+ - postgres
+ - sqlserver-ee
+ - sqlserver-se
+ - sqlserver-ex
+ - sqlserver-web
+ engine_version:
+ description:
+ - A specific database engine version to return details for.
+ type: str
+ db_parameter_group_family:
+ description:
+ - The name of a specific RDS parameter group family to return details for.
+ type: str
+ default_only:
+ description:
+ - Specifies whether to return only the default version of the specified engine
+ or the engine and major version combination.
+ type: bool
+ default: False
+ filters:
+ description:
+ - A filter that specifies one or more DB engine versions to describe.
+ See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBEngineVersions.html).
+ type: dict
+author:
+ - Aubin Bikouo (@abikouo)
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+- name: List all of the available parameter group families for the Aurora PostgreSQL DB engine
+ amazon.aws.rds_engine_versions_info:
+ engine: aurora-postgresql
+
+- name: List all of the available parameter group families for the Aurora PostgreSQL DB engine on a specific version
+ amazon.aws.rds_engine_versions_info:
+ engine: aurora-postgresql
+ engine_version: 16.1
+
+- name: Get default engine version for DB parameter group family postgres16
+ amazon.aws.rds_engine_versions_info:
+ engine: postgres
+ default_only: true
+ db_parameter_group_family: postgres16
+"""
+
+RETURN = r"""
+db_engine_versions:
+ description: List of RDS engine versions.
+ returned: always
+ type: list
+ contains:
+ engine:
+ description:
+ - The name of the database engine.
+ type: str
+ engine_version:
+ description:
+ - The version number of the database engine.
+ type: str
+ db_parameter_group_family:
+ description:
+ - The name of the DB parameter group family for the database engine.
+ type: str
+ db_engine_description:
+ description:
+ - The description of the database engine.
+ type: str
+ db_engine_version_description:
+ description:
+ - The description of the database engine version.
+ type: str
+ default_character_set:
+ description:
+ - The default character set for new instances of this engine version.
+ type: dict
+ sample: {
+ "character_set_description": "Unicode 5.0 UTF-8 Universal character set",
+ "character_set_name": "AL32UTF8"
+ }
+ image:
+ description:
+        - The EC2 image.
+ type: complex
+ contains:
+ image_id:
+ description:
+ - A value that indicates the ID of the AMI.
+ type: str
+ status:
+ description:
+ - A value that indicates the status of a custom engine version (CEV).
+ type: str
+ db_engine_media_type:
+ description:
+ - A value that indicates the source media provider of the AMI based on the usage operation.
+ type: str
+ supported_character_sets:
+ description:
+ - A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance operation.
+ type: list
+ elements: dict
+ contains:
+ character_set_name:
+ description:
+ - The name of the character set.
+ type: str
+ character_set_description:
+ description:
+ - The description of the character set.
+ type: str
+ supported_nchar_character_sets:
+ description:
+ - A list of the character sets supported by the Oracle DB engine.
+ type: list
+ elements: dict
+ contains:
+ character_set_name:
+ description:
+ - The name of the character set.
+ type: str
+ character_set_description:
+ description:
+ - The description of the character set.
+ type: str
+ valid_upgrade_target:
+ description:
+ - A list of engine versions that this database engine version can be upgraded to.
+ type: list
+ elements: dict
+ sample: [
+ {
+ "auto_upgrade": false,
+ "description": "Aurora PostgreSQL (Compatible with PostgreSQL 15.5)",
+ "engine": "aurora-postgresql",
+ "engine_version": "15.5",
+ "is_major_version_upgrade": false,
+ "supported_engine_modes": [
+ "provisioned"
+ ],
+ "supports_babelfish": true,
+ "supports_global_databases": true,
+ "supports_integrations": false,
+ "supports_local_write_forwarding": true,
+ "supports_parallel_query": false
+ }
+ ]
+ supported_timezones:
+ description:
+ - A list of the time zones supported by this engine for the Timezone parameter of the CreateDBInstance action.
+ type: list
+ elements: dict
+ sample: [
+ {"TimezoneName": "xxx"}
+ ]
+ exportable_log_types:
+ description:
+ - The types of logs that the database engine has available for export to CloudWatch Logs.
+ type: list
+ elements: str
+    supports_log_exports_to_cloudwatch_logs:
+ description:
+ - Indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.
+ type: bool
+ supports_read_replica:
+ description:
+ - Indicates whether the database engine version supports read replicas.
+ type: bool
+ supported_engine_modes:
+ description:
+ - A list of the supported DB engine modes.
+ type: list
+ elements: str
+ supported_feature_names:
+ description:
+ - A list of features supported by the DB engine.
+ type: list
+ elements: str
+ sample: [
+ "Comprehend",
+ "Lambda",
+ "s3Export",
+ "s3Import",
+ "SageMaker"
+ ]
+ status:
+ description:
+ - The status of the DB engine version, either available or deprecated.
+ type: str
+ supports_parallel_query:
+ description:
+ - Indicates whether you can use Aurora parallel query with a specific DB engine version.
+ type: bool
+ supports_global_databases:
+ description:
+ - Indicates whether you can use Aurora global databases with a specific DB engine version.
+ type: bool
+ major_engine_version:
+ description:
+ - The major engine version of the CEV.
+ type: str
+ database_installation_files_s3_bucket_name:
+ description:
+ - The name of the Amazon S3 bucket that contains your database installation files.
+ type: str
+ database_installation_files_s3_prefix:
+ description:
+ - The Amazon S3 directory that contains the database installation files.
+ type: str
+ db_engine_version_arn:
+ description:
+ - The ARN of the custom engine version.
+ type: str
+ kms_key_id:
+ description:
+ - The Amazon Web Services KMS key identifier for an encrypted CEV.
+ type: str
+ create_time:
+ description:
+ - The creation time of the DB engine version.
+ type: str
+ tags:
+ description: A dictionary of key value pairs.
+ type: dict
+ sample: {
+ "some": "tag"
+ }
+ supports_babelfish:
+ description:
+ - Indicates whether the engine version supports Babelfish for Aurora PostgreSQL.
+ type: bool
+ custom_db_engine_version_manifest:
+ description:
+ - JSON string that lists the installation files and parameters that RDS Custom uses to create a custom engine version (CEV).
+ type: str
+ supports_certificate_rotation_without_restart:
+ description:
+ - Indicates whether the engine version supports rotating the server certificate without rebooting the DB instance.
+ type: bool
+ supported_ca_certificate_identifiers:
+ description:
+ - A list of the supported CA certificate identifiers.
+ type: list
+ elements: str
+ sample: [
+ "rds-ca-2019",
+ "rds-ca-ecc384-g1",
+ "rds-ca-rsa4096-g1",
+ "rds-ca-rsa2048-g1"
+ ]
+ supports_local_write_forwarding:
+ description:
+ - Indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster.
+ type: bool
+ supports_integrations:
+ description:
+ - Indicates whether the DB engine version supports zero-ETL integrations with Amazon Redshift.
+ type: bool
+"""
+
+from typing import Any
+from typing import Dict
+from typing import List
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+@AWSRetry.jittered_backoff(retries=10)
+def _describe_db_engine_versions(connection: Any, **params: Dict[str, Any]) -> List[Dict[str, Any]]:
+ paginator = connection.get_paginator("describe_db_engine_versions")
+ return paginator.paginate(**params).build_full_result()["DBEngineVersions"]
+
+
+def describe_db_engine_versions(connection: Any, module: AnsibleAWSModule) -> Dict[str, Any]:
+ engine = module.params.get("engine")
+ engine_version = module.params.get("engine_version")
+ db_parameter_group_family = module.params.get("db_parameter_group_family")
+ default_only = module.params.get("default_only")
+ filters = module.params.get("filters")
+
+ params = {"DefaultOnly": default_only}
+ if engine:
+ params["Engine"] = engine
+ if engine_version:
+ params["EngineVersion"] = engine_version
+ if db_parameter_group_family:
+ params["DBParameterGroupFamily"] = db_parameter_group_family
+ if filters:
+ params["Filters"] = filters
+
+ try:
+ result = _describe_db_engine_versions(connection, **params)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Couldn't get RDS engine versions.")
+
+ def _transform_item(v):
+ tag_list = v.pop("TagList", [])
+ v = camel_dict_to_snake_dict(v)
+ v["tags"] = boto3_tag_list_to_ansible_dict(tag_list)
+ return v
+
+ return dict(changed=False, db_engine_versions=[_transform_item(v) for v in result])
+
+
+def main() -> None:
+ argument_spec = dict(
+ engine=dict(
+ choices=[
+ "aurora-mysql",
+ "aurora-postgresql",
+ "custom-oracle-ee",
+ "db2-ae",
+ "db2-se",
+ "mariadb",
+ "mysql",
+ "oracle-ee",
+ "oracle-ee-cdb",
+ "oracle-se2",
+ "oracle-se2-cdb",
+ "postgres",
+ "sqlserver-ee",
+ "sqlserver-se",
+ "sqlserver-ex",
+ "sqlserver-web",
+ ]
+ ),
+ engine_version=dict(),
+ db_parameter_group_family=dict(),
+ default_only=dict(type="bool", default=False),
+ filters=dict(type="dict"),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS.")
+
+ module.exit_json(**describe_db_engine_versions(client, module))
+
+
+if __name__ == "__main__":
+ main()
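
For context on the new module's return shaping: _transform_item above pops the raw TagList, snake_cases the remaining keys, and re-attaches the tags as a plain dict, which is what produces documented keys such as supports_global_databases and tags. A minimal, self-contained sketch of that shaping, using rough stand-ins for the collection helpers (camel_dict_to_snake_dict also recurses into nested structures; the simplified versions below do not):

import re

def camel_to_snake(name):
    # Rough stand-in for the key conversion done by camel_dict_to_snake_dict.
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

def tag_list_to_dict(tag_list):
    # Rough stand-in for boto3_tag_list_to_ansible_dict.
    return {tag["Key"]: tag["Value"] for tag in tag_list}

raw = {
    "Engine": "aurora-postgresql",
    "EngineVersion": "15.5",
    "SupportsGlobalDatabases": True,
    "TagList": [{"Key": "some", "Value": "tag"}],
}

tag_list = raw.pop("TagList", [])
item = {camel_to_snake(key): value for key, value in raw.items()}
item["tags"] = tag_list_to_dict(tag_list)
print(item)
# {'engine': 'aurora-postgresql', 'engine_version': '15.5',
#  'supports_global_databases': True, 'tags': {'some': 'tag'}}
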
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py
index 4451d7638..0362df0ba 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py
@@ -43,7 +43,9 @@ options:
type: bool
default: false
purge_cloudwatch_logs_exports:
- description: Set to C(false) to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance.
+ description:
+ - Set to C(false) to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance.
+ - Set I(enable_cloudwatch_logs_exports) to an empty list to disable all.
type: bool
default: true
read_replica:
@@ -1028,7 +1030,7 @@ def get_options_with_changing_values(client, module, parameters):
parameters["DBPortNumber"] = port
if not force_update_password:
parameters.pop("MasterUserPassword", None)
- if cloudwatch_logs_enabled:
+ if cloudwatch_logs_enabled is not None:
parameters["CloudwatchLogsExportConfiguration"] = cloudwatch_logs_enabled
if not module.params["storage_type"]:
parameters.pop("Iops", None)
@@ -1162,8 +1164,7 @@ def get_current_attributes_with_inconsistent_keys(instance):
def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs, purge_security_groups):
changing_params = {}
current_options = get_current_attributes_with_inconsistent_keys(instance)
- for option in current_options:
- current_option = current_options[option]
+ for option, current_option in current_options.items():
desired_option = modify_params.pop(option, None)
if desired_option is None:
continue
@@ -1565,8 +1566,7 @@ def main():
instance = get_instance(client, module, instance_id)
if instance:
break
- else:
- sleep(5)
+ sleep(5)
if state == "absent" and changed and not module.params["skip_final_snapshot"]:
instance.update(
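
The `if cloudwatch_logs_enabled is not None` change above exists because an empty export configuration is falsy but still meaningful: as the new purge_cloudwatch_logs_exports text notes, setting enable_cloudwatch_logs_exports to an empty list is how all exports get disabled, and a plain truthiness check can drop that request. A small standalone illustration (the dicts below are hypothetical stand-ins for the value the module computes):

def build_modify_params(cloudwatch_logs_enabled, use_is_not_none):
    # Mirrors the guard around CloudwatchLogsExportConfiguration in the hunk above.
    parameters = {}
    keep = (cloudwatch_logs_enabled is not None) if use_is_not_none else bool(cloudwatch_logs_enabled)
    if keep:
        parameters["CloudwatchLogsExportConfiguration"] = cloudwatch_logs_enabled
    return parameters

disable_everything = {"EnableLogTypes": [], "DisableLogTypes": ["error", "general"]}

print(build_modify_params(disable_everything, use_is_not_none=False))  # kept either way: non-empty dict
print(build_modify_params(None, use_is_not_none=True))                 # no request made: still dropped
print(build_modify_params({}, use_is_not_none=False))                  # old check: {} silently dropped
print(build_modify_params({}, use_is_not_none=True))                   # new check: {} is sent through
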
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_param_group.py
index abdb57c9b..82d0112fd 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_param_group.py
@@ -6,7 +6,7 @@
DOCUMENTATION = r"""
---
-module: rds_param_group
+module: rds_instance_param_group
version_added: 5.0.0
short_description: manage RDS parameter groups
description:
@@ -31,8 +31,7 @@ options:
engine:
description:
- The type of database for this group.
- - Please use following command to get list of all supported db engines and their respective versions.
- - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
+ - Please use M(amazon.aws.rds_engine_versions_info) to get list of all supported db engines and their respective versions.
- The DB parameter group family is immutable and can't be changed when updating a DB parameter group.
See U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html)
- Required for I(state=present).
@@ -61,7 +60,7 @@ extends_documentation_fragment:
EXAMPLES = r"""
- name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
state: present
name: norwegian-blue
description: 'My Fancy Ex Parrot Group'
@@ -73,7 +72,7 @@ EXAMPLES = r"""
Application: parrot
- name: Remove a parameter group
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
state: absent
name: norwegian-blue
"""
@@ -149,9 +148,9 @@ def convert_parameter(param, value):
if param["DataType"] == "integer":
if isinstance(value, string_types):
try:
- for modifier in INT_MODIFIERS.keys():
- if value.endswith(modifier):
- converted_value = int(value[:-1]) * INT_MODIFIERS[modifier]
+ for name, modifier in INT_MODIFIERS.items():
+ if value.endswith(name):
+ converted_value = int(value[:-1]) * modifier
except ValueError:
# may be based on a variable (ie. {foo*3/4}) so
# just pass it on through to the AWS SDK
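
The items() rewrite above is behaviour-preserving; for readers unfamiliar with the size-suffix handling it touches, here is a standalone sketch of the conversion with an assumed modifier table (INT_MODIFIERS itself is defined elsewhere in the module and not shown in this hunk) and a simplified fallback, whereas the real code passes unparseable values straight through to the AWS SDK:

# Assumed table: binary size suffixes; the module's real INT_MODIFIERS may differ.
INT_MODIFIERS = {"K": 1024, "M": 1024**2, "G": 1024**3, "T": 1024**4}

def convert_integer_parameter(value):
    """Turn values like '42K' into plain integers, mirroring the loop in the hunk above."""
    for name, modifier in INT_MODIFIERS.items():
        if value.endswith(name):
            return int(value[:-1]) * modifier
    return int(value)  # simplified: no suffix means the value is already a plain integer

print(convert_integer_parameter("42K"))  # 43008
print(convert_integer_parameter("8"))    # 8
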
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
index 369c7c774..b2924145d 100644
--- a/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
+++ b/ansible_collections/amazon/aws/plugins/modules/route53_health_check.py
@@ -535,21 +535,21 @@ def update_health_check(existing_check):
return True, "update", check_id
-def describe_health_check(id):
- if not id:
+def describe_health_check(check_id):
+ if not check_id:
return dict()
try:
result = client.get_health_check(
aws_retry=True,
- HealthCheckId=id,
+ HealthCheckId=check_id,
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get health check.", id=id)
+ module.fail_json_aws(e, msg="Failed to get health check.", id=check_id)
health_check = result.get("HealthCheck", {})
health_check = camel_dict_to_snake_dict(health_check)
- tags = get_tags(module, client, "healthcheck", id)
+ tags = get_tags(module, client, "healthcheck", check_id)
health_check["tags"] = tags
return health_check
@@ -705,7 +705,7 @@ def main():
if check_id:
changed |= manage_tags(module, client, "healthcheck", check_id, tags, purge_tags)
- health_check = describe_health_check(id=check_id)
+ health_check = describe_health_check(check_id)
health_check["action"] = action
module.exit_json(
changed=changed,
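
The route53_health_check change above is a simple rename: `id` is a Python built-in, and using it as a parameter name shadows that built-in for the whole function body. A two-function illustration:

def describe(id):
    # The parameter shadows the built-in id(); calling id(...) in here would raise
    # TypeError because `id` is now a string, not a function.
    return id

def describe_fixed(check_id):
    return id(check_id)  # the built-in id() is still reachable

print(describe("hc-123"))        # 'hc-123'
print(describe_fixed("hc-123"))  # an integer identity value
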
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
index d68223ede..d259286f9 100644
--- a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py
@@ -352,6 +352,9 @@ acl:
import json
import time
+from typing import Iterator
+from typing import List
+from typing import Tuple
try:
import botocore
@@ -372,48 +375,22 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
-def create_or_update_bucket(s3_client, module):
- policy = module.params.get("policy")
- name = module.params.get("name")
- requester_pays = module.params.get("requester_pays")
- tags = module.params.get("tags")
- purge_tags = module.params.get("purge_tags")
+def handle_bucket_versioning(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage versioning for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle versioning for.
+ Returns:
+ A tuple containing a boolean indicating whether versioning
+ was changed and a dictionary containing the updated versioning status.
+ """
versioning = module.params.get("versioning")
- encryption = module.params.get("encryption")
- encryption_key_id = module.params.get("encryption_key_id")
- bucket_key_enabled = module.params.get("bucket_key_enabled")
- public_access = module.params.get("public_access")
- delete_public_access = module.params.get("delete_public_access")
- delete_object_ownership = module.params.get("delete_object_ownership")
- object_ownership = module.params.get("object_ownership")
- object_lock_enabled = module.params.get("object_lock_enabled")
- acl = module.params.get("acl")
- # default to US Standard region,
- # note: module.region will also try to pull a default out of the boto3 configs.
- location = module.region or "us-east-1"
-
- changed = False
- result = {}
+ versioning_changed = False
+ versioning_status = {}
try:
- bucket_is_present = bucket_exists(s3_client, name)
- except botocore.exceptions.EndpointConnectionError as e:
- module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}")
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to check bucket presence")
-
- if not bucket_is_present:
- try:
- bucket_changed = create_bucket(s3_client, name, location, object_lock_enabled)
- s3_client.get_waiter("bucket_exists").wait(Bucket=name)
- changed = changed or bucket_changed
- except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, msg="An error occurred waiting for the bucket to become available")
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed while creating bucket")
-
- # Versioning
- try:
versioning_status = get_bucket_versioning(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
if versioning is not None:
@@ -438,19 +415,34 @@ def create_or_update_bucket(s3_client, module):
if required_versioning:
try:
put_bucket_versioning(s3_client, name, required_versioning)
- changed = True
+ versioning_changed = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to update bucket versioning")
versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
- # This output format is there to ensure compatibility with previous versions of the module
- result["versioning"] = {
+ versioning_result = {
"Versioning": versioning_status.get("Status", "Disabled"),
"MfaDelete": versioning_status.get("MFADelete", "Disabled"),
}
+ # This output format is there to ensure compatibility with previous versions of the module
+ return versioning_changed, versioning_result
- # Requester pays
+
+def handle_bucket_requester_pays(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage requester pays setting for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle requester pays setting for.
+ Returns:
+ A tuple containing a boolean indicating whether requester pays setting
+ was changed and a dictionary containing the updated requester pays status.
+ """
+ requester_pays = module.params.get("requester_pays")
+ requester_pays_changed = False
+ requester_pays_status = {}
try:
requester_pays_status = get_bucket_request_payment(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
@@ -476,11 +468,27 @@ def create_or_update_bucket(s3_client, module):
# account, so we retry one more time
put_bucket_request_payment(s3_client, name, payer)
requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
- changed = True
+ requester_pays_changed = True
- result["requester_pays"] = requester_pays
+ return requester_pays_changed, requester_pays
+
+
+def handle_bucket_public_access_config(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage public access configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle public access configuration for.
+ Returns:
+ A tuple containing a boolean indicating whether public access configuration
+ was changed and a dictionary containing the updated public access configuration.
+ """
+ public_access = module.params.get("public_access")
+ delete_public_access = module.params.get("delete_public_access")
+ public_access_changed = False
+ public_access_result = {}
- # Public access clock configuration
current_public_access = {}
try:
current_public_access = get_bucket_public_access(s3_client, name)
@@ -502,22 +510,38 @@ def create_or_update_bucket(s3_client, module):
camel_public_block = snake_dict_to_camel_dict(public_access, capitalize_first=True)
if current_public_access == camel_public_block:
- result["public_access_block"] = current_public_access
+ public_access_result = current_public_access
else:
put_bucket_public_access(s3_client, name, camel_public_block)
- changed = True
- result["public_access_block"] = camel_public_block
+ public_access_changed = True
+ public_access_result = camel_public_block
# -- Delete public access block
if delete_public_access:
if current_public_access == {}:
- result["public_access_block"] = current_public_access
+ public_access_result = current_public_access
else:
delete_bucket_public_access(s3_client, name)
- changed = True
- result["public_access_block"] = {}
+ public_access_changed = True
+ public_access_result = {}
- # Policy
+ # Return the result
+ return public_access_changed, public_access_result
+
+
+def handle_bucket_policy(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage bucket policy for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle the policy for.
+ Returns:
+ A tuple containing a boolean indicating whether the bucket policy
+ was changed and a dictionary containing the updated bucket policy.
+ """
+ policy = module.params.get("policy")
+ policy_changed = False
try:
current_policy = get_bucket_policy(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
@@ -543,7 +567,7 @@ def create_or_update_bucket(s3_client, module):
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket policy")
current_policy = wait_policy_is_applied(module, s3_client, name, policy)
- changed = True
+ policy_changed = True
elif compare_policies(current_policy, policy):
try:
put_bucket_policy(s3_client, name, policy)
@@ -555,11 +579,26 @@ def create_or_update_bucket(s3_client, module):
# account, so we retry one more time
put_bucket_policy(s3_client, name, policy)
current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
- changed = True
+ policy_changed = True
- result["policy"] = current_policy
+ return policy_changed, current_policy
+
+
+def handle_bucket_tags(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage tags for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle tags for.
+ Returns:
+ A tuple containing a boolean indicating whether tags were changed
+ and a dictionary containing the updated tags.
+ """
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ bucket_tags_changed = False
- # Tags
try:
current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
@@ -596,11 +635,27 @@ def create_or_update_bucket(s3_client, module):
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket tags")
current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
- changed = True
+ bucket_tags_changed = True
- result["tags"] = current_tags_dict
+ return bucket_tags_changed, current_tags_dict
+
+
+def handle_bucket_encryption(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage encryption settings for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle encryption for.
+ Returns:
+ A tuple containing a boolean indicating whether encryption settings
+ were changed and a dictionary containing the updated encryption settings.
+ """
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+ bucket_key_enabled = module.params.get("bucket_key_enabled")
+ encryption_changed = False
- # Encryption
try:
current_encryption = get_bucket_encryption(s3_client, name)
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
@@ -626,7 +681,7 @@ def create_or_update_bucket(s3_client, module):
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket encryption")
current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
- changed = True
+ encryption_changed = True
else:
if (encryption != current_encryption_algorithm) or (
encryption == "aws:kms" and current_encryption_key != encryption_key_id
@@ -635,24 +690,37 @@ def create_or_update_bucket(s3_client, module):
if encryption == "aws:kms" and encryption_key_id is not None:
expected_encryption.update({"KMSMasterKeyID": encryption_key_id})
current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption)
- changed = True
+ encryption_changed = True
if bucket_key_enabled is not None:
current_encryption_algorithm = current_encryption.get("SSEAlgorithm") if current_encryption else None
if current_encryption_algorithm == "aws:kms":
if get_bucket_key(s3_client, name) != bucket_key_enabled:
- if bucket_key_enabled:
- expected_encryption = True
- else:
- expected_encryption = False
+ expected_encryption = bool(bucket_key_enabled)
current_encryption = put_bucket_key_with_retry(module, s3_client, name, expected_encryption)
- changed = True
- result["encryption"] = current_encryption
+ encryption_changed = True
- # -- Bucket ownership
+ return encryption_changed, current_encryption
+
+
+def handle_bucket_ownership(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage ownership settings for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle ownership for.
+ Returns:
+ A tuple containing a boolean indicating whether ownership settings were changed
+ and a dictionary containing the updated ownership settings.
+ """
+ delete_object_ownership = module.params.get("delete_object_ownership")
+ object_ownership = module.params.get("object_ownership")
+ bucket_ownership_changed = False
+ bucket_ownership_result = {}
try:
bucket_ownership = get_bucket_ownership_cntrl(s3_client, name)
- result["object_ownership"] = bucket_ownership
+ bucket_ownership_result = bucket_ownership
except KeyError as e:
# Some non-AWS providers appear to return policy documents that aren't
# compatible with AWS, cleanly catch KeyError so users can continue to use
@@ -676,21 +744,36 @@ def create_or_update_bucket(s3_client, module):
         # delete S3 bucket ownership
if bucket_ownership is not None:
delete_bucket_ownership(s3_client, name)
- changed = True
- result["object_ownership"] = None
+ bucket_ownership_changed = True
+ bucket_ownership_result = None
elif object_ownership is not None:
# update S3 bucket ownership
if bucket_ownership != object_ownership:
put_bucket_ownership(s3_client, name, object_ownership)
- changed = True
- result["object_ownership"] = object_ownership
+ bucket_ownership_changed = True
+ bucket_ownership_result = object_ownership
- # -- Bucket ACL
+ return bucket_ownership_changed, bucket_ownership_result
+
+
+def handle_bucket_acl(s3_client, module: AnsibleAWSModule, name: str) -> tuple[bool, dict]:
+ """
+ Manage Access Control List (ACL) for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle ACL for.
+ Returns:
+ A tuple containing a boolean indicating whether ACL was changed and a dictionary containing the updated ACL.
+ """
+ acl = module.params.get("acl")
+ bucket_acl_changed = False
+ bucket_acl_result = {}
if acl:
try:
s3_client.put_bucket_acl(Bucket=name, ACL=acl)
- result["acl"] = acl
- changed = True
+ bucket_acl_result = acl
+ bucket_acl_changed = True
except KeyError as e:
# Some non-AWS providers appear to return policy documents that aren't
# compatible with AWS, cleanly catch KeyError so users can continue to use
@@ -706,17 +789,31 @@ def create_or_update_bucket(s3_client, module):
) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to update bucket ACL")
- # -- Object Lock
+ return bucket_acl_changed, bucket_acl_result
+
+
+def handle_bucket_object_lock(s3_client, module: AnsibleAWSModule, name: str) -> dict:
+ """
+ Manage object lock configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the bucket to handle object lock for.
+ Returns:
+ The updated object lock configuration.
+ """
+ object_lock_enabled = module.params.get("object_lock_enabled")
+ object_lock_result = {}
try:
object_lock_status = get_bucket_object_lock_enabled(s3_client, name)
- result["object_lock_enabled"] = object_lock_status
+ object_lock_result = object_lock_status
except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e:
if object_lock_enabled is not None:
module.fail_json(msg="Fetching bucket object lock state is not supported")
except is_boto3_error_code("ObjectLockConfigurationNotFoundError"): # pylint: disable=duplicate-except
if object_lock_enabled:
module.fail_json(msg="Enabling object lock for existing buckets is not supported")
- result["object_lock_enabled"] = False
+ object_lock_result = False
except is_boto3_error_code("AccessDenied") as e: # pylint: disable=duplicate-except
if object_lock_enabled is not None:
module.fail_json(msg="Permission denied fetching object lock state for bucket")
@@ -732,21 +829,128 @@ def create_or_update_bucket(s3_client, module):
if object_lock_enabled and not object_lock_status:
module.fail_json(msg="Enabling object lock for existing buckets is not supported")
+ return object_lock_result
+
+
+def create_or_update_bucket(s3_client, module: AnsibleAWSModule):
+ """
+ Create or update an S3 bucket along with its associated configurations.
+ This function creates a new S3 bucket if it does not already exist, and updates its configurations,
+ such as versioning, requester pays, public access block configuration, policy, tags, encryption, bucket ownership,
+    ACL, and object lock settings, then exits the module reporting whether any changes were made and the updated configurations.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ Returns:
+ None
+ """
+ name = module.params.get("name")
+ object_lock_enabled = module.params.get("object_lock_enabled")
+ # default to US Standard region,
+ # note: module.region will also try to pull a default out of the boto3 configs.
+ location = module.region or "us-east-1"
+
+ changed = False
+ result = {}
+
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ try:
+ bucket_changed = create_bucket(s3_client, name, location, object_lock_enabled)
+ s3_client.get_waiter("bucket_exists").wait(Bucket=name)
+ changed = changed or bucket_changed
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, msg="An error occurred waiting for the bucket to become available")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket")
+
+ # Versioning
+ versioning_changed, versioning_result = handle_bucket_versioning(s3_client, module, name)
+ result["versioning"] = versioning_result
+
+ # Requester pays
+ requester_pays_changed, requester_pays_result = handle_bucket_requester_pays(s3_client, module, name)
+ result["requester_pays"] = requester_pays_result
+
+    # Public access block configuration
+ public_access_config_changed, public_access_config_result = handle_bucket_public_access_config(
+ s3_client, module, name
+ )
+ result["public_access_block"] = public_access_config_result
+
+ # Policy
+ policy_changed, current_policy = handle_bucket_policy(s3_client, module, name)
+ result["policy"] = current_policy
+
+ # Tags
+ tags_changed, current_tags_dict = handle_bucket_tags(s3_client, module, name)
+ result["tags"] = current_tags_dict
+
+ # Encryption
+ encryption_changed, current_encryption = handle_bucket_encryption(s3_client, module, name)
+ result["encryption"] = current_encryption
+
+ # -- Bucket ownership
+ bucket_ownership_changed, object_ownership_result = handle_bucket_ownership(s3_client, module, name)
+ result["object_ownership"] = object_ownership_result
+
+ # -- Bucket ACL
+ bucket_acl_changed, bucket_acl_result = handle_bucket_acl(s3_client, module, name)
+ result["acl"] = bucket_acl_result
+
+ # -- Object Lock
+ bucket_object_lock_result = handle_bucket_object_lock(s3_client, module, name)
+ result["object_lock_enabled"] = bucket_object_lock_result
+
# Module exit
+ changed = (
+ changed
+ or versioning_changed
+ or requester_pays_changed
+ or public_access_config_changed
+ or policy_changed
+ or tags_changed
+ or encryption_changed
+ or bucket_ownership_changed
+ or bucket_acl_changed
+ )
module.exit_json(changed=changed, name=name, **result)
-def bucket_exists(s3_client, bucket_name):
+def bucket_exists(s3_client, bucket_name: str) -> bool:
+ """
+ Checks if a given bucket exists in an AWS S3 account.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the bucket to check for existence.
+ Returns:
+ True if the bucket exists, False otherwise.
+ """
try:
s3_client.head_bucket(Bucket=bucket_name)
- bucket_exists = True
+ return True
except is_boto3_error_code("404"):
- bucket_exists = False
- return bucket_exists
+ return False
@AWSRetry.exponential_backoff(max_delay=120)
-def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False):
+def create_bucket(s3_client, bucket_name: str, location: str, object_lock_enabled: bool = False) -> bool:
+ """
+ Create an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the bucket to create.
+ location (str): The AWS region where the bucket should be created. If None, it defaults to "us-east-1".
+ object_lock_enabled (bool): Whether to enable object lock for the bucket. Defaults to False.
+ Returns:
+ True if the bucket was successfully created, False otherwise.
+ """
try:
params = {"Bucket": bucket_name}
@@ -770,22 +974,56 @@ def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_tagging(s3_client, bucket_name, tags):
+def put_bucket_tagging(s3_client, bucket_name: str, tags: dict):
+ """
+ Set tags for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ tags (dict): A dictionary containing the tags to be set on the bucket.
+ Returns:
+ None
+ """
s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": ansible_dict_to_boto3_tag_list(tags)})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_policy(s3_client, bucket_name, policy):
+def put_bucket_policy(s3_client, bucket_name: str, policy: dict):
+ """
+ Set the policy for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ policy (dict): A dictionary containing the policy to be set on the bucket.
+ Returns:
+ None
+ """
s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_policy(s3_client, bucket_name):
+def delete_bucket_policy(s3_client, bucket_name: str):
+ """
+ Delete the policy for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
s3_client.delete_bucket_policy(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_policy(s3_client, bucket_name):
+def get_bucket_policy(s3_client, bucket_name: str) -> str:
+ """
+ Get the policy for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ Current bucket policy.
+ """
try:
current_policy_string = s3_client.get_bucket_policy(Bucket=bucket_name).get("Policy")
if not current_policy_string:
@@ -798,33 +1036,83 @@ def get_bucket_policy(s3_client, bucket_name):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_request_payment(s3_client, bucket_name, payer):
+def put_bucket_request_payment(s3_client, bucket_name: str, payer: str):
+ """
+ Set the request payment configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ payer (str): The entity responsible for charges related to fulfilling the request.
+ Returns:
+ None
+ """
s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={"Payer": payer})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_request_payment(s3_client, bucket_name):
+def get_bucket_request_payment(s3_client, bucket_name: str) -> str:
+ """
+ Get the request payment configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ Payer of the download and request fees.
+ """
return s3_client.get_bucket_request_payment(Bucket=bucket_name).get("Payer")
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_versioning(s3_client, bucket_name):
+def get_bucket_versioning(s3_client, bucket_name: str) -> dict:
+ """
+ Get the versioning configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+        The versioning state of the bucket.
+ """
return s3_client.get_bucket_versioning(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+def put_bucket_versioning(s3_client, bucket_name: str, required_versioning: str):
+ """
+ Set the versioning configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ required_versioning (str): The desired versioning state for the bucket ("Enabled", "Suspended").
+ Returns:
+ None
+ """
s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={"Status": required_versioning})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_object_lock_enabled(s3_client, bucket_name):
+def get_bucket_object_lock_enabled(s3_client, bucket_name: str) -> bool:
+ """
+ Retrieve the object lock configuration status for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ True if object lock is enabled for the bucket, False otherwise.
+ """
object_lock_configuration = s3_client.get_object_lock_configuration(Bucket=bucket_name)
return object_lock_configuration["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled"
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_encryption(s3_client, bucket_name):
+def get_bucket_encryption(s3_client, bucket_name: str) -> dict:
+ """
+ Retrieve the encryption configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ Encryption configuration of the bucket.
+ """
try:
result = s3_client.get_bucket_encryption(Bucket=bucket_name)
return (
@@ -839,7 +1127,15 @@ def get_bucket_encryption(s3_client, bucket_name):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def get_bucket_key(s3_client, bucket_name):
+def get_bucket_key(s3_client, bucket_name: str) -> bool:
+ """
+ Retrieve the status of server-side encryption for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+        Whether or not the S3 Bucket Key is enabled for the bucket.
+ """
try:
result = s3_client.get_bucket_encryption(Bucket=bucket_name)
return result.get("ServerSideEncryptionConfiguration", {}).get("Rules", [])[0].get("BucketKeyEnabled")
@@ -849,7 +1145,17 @@ def get_bucket_key(s3_client, bucket_name):
return None
-def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
+def put_bucket_encryption_with_retry(module: AnsibleAWSModule, s3_client, name: str, expected_encryption: dict) -> dict:
+ """
+ Set the encryption configuration for an S3 bucket with retry logic.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ name (str): The name of the S3 bucket.
+ expected_encryption (dict): A dictionary containing the expected encryption configuration.
+ Returns:
+ Updated encryption configuration of the bucket.
+ """
max_retries = 3
for retries in range(1, max_retries + 1):
try:
@@ -877,14 +1183,33 @@ def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryptio
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_encryption(s3_client, bucket_name, encryption):
+def put_bucket_encryption(s3_client, bucket_name: str, encryption: dict) -> None:
+ """
+ Set the encryption configuration for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ encryption (dict): A dictionary containing the encryption configuration.
+ Returns:
+ None
+ """
server_side_encryption_configuration = {"Rules": [{"ApplyServerSideEncryptionByDefault": encryption}]}
s3_client.put_bucket_encryption(
Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration
)
-def put_bucket_key_with_retry(module, s3_client, name, expected_encryption):
+def put_bucket_key_with_retry(module: AnsibleAWSModule, s3_client, name: str, expected_encryption: bool) -> dict:
+ """
+ Set the status of server-side encryption for an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ name (str): The name of the S3 bucket.
+ expected_encryption (bool): The expected status of server-side encryption using AWS KMS.
+ Returns:
+ The updated status of server-side encryption using AWS KMS for the bucket.
+ """
max_retries = 3
for retries in range(1, max_retries + 1):
try:
@@ -909,7 +1234,16 @@ def put_bucket_key_with_retry(module, s3_client, name, expected_encryption):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_key(s3_client, bucket_name, encryption):
+def put_bucket_key(s3_client, bucket_name: str, encryption: bool) -> None:
+ """
+ Set the status of server-side encryption for an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ encryption (bool): The status of server-side encryption using AWS KMS.
+ Returns:
+ None
+ """
# server_side_encryption_configuration ={'Rules': [{'BucketKeyEnabled': encryption}]}
encryption_status = s3_client.get_bucket_encryption(Bucket=bucket_name)
encryption_status["ServerSideEncryptionConfiguration"]["Rules"][0]["BucketKeyEnabled"] = encryption
@@ -919,17 +1253,41 @@ def put_bucket_key(s3_client, bucket_name, encryption):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_tagging(s3_client, bucket_name):
+def delete_bucket_tagging(s3_client, bucket_name: str) -> None:
+ """
+ Delete the tagging configuration of an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
s3_client.delete_bucket_tagging(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_encryption(s3_client, bucket_name):
+def delete_bucket_encryption(s3_client, bucket_name: str) -> None:
+ """
+ Delete the encryption configuration of an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
s3_client.delete_bucket_encryption(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=["OperationAborted"])
-def delete_bucket(s3_client, bucket_name):
+def delete_bucket(s3_client, bucket_name: str) -> None:
+ """
+ Delete an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
try:
s3_client.delete_bucket(Bucket=bucket_name)
except is_boto3_error_code("NoSuchBucket"):
@@ -939,40 +1297,74 @@ def delete_bucket(s3_client, bucket_name):
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_public_access(s3_client, bucket_name, public_acces):
+def put_bucket_public_access(s3_client, bucket_name: str, public_acces: dict) -> None:
"""
Put new public access block to S3 bucket
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ public_access (dict): The public access block configuration.
+ Returns:
+ None
"""
s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=public_acces)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_public_access(s3_client, bucket_name):
+def delete_bucket_public_access(s3_client, bucket_name: str) -> None:
"""
Delete public access block from S3 bucket
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
"""
s3_client.delete_public_access_block(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def delete_bucket_ownership(s3_client, bucket_name):
+def delete_bucket_ownership(s3_client, bucket_name: str) -> None:
"""
Delete bucket ownership controls from S3 bucket
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ None
"""
s3_client.delete_bucket_ownership_controls(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
-def put_bucket_ownership(s3_client, bucket_name, target):
+def put_bucket_ownership(s3_client, bucket_name: str, target: str) -> None:
"""
Put bucket ownership controls for S3 bucket
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
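+        target (str): The object ownership rule to apply to the bucket.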
+ Returns:
+ None
"""
s3_client.put_bucket_ownership_controls(
Bucket=bucket_name, OwnershipControls={"Rules": [{"ObjectOwnership": target}]}
)
-def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+def wait_policy_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, expected_policy: dict, should_fail: bool = True
+) -> dict:
+ """
+ Wait for a bucket policy to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_policy (dict): The expected bucket policy.
+ should_fail (bool): Flag indicating whether to fail if the policy is not applied within the expected time. Default is True.
+ Returns:
+ The current policy applied to the bucket, or None if the policy failed to apply within the expected time.
+ """
for dummy in range(0, 12):
try:
current_policy = get_bucket_policy(s3_client, bucket_name)
@@ -993,7 +1385,20 @@ def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, shou
return None
-def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+def wait_payer_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, expected_payer: bool, should_fail=True
+) -> str:
+ """
+ Wait for the requester pays setting to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_payer (bool): The expected status of the requester pays setting.
+ should_fail (bool): Flag indicating whether to fail if the setting is not applied within the expected time. Default is True.
+ Returns:
+ The current status of the requester pays setting applied to the bucket.
+ """
for dummy in range(0, 12):
try:
requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
@@ -1013,7 +1418,21 @@ def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should
return None
-def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+def wait_encryption_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, expected_encryption: dict, should_fail=True, retries=12
+) -> dict:
+ """
+ Wait for the encryption setting to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+        expected_encryption (dict): The expected encryption setting.
+ should_fail (bool): Flag indicating whether to fail if the setting is not applied within the expected time. Default is True.
+ retries (int): The number of retries to attempt. Default is 12.
+ Returns:
+ The current encryption setting applied to the bucket.
+ """
for dummy in range(0, retries):
try:
encryption = get_bucket_encryption(s3_client, bucket_name)
@@ -1034,7 +1453,21 @@ def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encrypti
return encryption
-def wait_bucket_key_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
+def wait_bucket_key_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, expected_encryption: bool, should_fail=True, retries=12
+) -> bool:
+ """
+ Wait for the bucket key setting to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_encryption (bool): The expected bucket key setting.
+ should_fail (bool): Flag indicating whether to fail if the setting is not applied within the expected time. Default is True.
+ retries (int): The number of retries to attempt. Default is 12.
+ Returns:
+ The current bucket key setting applied to the bucket.
+ """
for dummy in range(0, retries):
try:
encryption = get_bucket_key(s3_client, bucket_name)
@@ -1054,7 +1487,19 @@ def wait_bucket_key_is_applied(module, s3_client, bucket_name, expected_encrypti
return encryption
-def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+def wait_versioning_is_applied(
+ module: AnsibleAWSModule, s3_client, bucket_name: str, required_versioning: dict
+) -> dict:
+ """
+ Wait for the versioning setting to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ required_versioning (dict): The required versioning status.
+ Returns:
+ The current versioning status applied to the bucket.
+ """
for dummy in range(0, 24):
try:
versioning_status = get_bucket_versioning(s3_client, bucket_name)
@@ -1071,7 +1516,17 @@ def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioni
)
-def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+def wait_tags_are_applied(module: AnsibleAWSModule, s3_client, bucket_name: str, expected_tags_dict: dict) -> dict:
+ """
+ Wait for the tags to be applied to an S3 bucket.
+ Parameters:
+ module (AnsibleAWSModule): The Ansible module object.
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ expected_tags_dict (dict): The expected tags dictionary.
+ Returns:
+ The current tags dictionary applied to the bucket.
+ """
for dummy in range(0, 12):
try:
current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
@@ -1088,7 +1543,15 @@ def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
)
-def get_current_bucket_tags_dict(s3_client, bucket_name):
+def get_current_bucket_tags_dict(s3_client, bucket_name: str) -> dict:
+ """
+ Get the current tags applied to an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ The current tags dictionary applied to the bucket.
+ """
try:
current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get("TagSet")
except is_boto3_error_code("NoSuchTagSet"):
@@ -1100,9 +1563,14 @@ def get_current_bucket_tags_dict(s3_client, bucket_name):
return boto3_tag_list_to_ansible_dict(current_tags)
-def get_bucket_public_access(s3_client, bucket_name):
+def get_bucket_public_access(s3_client, bucket_name: str) -> dict:
"""
- Get current bucket public access block
+ Get current public access block configuration for a bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ The current public access block configuration for the bucket.
"""
try:
bucket_public_access_block = s3_client.get_public_access_block(Bucket=bucket_name)
@@ -1111,9 +1579,14 @@ def get_bucket_public_access(s3_client, bucket_name):
return {}
-def get_bucket_ownership_cntrl(s3_client, bucket_name):
+def get_bucket_ownership_cntrl(s3_client, bucket_name: str) -> str:
"""
- Get current bucket public access block
+ Get the current bucket ownership controls.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ bucket_name (str): The name of the S3 bucket.
+ Returns:
+ The object ownership rule
"""
try:
bucket_ownership = s3_client.get_bucket_ownership_controls(Bucket=bucket_name)
@@ -1122,13 +1595,31 @@ def get_bucket_ownership_cntrl(s3_client, bucket_name):
return None
-def paginated_list(s3_client, **pagination_params):
+def paginated_list(s3_client, **pagination_params) -> Iterator[List[str]]:
+ """
+ Paginate through the list of objects in an S3 bucket.
+ This function yields the keys of objects in the S3 bucket, paginating through the results.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ **pagination_params: Additional parameters to pass to the paginator.
+ Yields:
+ list: A list of keys of objects in the bucket for each page of results.
+ """
pg = s3_client.get_paginator("list_objects_v2")
for page in pg.paginate(**pagination_params):
yield [data["Key"] for data in page.get("Contents", [])]
-def paginated_versions_list(s3_client, **pagination_params):
+def paginated_versions_list(s3_client, **pagination_params) -> Iterator[List[Tuple[str, str]]]:
+ """
+ Paginate through the list of object versions in an S3 bucket.
+ This function yields the keys and version IDs of object versions in the S3 bucket, paginating through the results.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ **pagination_params: Additional parameters to pass to the paginator.
+ Yields:
+ list: A list of tuples containing keys and version IDs of object versions in the bucket for each page of results.
+ """
try:
pg = s3_client.get_paginator("list_object_versions")
for page in pg.paginate(**pagination_params):
@@ -1140,7 +1631,48 @@ def paginated_versions_list(s3_client, **pagination_params):
yield []
-def destroy_bucket(s3_client, module):
+def delete_objects(s3_client, module: AnsibleAWSModule, name: str) -> None:
+ """
+ Delete objects from an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ name (str): The name of the S3 bucket.
+ Returns:
+ None
+ """
+ try:
+ for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
+ formatted_keys = [{"Key": key, "VersionId": version} for key, version in key_version_pairs]
+ for fk in formatted_keys:
+ # remove VersionId from cases where they are `None` so that
+ # unversioned objects are deleted using `DeleteObject`
+ # rather than `DeleteObjectVersion`, improving backwards
+ # compatibility with older IAM policies.
+ if not fk.get("VersionId") or fk.get("VersionId") == "null":
+ fk.pop("VersionId")
+ if formatted_keys:
+ resp = s3_client.delete_objects(Bucket=name, Delete={"Objects": formatted_keys})
+ if resp.get("Errors"):
+ objects_to_delete = ", ".join([k["Key"] for k in resp["Errors"]])
+ module.fail_json(
+ msg=(f"Could not empty bucket before deleting. Could not delete objects: {objects_to_delete}"),
+ errors=resp["Errors"],
+ response=resp,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket")
+
+
+def destroy_bucket(s3_client, module: AnsibleAWSModule) -> None:
+ """
+ This function destroys an S3 bucket.
+ Parameters:
+ s3_client (boto3.client): The Boto3 S3 client object.
+ module (AnsibleAWSModule): The Ansible module object.
+ Returns:
+ None
+ """
force = module.params.get("force")
name = module.params.get("name")
try:
@@ -1156,29 +1688,9 @@ def destroy_bucket(s3_client, module):
if force:
# if there are contents then we need to delete them (including versions) before we can delete the bucket
try:
- for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
- formatted_keys = [{"Key": key, "VersionId": version} for key, version in key_version_pairs]
- for fk in formatted_keys:
- # remove VersionId from cases where they are `None` so that
- # unversioned objects are deleted using `DeleteObject`
- # rather than `DeleteObjectVersion`, improving backwards
- # compatibility with older IAM policies.
- if not fk.get("VersionId") or fk.get("VersionId") == "null":
- fk.pop("VersionId")
-
- if formatted_keys:
- resp = s3_client.delete_objects(Bucket=name, Delete={"Objects": formatted_keys})
- if resp.get("Errors"):
- objects_to_delete = ", ".join([k["Key"] for k in resp["Errors"]])
- module.fail_json(
- msg=(
- f"Could not empty bucket before deleting. Could not delete objects: {objects_to_delete}"
- ),
- errors=resp["Errors"],
- response=resp,
- )
+ delete_objects(s3_client, module, name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed while deleting bucket")
+ module.fail_json_aws(e, msg="Failed while deleting objects")
try:
delete_bucket(s3_client, name)
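
Stepping back, the s3_bucket refactor above replaces one long create_or_update_bucket body with per-feature handle_bucket_* helpers that each return a (changed, result) pair, which the caller folds into the final module result. A toy sketch of that pattern with generic names (not the collection's API; the real module calls each handler explicitly rather than looping):

from typing import Callable, Dict, Tuple

def handle_versioning(params: dict) -> Tuple[bool, dict]:
    # Pretend the desired state differed, so versioning was just enabled.
    return True, {"Versioning": "Enabled", "MfaDelete": "Disabled"}

def handle_tags(params: dict) -> Tuple[bool, dict]:
    # Pretend the tags already matched, so nothing changed.
    return False, {"env": "dev"}

def create_or_update(params: dict) -> dict:
    handlers: Dict[str, Callable[[dict], Tuple[bool, dict]]] = {
        "versioning": handle_versioning,
        "tags": handle_tags,
    }
    changed = False
    result: Dict[str, object] = {}
    for key, handler in handlers.items():
        handler_changed, handler_result = handler(params)
        changed = changed or handler_changed
        result[key] = handler_result
    return {"changed": changed, **result}

print(create_or_update({"name": "example-bucket"}))
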
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object.py b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
index 2cd897c89..0486d3b9f 100644
--- a/ansible_collections/amazon/aws/plugins/modules/s3_object.py
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
@@ -473,7 +473,7 @@ def key_check(module, s3, bucket, obj, version=None, validate=True):
def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content=None):
- s3_etag = get_etag(s3, bucket, obj, version=version)
+ s3_etag = _head_object(s3, bucket, obj, version=version).get("ETag")
if local_file is not None:
local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
else:
@@ -481,27 +481,49 @@ def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content
return s3_etag == local_etag
-def get_etag(s3, bucket, obj, version=None):
+def _head_object(s3, bucket, obj, version=None):
try:
if version:
key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)
else:
key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj)
if not key_check:
- return None
- return key_check["ETag"]
+ return {}
+ key_check.pop("ResponseMetadata")
+ return key_check
except is_boto3_error_code("404"):
- return None
+ return {}
+
+
+def _get_object_content(module, s3, bucket, obj, version=None):
+ try:
+ if version:
+ contents = s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)["Body"].read()
+ else:
+ contents = s3.get_object(aws_retry=True, Bucket=bucket, Key=obj)["Body"].read()
+ return contents
+ except is_boto3_error_code(["404", "403"]) as e:
+ # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
+ # user does not have the s3:GetObject permission.
+ module.fail_json_aws(e, msg=f"Could not find the key {obj}.")
+ except is_boto3_error_message("require AWS Signature Version 4"): # pylint: disable=duplicate-except
+ raise Sigv4Required()
+ except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg=f"Could not find the key {obj}.")
+ except (
+ botocore.exceptions.BotoCoreError,
+ botocore.exceptions.ClientError,
+ boto3.exceptions.Boto3Error,
+ ) as e: # pylint: disable=duplicate-except
+ raise S3ObjectFailure(f"Could not find the key {obj}.", e)
def get_s3_last_modified_timestamp(s3, bucket, obj, version=None):
- if version:
- key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)
- else:
- key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj)
- if not key_check:
- return None
- return key_check["LastModified"].timestamp()
+ last_modified = None
+ obj_info = _head_object(s3, bucket, obj, version)
+ if obj_info:
+ last_modified = obj_info["LastModified"].timestamp()
+ return last_modified
def is_local_object_latest(s3, bucket, obj, version=None, local_file=None):
@@ -550,22 +572,6 @@ def paginated_list(s3, **pagination_params):
yield data["Key"]
-def paginated_versioned_list_with_fallback(s3, **pagination_params):
- try:
- versioned_pg = s3.get_paginator("list_object_versions")
- for page in versioned_pg.paginate(**pagination_params):
- delete_markers = [
- {"Key": data["Key"], "VersionId": data["VersionId"]} for data in page.get("DeleteMarkers", [])
- ]
- current_objects = [
- {"Key": data["Key"], "VersionId": data["VersionId"]} for data in page.get("Versions", [])
- ]
- yield delete_markers + current_objects
- except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ["AccessDenied"]):
- for key in paginated_list(s3, **pagination_params):
- yield [{"Key": key}]
-
-
def list_keys(s3, bucket, prefix=None, marker=None, max_keys=None):
pagination_params = {
"Bucket": bucket,
@@ -779,29 +785,7 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
# retries is the number of loops; range/xrange needs to be one
# more to get that count of loops.
- try:
- # Note: Something of a permissions related hack
- # get_object returns the HEAD information, plus a *stream* which can be read.
- # because the stream's dropped on the floor, we never pull the data and this is the
- # functional equivalent of calling get_head which still relying on the 'GET' permission
- if version:
- s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)
- else:
- s3.get_object(aws_retry=True, Bucket=bucket, Key=obj)
- except is_boto3_error_code(["404", "403"]) as e:
- # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
- # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
- module.fail_json_aws(e, msg=f"Could not find the key {obj}.")
- except is_boto3_error_message("require AWS Signature Version 4"): # pylint: disable=duplicate-except
- raise Sigv4Required()
- except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg=f"Could not find the key {obj}.")
- except (
- botocore.exceptions.BotoCoreError,
- botocore.exceptions.ClientError,
- boto3.exceptions.Boto3Error,
- ) as e: # pylint: disable=duplicate-except
- raise S3ObjectFailure(f"Could not find the key {obj}.", e)
+ _get_object_content(module, s3, bucket, obj, version)
optional_kwargs = {"ExtraArgs": {"VersionId": version}} if version else {}
for x in range(0, retries + 1):
@@ -827,27 +811,8 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
def download_s3str(module, s3, bucket, obj, version=None):
if module.check_mode:
module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
- try:
- if version:
- contents = to_native(
- s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)["Body"].read()
- )
- else:
- contents = to_native(s3.get_object(aws_retry=True, Bucket=bucket, Key=obj)["Body"].read())
- module.exit_json(msg="GET operation complete", contents=contents, changed=True)
- except is_boto3_error_message("require AWS Signature Version 4"):
- raise Sigv4Required()
- except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except
- module.fail_json_aws(
- e,
- msg=f"Failed while getting contents of object {obj} as a string.",
- )
- except (
- botocore.exceptions.BotoCoreError,
- botocore.exceptions.ClientError,
- boto3.exceptions.Boto3Error,
- ) as e: # pylint: disable=duplicate-except
- raise S3ObjectFailure(f"Failed while getting contents of object {obj} as a string.", e)
+ contents = to_native(_get_object_content(module, s3, bucket, obj, version))
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
def get_download_url(module, s3, bucket, obj, expiry, tags=None, changed=True):
@@ -997,13 +962,13 @@ def ensure_tags(client, module, bucket, obj):
return current_tags_dict, changed
-def get_binary_content(vars):
+def get_binary_content(s3_vars):
# the content will be uploaded as a byte string, so we must encode it first
bincontent = None
- if vars.get("content"):
- bincontent = vars["content"].encode("utf-8")
- if vars.get("content_base64"):
- bincontent = base64.standard_b64decode(vars["content_base64"])
+ if s3_vars.get("content"):
+ bincontent = s3_vars["content"].encode("utf-8")
+ if s3_vars.get("content_base64"):
+ bincontent = base64.standard_b64decode(s3_vars["content_base64"])
return bincontent
@@ -1271,6 +1236,17 @@ def check_object_tags(module, connection, bucket, obj):
return diff
+def calculate_object_etag(module, s3, bucket, obj, head_etag, version=None):
+ etag = head_etag
+ if "-" in etag:
+ # object has been created using multipart upload, compute ETag using
+ # object content to ensure idempotency.
+ contents = _get_object_content(module, s3, bucket, obj, version)
+ # Set ETag to None, to force function to compute ETag from content
+ etag = calculate_etag_content(module, contents, None, s3, bucket, obj)
+ return etag
+
+
def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, src_bucket, src_obj, versionId=None):
try:
params = {"Bucket": bucket, "Key": obj}
@@ -1281,14 +1257,33 @@ def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate,
changed=False,
)
- s_etag = get_etag(s3, src_bucket, src_obj, version=versionId)
- d_etag = get_etag(s3, bucket, obj)
- if s_etag == d_etag:
+ s_obj_info = _head_object(s3, src_bucket, src_obj, version=versionId)
+ d_obj_info = _head_object(s3, bucket, obj)
+ do_match = True
+ diff_msg = None
+ if d_obj_info:
+ src_etag = calculate_object_etag(module, s3, src_bucket, src_obj, s_obj_info.get("ETag"), versionId)
+ dst_etag = calculate_object_etag(module, s3, bucket, obj, d_obj_info.get("ETag"))
+ if src_etag != dst_etag:
+ # Source and destination objects ETag differ
+ do_match = False
+ diff_msg = "ETag from source and destination differ"
+ if do_match and metadata and metadata != d_obj_info.get("Metadata"):
+ # Metadata from module inputs differs from what has been retrieved from object header
+ diff_msg = "Would have update object Metadata if not running in check mode."
+ do_match = False
+ else:
+ # The destination object does not exist
+ do_match = False
+ diff_msg = "Would have copied the object if not running in check mode."
+
+ if do_match:
+ # S3 objects are equal, only ensure the tags are in sync
if module.check_mode:
changed = check_object_tags(module, s3, bucket, obj)
result = {}
if changed:
- result.update({"msg": "Would have update object tags is not running in check mode."})
+ result.update({"msg": "Would have update object tags if not running in check mode."})
return changed, result
# Ensure tags
@@ -1297,8 +1292,9 @@ def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate,
if changed:
result = {"msg": "tags successfully updated.", "tags": tags}
return changed, result
- elif module.check_mode:
- return True, {"msg": "ETag from source and destination differ"}
+ # S3 objects differ
+ if module.check_mode:
+ return True, {"msg": diff_msg}
else:
changed = True
bucketsrc = {
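
For context on the idempotency change above: calculate_object_etag() recomputes the ETag from the object body whenever the stored ETag contains a "-", which marks a multipart upload whose ETag is not a plain MD5 of the content. A rough sketch of how a multipart-style ETag can be derived from raw bytes, assuming a fixed part size (the 8 MiB default and the helper name are assumptions for illustration, not necessarily what calculate_etag_content does internally):

import hashlib


def multipart_style_etag(content: bytes, part_size: int = 8 * 1024 * 1024) -> str:
    # Single-part objects use the plain MD5 of the body as their ETag.
    if len(content) <= part_size:
        return hashlib.md5(content).hexdigest()
    # Multipart ETags are the MD5 of the concatenated per-part MD5 digests,
    # suffixed with the number of parts, e.g. "9b2cf535...-3".
    part_digests = [
        hashlib.md5(content[offset:offset + part_size]).digest()
        for offset in range(0, len(content), part_size)
    ]
    return f"{hashlib.md5(b''.join(part_digests)).hexdigest()}-{len(part_digests)}"

Two byte-identical objects uploaded with different part sizes therefore report different ETags, which is why the comparison above falls back to content-derived values.
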
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
index 65bd5e328..39f0c2798 100644
--- a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py
@@ -741,8 +741,10 @@ def main():
result.append(object_details)
elif object_name is None:
object_list = list_bucket_objects(connection, module, bucket_name)
- for object in object_list:
- result.append(get_object_details(connection, module, bucket_name, object, requested_object_details))
+ for bucket_object in object_list:
+ result.append(
+ get_object_details(connection, module, bucket_name, bucket_object, requested_object_details)
+ )
elif not requested_object_details and object_name:
# if specific details are not requested, return object metadata
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml
index d83b14440..0b07bec77 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml
@@ -7,7 +7,7 @@
region: "{{ aws_region }}"
block:
- name: List available Regions
- community.aws.aws_region_info:
+ amazon.aws.aws_region_info:
register: regions
- name: check task return attributes
vars:
@@ -22,7 +22,7 @@
- '"region_name" in first_region'
- name: List available Regions - check_mode
- community.aws.aws_region_info:
+ amazon.aws.aws_region_info:
register: check_regions
- name: check task return attributes - check_mode
vars:
@@ -37,7 +37,7 @@
- '"region_name" in first_region'
- name: Filter available Regions using - ("region-name")
- community.aws.aws_region_info:
+ amazon.aws.aws_region_info:
filters:
region-name: us-west-1
register: us_west_1
@@ -58,7 +58,7 @@
- first_region.region_name == 'us-west-1'
- name: Filter available Regions using _ ("region_name")
- community.aws.aws_region_info:
+ amazon.aws.aws_region_info:
filters:
region_name: us-west-2
register: us_west_2
@@ -79,7 +79,7 @@
- first_region.region_name == 'us-west-2'
- name: Filter available Regions using _ and - to check precedence
- community.aws.aws_region_info:
+ amazon.aws.aws_region_info:
filters:
region-name: eu-west-1
region_name: eu-central-1
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml
index 7cf27ce8c..11d550d48 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml
@@ -12,7 +12,7 @@
# ============================================================
- name: Create an IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ backup_iam_role_name }}"
assume_role_policy_document: '{{ lookup("file", "backup-policy.json") }}'
create_instance_profile: false
@@ -745,7 +745,7 @@
ignore_errors: true
- name: Delete IAM role created during this test
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ backup_iam_role_name }}"
state: absent
ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml
index 3d4f60144..6f9e8fe48 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml
@@ -141,7 +141,7 @@
register: kms_key2
- name: Create CloudWatch IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: present
name: "{{ cloudwatch_role }}"
assume_role_policy_document: "{{ lookup('template', 'cloudwatch-assume-policy.j2') }}"
@@ -167,7 +167,7 @@
policy_json: "{{ lookup('template', 'cloudwatch-policy.j2') | to_json }}"
- name: Create CloudWatch IAM Role with no kms permissions
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: present
name: "{{ cloudwatch_no_kms_role }}"
assume_role_policy_document: "{{ lookup('template', 'cloudtrail-no-kms-assume-policy.j2') }}"
@@ -551,7 +551,7 @@
state: present
name: "{{ cloudtrail_name }}"
cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}"
- cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}"
+ cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.iam_role.arn }}"
register: output
check_mode: true
- ansible.builtin.assert:
@@ -563,28 +563,28 @@
state: present
name: "{{ cloudtrail_name }}"
cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}"
- cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}"
+ cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.iam_role.arn }}"
register: output
- ansible.builtin.assert:
that:
- output is changed
- output.trail.name == cloudtrail_name
- output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.iam_role.arn
- name: Set CloudWatch Log Group (no change)
amazon.aws.cloudtrail:
state: present
name: "{{ cloudtrail_name }}"
cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}"
- cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}"
+ cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.iam_role.arn }}"
register: output
- ansible.builtin.assert:
that:
- output is not changed
- output.trail.name == cloudtrail_name
- output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.iam_role.arn
- name: No-op update to trail
amazon.aws.cloudtrail:
@@ -596,7 +596,7 @@
- output is not changed
- output.trail.name == cloudtrail_name
- output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.iam_role.arn
- name: Get the trail info with CloudWatch Log Group
amazon.aws.cloudtrail_info:
@@ -608,49 +608,49 @@
ansible.builtin.assert:
that:
- info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
- - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+ - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.iam_role.arn
- name: Update CloudWatch Log Group (CHECK MODE)
amazon.aws.cloudtrail:
state: present
name: "{{ cloudtrail_name }}"
cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group2.arn }}"
- cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}"
+ cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.iam_role.arn }}"
register: output
check_mode: true
- ansible.builtin.assert:
that:
- output is changed
- output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.iam_role.arn
- name: Update CloudWatch Log Group
amazon.aws.cloudtrail:
state: present
name: "{{ cloudtrail_name }}"
cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group2.arn }}"
- cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}"
+ cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.iam_role.arn }}"
register: output
- ansible.builtin.assert:
that:
- output is changed
- output.trail.name == cloudtrail_name
- output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.iam_role.arn
- name: Update CloudWatch Log Group (no change)
amazon.aws.cloudtrail:
state: present
name: "{{ cloudtrail_name }}"
cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group2.arn }}"
- cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}"
+ cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.iam_role.arn }}"
register: output
- ansible.builtin.assert:
that:
- output is not changed
- output.trail.name == cloudtrail_name
- output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+ - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.iam_role.arn
- name: Get the trail info with CloudWatch Log Group after update
amazon.aws.cloudtrail_info:
@@ -662,7 +662,7 @@
ansible.builtin.assert:
that:
- info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
- - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn
+ - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.iam_role.arn
#- name: 'Remove CloudWatch Log Group (CHECK MODE)'
# amazon.aws.cloudtrail:
@@ -1332,7 +1332,7 @@
# Assume role to a role with Denied access to KMS
- amazon.aws.sts_assume_role:
- role_arn: "{{ output_cloudwatch_no_kms_role.arn }}"
+ role_arn: "{{ output_cloudwatch_no_kms_role.iam_role.arn }}"
role_session_name: cloudtrailNoKms
region: "{{ aws_region }}"
register: noKms_assumed_role
@@ -1438,7 +1438,7 @@
s3_key_prefix: "{{ cloudtrail_prefix }}"
sns_topic_name: "{{ sns_topic }}"
cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}"
- cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}"
+ cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.iam_role.arn }}"
is_multi_region_trail: true
include_global_events: true
enable_log_file_validation: true
@@ -1468,7 +1468,7 @@
s3_key_prefix: "{{ cloudtrail_prefix }}"
sns_topic_name: "{{ sns_topic }}"
cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}"
- cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}"
+ cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.iam_role.arn }}"
is_multi_region_trail: true
include_global_events: true
enable_log_file_validation: true
@@ -1572,7 +1572,7 @@
policy_name: CloudWatch
ignore_errors: true
- name: Delete CloudWatch IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ cloudwatch_role }}"
ignore_errors: true
@@ -1584,7 +1584,7 @@
policy_name: CloudWatchNokms
ignore_errors: true
- name: Delete CloudWatch No KMS IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ cloudwatch_no_kms_role }}"
ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml
index 14f44c2eb..336ebdb08 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml
@@ -7,7 +7,7 @@
region: "{{ aws_region }}"
block:
- name: Create IAM role for test
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: present
name: "{{ first_iam_role }}"
assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
@@ -17,7 +17,7 @@
register: iam_role
- name: Create second IAM role for test
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: present
name: "{{ second_iam_role }}"
assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
@@ -46,7 +46,7 @@
- ansible.builtin.assert:
that:
- - instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")
+ - instance_with_role.instances[0].iam_instance_profile.arn == iam_role.iam_role.arn.replace(":role/", ":instance-profile/")
- name: Make instance with an instance_role(check mode)
amazon.aws.ec2_instance:
@@ -55,7 +55,7 @@
image_id: "{{ ec2_ami_id }}"
security_groups: "{{ sg.group_id }}"
instance_type: "{{ ec2_instance_type }}"
- instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}"
+ instance_role: "{{ iam_role.iam_role.arn.replace(':role/', ':instance-profile/') }}"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
@@ -86,7 +86,7 @@
image_id: "{{ ec2_ami_id }}"
security_groups: "{{ sg.group_id }}"
instance_type: "{{ ec2_instance_type }}"
- instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}"
+ instance_role: "{{ iam_role_2.iam_role.arn.replace(':role/', ':instance-profile/') }}"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
@@ -105,7 +105,7 @@
- ansible.builtin.assert:
that:
- - updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")
+ - updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.iam_role.arn.replace(":role/", ":instance-profile/")
- updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id
always:
@@ -119,7 +119,7 @@
ignore_errors: true
- name: Delete IAM role for test
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ item }}"
delete_instance_profile: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml
index edeccb4ea..3867885a7 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml
@@ -685,7 +685,6 @@
ansible.builtin.assert:
that:
- not delete_volume_result_idem.changed
- - '"Volume "+ volume2.volume_id +" does not exist" in delete_volume_result_idem.msg'
# Originally from ec2_vol_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml
index b591e4ae6..399f98acf 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml
@@ -4,6 +4,7 @@
resource_short: "{{ '%0.8x' % ((16**8) | random(seed=resource_prefix)) }}"
alb_name: alb-test-{{ resource_short }}
alb_2_name: alb-test-2-{{ resource_short }}
+alb_name_multiple_listener_test: alb-test-{{ resource_short }}-lt
tg_name: alb-test-{{ resource_short }}
tg_2_name: alb-test-2-{{ resource_short }}
vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16
@@ -26,3 +27,15 @@ elb_access_log_account_id_map:
us-gov-west-1: "048591011584"
elb_account_id: "{{ elb_access_log_account_id_map[aws_region] }}"
+
+local_certs:
+ - priv_key: "{{ remote_tmp_dir }}/private-1.pem"
+ cert: "{{ remote_tmp_dir }}/public-1.pem"
+ csr: "{{ remote_tmp_dir }}/csr-1.csr"
+ domain: elb-classic.{{ tiny_prefix }}.ansible.test
+ name: "{{ resource_prefix }}_{{ resource_prefix }}_1"
+ - priv_key: "{{ remote_tmp_dir }}/private-2.pem"
+ cert: "{{ remote_tmp_dir }}/public-2.pem"
+ csr: "{{ remote_tmp_dir }}/csr-2.csr"
+ domain: elb-classic.{{ tiny_prefix }}.ansible.test
+ name: "{{ resource_prefix }}_{{ resource_prefix }}_2"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/meta/main.yml
new file mode 100644
index 000000000..bef04ab7f
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+ - setup_ec2_facts
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/alb_with_multiple_listener_certs.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/alb_with_multiple_listener_certs.yml
new file mode 100644
index 000000000..af55c29bf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/alb_with_multiple_listener_certs.yml
@@ -0,0 +1,127 @@
+- name: Run tests
+ block:
+ - name: Generate private key for local certs
+ community.crypto.openssl_privatekey:
+ path: "{{ item.priv_key }}"
+ type: RSA
+ size: 2048
+ with_items: "{{ local_certs }}"
+
+ - name: Generate an OpenSSL Certificate Signing Request for own certs
+ community.crypto.openssl_csr:
+ path: "{{ item.csr }}"
+ privatekey_path: "{{ item.priv_key }}"
+ common_name: "{{ item.domain }}"
+ with_items: "{{ local_certs }}"
+
+ - name: Generate a Self Signed OpenSSL certificate for own certs
+ community.crypto.x509_certificate:
+ provider: selfsigned
+ path: "{{ item.cert }}"
+ csr_path: "{{ item.csr }}"
+ privatekey_path: "{{ item.priv_key }}"
+ selfsigned_digest: sha256
+ register: cert_create_result
+ with_items: "{{ local_certs }}"
+
+ - name: upload certificates
+ community.aws.acm_certificate:
+ name_tag: "{{ item.name }}"
+ certificate: "{{ lookup('file', item.cert ) }}"
+ private_key: "{{ lookup('file', item.priv_key ) }}"
+ state: present
+ tags:
+ Application: search
+ Environment: development
+ purge_tags: false
+ register: upload
+ with_items: "{{ local_certs }}"
+ until: upload is succeeded
+ retries: 5
+ delay: 10
+
+ - ansible.builtin.set_fact:
+ cert_1_arn: "{{ upload.results[0].certificate.arn }}"
+ cert_2_arn: "{{ upload.results[1].certificate.arn }}"
+
+ - name: Create a target group for testing
+ community.aws.elb_target_group:
+ name: "{{ tg_name }}"
+ protocol: http
+ port: 80
+ vpc_id: "{{ vpc_id }}"
+ state: present
+
+ - name: Create an ALB with listener having multiple certificates
+ amazon.aws.elb_application_lb:
+ name: "{{ alb_name_multiple_listener_test }}"
+ subnets: "{{ public_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: present
+ purge_listeners: False
+ listeners:
+ - Protocol: HTTPS
+ Port: 446
+ SslPolicy: ELBSecurityPolicy-TLS13-1-2-2021-06
+ Certificates: # The ARN of the certificate
+ - CertificateArn: "{{ cert_1_arn }}"
+ - CertificateArn: "{{ cert_2_arn }}"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ register: alb
+
+ - name: Gather information about a particular ALB given its ARN # returns only the default cert
+ amazon.aws.elb_application_lb_info:
+ load_balancer_arns:
+ - "{{ alb.load_balancer_arn }}"
+ register: alb_info
+
+ - name: obtain information about certificate 1
+ community.aws.acm_certificate_info:
+ certificate_arn: "{{ cert_1_arn }}"
+ register: cert_1_info
+
+ - name: obtain information about certificate 2
+ community.aws.acm_certificate_info:
+ certificate_arn: "{{ cert_2_arn }}"
+ register: cert_2_info
+
+ - name: Assert that both certificates are in use by the test load balancer
+ ansible.builtin.assert:
+ that:
+ - cert_1_info.certificates[0].in_use_by[0] == alb_info.load_balancers[0].load_balancer_arn
+ - cert_2_info.certificates[0].in_use_by[0] == alb_info.load_balancers[0].load_balancer_arn
+
+ always:
+ - name: Delete test ALB
+ amazon.aws.elb_application_lb:
+ name: "{{ alb_name_multiple_listener_test }}"
+ subnets: "{{ public_subnets }}"
+ security_groups: "{{ sec_group.group_id }}"
+ state: absent
+ purge_listeners: False
+ listeners:
+ - Protocol: HTTPS
+ Port: 446
+ SslPolicy: ELBSecurityPolicy-TLS13-1-2-2021-06
+ Certificates: # The ARN of the certificate
+ - CertificateArn: "{{ cert_1_arn }}"
+ - CertificateArn: "{{ cert_2_arn }}"
+ DefaultActions:
+ - Type: forward
+ TargetGroupName: "{{ tg_name }}"
+ ignore_errors: true
+
+ - name: delete a certificate with a particular ARN
+ community.aws.acm_certificate:
+ certificate_arn: "{{ item }}"
+ state: absent
+ register: delete_acm
+ with_items:
+ - "{{ cert_1_arn }}"
+ - "{{ cert_2_arn }}"
+ retries: 5
+ delay: 5
+ until: delete_acm is not failed
+ ignore_errors: true
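
The new test attaches two ACM certificates to a single HTTPS listener and then checks, through acm_certificate_info, that both report the load balancer in their in_use_by list. Outside Ansible the same state maps onto the ELBv2 listener-certificate APIs; a rough boto3 sketch with placeholder ARNs:

import boto3

elbv2 = boto3.client("elbv2")
listener_arn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:listener/app/example/abc/def"  # placeholder

# Attach an additional (non-default) certificate to an existing HTTPS listener.
elbv2.add_listener_certificates(
    ListenerArn=listener_arn,
    Certificates=[{"CertificateArn": "arn:aws:acm:us-east-1:123456789012:certificate/extra"}],  # placeholder
)

# List every certificate the listener now serves (default plus additional ones).
for cert in elbv2.describe_listener_certificates(ListenerArn=listener_arn)["Certificates"]:
    print(cert["CertificateArn"], "default" if cert.get("IsDefault") else "additional")
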
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml
index 6edc6416d..28d4bdbdd 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml
@@ -133,6 +133,10 @@
encryption: aws:kms
policy: "{{ lookup('template', 'policy.json') }}"
+
+ - name: Run tests for creating ALB with listener having multiple certificates
+ ansible.builtin.import_tasks: alb_with_multiple_listener_certs.yml
+
- name: Create an ALB (invalid - SslPolicy is required when Protocol == HTTPS)
amazon.aws.elb_application_lb:
name: "{{ alb_name }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml
index 54015a446..269cd51ec 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml
@@ -15,7 +15,7 @@
state: present
- name: Create Safe IAM Managed Policy
- community.aws.iam_managed_policy:
+ amazon.aws.iam_managed_policy:
state: present
policy_name: "{{ custom_policy_name }}"
policy_description: A safe (deny-all) managed policy
@@ -54,7 +54,7 @@
state: absent
- name: Remove Safe IAM Managed Policy
- community.aws.iam_managed_policy:
+ amazon.aws.iam_managed_policy:
state: absent
policy_name: "{{ custom_policy_name }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml
index 794b7a4ae..cbebc966a 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml
@@ -17,7 +17,7 @@
# Prepare
- name: Prepare IAM Roles
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: present
name: "{{ item }}"
path: "{{ test_path }}"
@@ -504,7 +504,7 @@
- "{{ test_role }}-2"
- name: Remove IAM Roles
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ item }}"
path: "{{ test_path }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml
index 51ece2c3a..4257634b7 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml
@@ -2,3 +2,6 @@
policy_name: "{{ resource_prefix }}-policy"
policy_path: "/ansible-test-{{ tiny_prefix }}/"
policy_description: "An example Managed Policy description"
+test_role: "{{ resource_prefix }}-mp-role"
+test_user: "{{ resource_prefix }}-mp-user"
+test_group: "{{ resource_prefix }}-mp-group"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/files/deny-assume.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/files/deny-assume.json
new file mode 100644
index 000000000..73e877158
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/files/deny-assume.json
@@ -0,0 +1,10 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": "sts:AssumeRole",
+ "Principal": { "Service": "ec2.amazonaws.com" },
+ "Effect": "Deny"
+ }
+ ]
+}
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml
index c6ab19a74..ec4238b85 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml
@@ -9,6 +9,21 @@
collections:
- amazon.aws
block:
+ - name: Create IAM group
+ amazon.aws.iam_group:
+ name: "{{ test_group }}"
+ state: present
+ - name: Create IAM user
+ amazon.aws.iam_user:
+ name: "{{ test_user }}"
+ state: present
+ - name: Create IAM role
+ amazon.aws.iam_role:
+ name: "{{ test_role }}"
+ assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
+ create_instance_profile: false
+ state: present
+
## Test policy creation
- name: Create IAM managed policy - check mode
amazon.aws.iam_managed_policy:
@@ -448,14 +463,60 @@
- result.policy.tags["Tag C"] == "Value C"
- result.policy.tags["tag d"] == "value d"
+ - name: Attach managed policy to group
+ amazon.aws.iam_group:
+ name: "{{ test_group }}"
+ state: present
+ managed_policies:
+ - "{{ policy_name }}"
+ - name: Attach managed policy to user
+ amazon.aws.iam_user:
+ name: "{{ test_user }}"
+ state: present
+ managed_policies:
+ - "{{ policy_name }}"
+ - name: Attach managed policy to role
+ amazon.aws.iam_role:
+ name: "{{ test_role }}"
+ state: present
+ assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
+ managed_policies:
+ - "{{ policy_name }}"
+
- name: Delete IAM managed policy
amazon.aws.iam_managed_policy:
policy_name: "{{ policy_name }}"
state: absent
+ - name: Delete IAM group
+ amazon.aws.iam_group:
+ name: "{{ test_group }}"
+ state: absent
+ - name: Delete IAM user
+ amazon.aws.iam_user:
+ name: "{{ test_user }}"
+ state: absent
+ - name: Delete IAM role
+ amazon.aws.iam_role:
+ name: "{{ test_role }}"
+ state: absent
+
always:
- name: Delete IAM managed policy
amazon.aws.iam_managed_policy:
policy_name: "{{ policy_name }}"
state: absent
ignore_errors: true # noqa: ignore-errors
+
+ - name: Delete IAM group
+ amazon.aws.iam_group:
+ name: "{{ test_group }}"
+ state: absent
+ - name: Delete IAM user
+ amazon.aws.iam_user:
+ name: "{{ test_user }}"
+ state: absent
+ - name: Delete IAM role
+ amazon.aws.iam_role:
+ name: "{{ test_role }}"
+ state: absent
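
The added steps attach the managed policy to a group, a user, and a role before deleting it, which presumably exercises the detach-before-delete handling in iam_managed_policy. For reference, a minimal boto3 sketch of the same attachments and the cleanup AWS requires before deletion (entity names and the policy ARN are placeholders):

import boto3

iam = boto3.client("iam")
policy_arn = "arn:aws:iam::123456789012:policy/example-policy"  # placeholder

# Attach the same managed policy to a group, a user and a role.
iam.attach_group_policy(GroupName="example-group", PolicyArn=policy_arn)
iam.attach_user_policy(UserName="example-user", PolicyArn=policy_arn)
iam.attach_role_policy(RoleName="example-role", PolicyArn=policy_arn)

# A managed policy cannot be deleted while attached, so detach everywhere first.
# (Non-default policy versions would also need deleting; omitted here.)
entities = iam.list_entities_for_policy(PolicyArn=policy_arn)
for group in entities["PolicyGroups"]:
    iam.detach_group_policy(GroupName=group["GroupName"], PolicyArn=policy_arn)
for user in entities["PolicyUsers"]:
    iam.detach_user_policy(UserName=user["UserName"], PolicyArn=policy_arn)
for role in entities["PolicyRoles"]:
    iam.detach_role_policy(RoleName=role["RoleName"], PolicyArn=policy_arn)
iam.delete_policy(PolicyArn=policy_arn)
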
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml
index 9b4fa7167..8d497813a 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml
@@ -9,7 +9,7 @@
- amazon.aws
block:
- name: set iam password policy
- community.aws.iam_password_policy:
+ amazon.aws.iam_password_policy:
state: present
min_pw_length: 8
require_symbols: false
@@ -28,7 +28,7 @@
- result.changed
- name: verify iam password policy has been created
- community.aws.iam_password_policy:
+ amazon.aws.iam_password_policy:
state: present
min_pw_length: 8
require_symbols: false
@@ -47,7 +47,7 @@
- not result.changed
- name: update iam password policy with different settings
- community.aws.iam_password_policy:
+ amazon.aws.iam_password_policy:
state: present
min_pw_length: 15
require_symbols: true
@@ -67,7 +67,7 @@
# Test for regression of #59102
- name: update iam password policy without expiry
- community.aws.iam_password_policy:
+ amazon.aws.iam_password_policy:
state: present
min_pw_length: 15
require_symbols: true
@@ -83,7 +83,7 @@
- result.changed
- name: remove iam password policy
- community.aws.iam_password_policy:
+ amazon.aws.iam_password_policy:
state: absent
register: result
@@ -93,7 +93,7 @@
- result.changed
- name: verify password policy has been removed
- community.aws.iam_password_policy:
+ amazon.aws.iam_password_policy:
state: absent
register: result
@@ -103,6 +103,6 @@
- not result.changed
always:
- name: remove iam password policy
- community.aws.iam_password_policy:
+ amazon.aws.iam_password_policy:
state: absent
register: result
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml
index 9ed065036..afae59a0e 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml
@@ -19,7 +19,7 @@
- result is changed
- name: Create role for tests
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: present
name: "{{ iam_name }}"
assume_role_policy_document: "{{ lookup('file','no_trust.json') }}"
@@ -30,7 +30,7 @@
- result is changed
- name: Create group for tests
- community.aws.iam_group:
+ amazon.aws.iam_group:
state: present
name: "{{ iam_name }}"
register: result
@@ -60,12 +60,12 @@
name: "{{ iam_name }}"
ignore_errors: true
- name: Remove role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ iam_name }}"
ignore_errors: true
- name: Remove group
- community.aws.iam_group:
+ amazon.aws.iam_group:
state: absent
name: "{{ iam_name }}"
ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml
index 8d7bdfb1d..57a4b9e2b 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml
@@ -1,6 +1,7 @@
---
test_role: "{{ resource_prefix }}-role"
-test_path: /{{ resource_prefix }}/
+test_path: "/{{ resource_prefix }}/"
+bad_test_path: "{{ resource_prefix }}"
safe_managed_policy: AWSDenyAll
custom_policy_name: "{{ resource_prefix }}-denyall"
-boundary_policy: arn:aws:iam::aws:policy/AWSDenyAll
+boundary_policy: "arn:aws:iam::aws:policy/AWSDenyAll"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml
index 706853c67..f24731f08 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml
@@ -1,6 +1,6 @@
---
- name: Create minimal role with no boundary policy
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
register: iam_role
@@ -10,7 +10,7 @@
- iam_role.iam_role.role_name == test_role
- name: Configure Boundary Policy (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
boundary: "{{ boundary_policy }}"
@@ -21,7 +21,7 @@
- iam_role is changed
- name: Configure Boundary Policy
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
boundary: "{{ boundary_policy }}"
@@ -32,7 +32,7 @@
- iam_role.iam_role.role_name == test_role
- name: Configure Boundary Policy (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
boundary: "{{ boundary_policy }}"
@@ -43,7 +43,7 @@
- iam_role is not changed
- name: Configure Boundary Policy (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
boundary: "{{ boundary_policy }}"
@@ -54,7 +54,7 @@
- iam_role.iam_role.role_name == test_role
- name: iam_role_info after adding boundary policy
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -77,7 +77,7 @@
- role_info.iam_roles[0].role_name == test_role
- name: Remove IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
delete_instance_profile: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml
index 7195c5887..e0a33d7ca 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml
@@ -1,6 +1,6 @@
---
- name: Complex IAM Role (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
boundary: "{{ boundary_policy }}"
@@ -20,7 +20,7 @@
- iam_role is changed
- name: iam_role_info after Complex Role creation in check_mode
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -29,7 +29,7 @@
- role_info.iam_roles | length == 0
- name: Complex IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
boundary: "{{ boundary_policy }}"
@@ -59,7 +59,7 @@
- '"role_id" in iam_role.iam_role'
- name: Complex IAM role (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
boundary: "{{ boundary_policy }}"
@@ -79,7 +79,7 @@
- iam_role is not changed
- name: Complex IAM role (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
boundary: "{{ boundary_policy }}"
@@ -99,7 +99,7 @@
- iam_role.iam_role.role_name == test_role
- name: iam_role_info after Role creation
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml
index 9c81019c8..694c4d16b 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml
@@ -2,12 +2,12 @@
- name: Try running some rapid fire create/delete tests
block:
- name: Minimal IAM Role without instance profile (rapid)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
register: iam_role
- name: Minimal IAM Role without instance profile (rapid)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
register: iam_role_again
@@ -17,12 +17,12 @@
- iam_role_again is not changed
- name: Remove IAM Role (rapid)
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
register: iam_role
- name: Remove IAM Role (rapid)
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
register: iam_role_again
@@ -32,12 +32,12 @@
- iam_role_again is not changed
- name: Minimal IAM Role without instance profile (rapid)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
register: iam_role
- name: Remove IAM Role (rapid)
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
register: iam_role_again
@@ -50,14 +50,14 @@
# Role Creation
# (without Instance profile)
- name: iam_role_info before Role creation (no args)
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
register: role_info
- ansible.builtin.assert:
that:
- role_info is succeeded
- name: iam_role_info before Role creation (search for test role)
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -66,7 +66,7 @@
- role_info.iam_roles | length == 0
- name: Minimal IAM Role (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
register: iam_role
@@ -76,7 +76,7 @@
- iam_role is changed
- name: iam_role_info after Role creation in check_mode
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -85,7 +85,7 @@
- role_info.iam_roles | length == 0
- name: Minimal IAM Role without instance profile
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
register: iam_role
@@ -106,7 +106,7 @@
- '"role_id" in iam_role.iam_role'
- name: Minimal IAM Role without instance profile (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
register: iam_role
@@ -116,7 +116,7 @@
- iam_role is not changed
- name: Minimal IAM Role without instance profile (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: false
register: iam_role
@@ -126,7 +126,7 @@
- iam_role.iam_role.role_name == test_role
- name: iam_role_info after Role creation
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -151,7 +151,7 @@
- role_info.iam_roles[0].tags | length == 0
- name: Remove IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
delete_instance_profile: true
@@ -161,7 +161,7 @@
- iam_role is changed
- name: iam_role_info after Role deletion
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -173,7 +173,7 @@
# (with path)
- name: Minimal IAM Role with path (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
path: "{{ test_path }}"
register: iam_role
@@ -183,7 +183,7 @@
- iam_role is changed
- name: Minimal IAM Role with path
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
path: "{{ test_path }}"
register: iam_role
@@ -203,7 +203,7 @@
- '"role_id" in iam_role.iam_role'
- name: Minimal IAM Role with path (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
path: "{{ test_path }}"
register: iam_role
@@ -213,7 +213,7 @@
- iam_role is not changed
- name: Minimal IAM Role with path (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
path: "{{ test_path }}"
register: iam_role
@@ -223,7 +223,7 @@
- iam_role.iam_role.role_name == test_role
- name: Minimal IAM Role with updated path (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
path: "{{ test_path }}subpath/"
register: iam_role
@@ -234,7 +234,7 @@
- iam_role.iam_role.role_name == test_role
- name: iam_role_info after Role creation
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -260,7 +260,7 @@
- role_info.iam_roles[0].tags | length == 0
- name: iam_role_info after Role creation (searching a path)
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
path_prefix: "{{ test_path }}"
register: role_info
- ansible.builtin.assert:
@@ -285,8 +285,34 @@
- role_info.iam_roles[0].role_name == test_role
- role_info.iam_roles[0].tags | length == 0
+- name: iam_role_info after Role creation (searching a path without / prefix and suffix)
+ amazon.aws.iam_role_info:
+ path_prefix: "{{ bad_test_path }}"
+ register: role_info
+- ansible.builtin.assert:
+ that:
+ - role_info is succeeded
+ - role_info.iam_roles | length == 1
+ - role_info.iam_roles[0].arn.startswith("arn")
+ - role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )
+ - '"assume_role_policy_document" in role_info.iam_roles[0]'
+ - '"create_date" in role_info.iam_roles[0]'
+ - '"description" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].inline_policies | length == 0
+ - role_info.iam_roles[0].instance_profiles | length == 1
+ - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+ - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")
+ - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)
+ - role_info.iam_roles[0].managed_policies | length == 0
+ - role_info.iam_roles[0].max_session_duration == 3600
+ - '"permissions_boundary" not in role_info.iam_roles[0]'
+ - role_info.iam_roles[0].path == test_path
+ - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+ - role_info.iam_roles[0].role_name == test_role
+ - role_info.iam_roles[0].tags | length == 0
+
- name: Remove IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
path: "{{ test_path }}"
@@ -297,7 +323,7 @@
- iam_role is changed
- name: iam_role_info after Role deletion
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -309,7 +335,7 @@
# (with Instance profile)
- name: Minimal IAM Role with instance profile - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: true
register: iam_role
@@ -319,7 +345,7 @@
- iam_role is changed
- name: Minimal IAM Role with instance profile
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: true
register: iam_role
@@ -339,7 +365,7 @@
- '"role_id" in iam_role.iam_role'
- name: Minimal IAM Role with instance profile (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: true
register: iam_role
@@ -349,7 +375,7 @@
- iam_role is not changed
- name: Minimal IAM Role with instance profile (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
create_instance_profile: true
register: iam_role
@@ -359,7 +385,7 @@
- iam_role.iam_role.role_name == test_role
- name: iam_role_info after Role creation
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml
index 0cb9a46af..edb88f7dd 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml
@@ -1,6 +1,6 @@
---
- name: Add Description (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
description: Ansible Test Role {{ resource_prefix }}
check_mode: true
@@ -10,7 +10,7 @@
- iam_role is changed
- name: Add Description
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
description: Ansible Test Role {{ resource_prefix }}
register: iam_role
@@ -21,7 +21,7 @@
- iam_role.iam_role.description == "Ansible Test Role "+resource_prefix
- name: Add Description (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
description: Ansible Test Role {{ resource_prefix }}
register: iam_role
@@ -31,7 +31,7 @@
- iam_role is not changed
- name: Add Description (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
description: Ansible Test Role {{ resource_prefix }}
register: iam_role
@@ -42,7 +42,7 @@
- iam_role.iam_role.description == "Ansible Test Role "+resource_prefix
- name: iam_role_info after adding Description
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -70,7 +70,7 @@
# ------------------------------------------------------------------------------------------
- name: Update Description (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
description: Ansible Test Role (updated) {{ resource_prefix }}
check_mode: true
@@ -80,7 +80,7 @@
- iam_role is changed
- name: Update Description
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
description: Ansible Test Role (updated) {{ resource_prefix }}
register: iam_role
@@ -91,7 +91,7 @@
- iam_role.iam_role.description == 'Ansible Test Role (updated) '+resource_prefix
- name: Update Description (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
description: Ansible Test Role (updated) {{ resource_prefix }}
register: iam_role
@@ -101,7 +101,7 @@
- iam_role is not changed
- name: Update Description (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
description: Ansible Test Role (updated) {{ resource_prefix }}
register: iam_role
@@ -112,7 +112,7 @@
- iam_role.iam_role.description == 'Ansible Test Role (updated) '+resource_prefix
- name: iam_role_info after updating Description
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml
index 0091045e8..5b3e42458 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml
@@ -14,7 +14,7 @@
policy_name: inline-policy-b
policy_json: '{{ lookup("file", "deny-all-b.json") }}'
- name: iam_role_info after attaching inline policies (using iam_policy)
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml
index b7a62db9f..21e25d9e3 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml
@@ -34,7 +34,7 @@
assume_deny_policy: '{{ lookup("file", "deny-assume.json") | from_json }}'
- ansible.builtin.include_tasks: parameter_checks.yml
- name: Create Safe IAM Managed Policy
- community.aws.iam_managed_policy:
+ amazon.aws.iam_managed_policy:
state: present
policy_name: "{{ custom_policy_name }}"
policy_description: A safe (deny-all) managed policy
@@ -60,23 +60,23 @@
# Cleanup
- name: Remove IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
delete_instance_profile: true
ignore_errors: true
- name: Remove IAM Role (with path)
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
path: "{{ test_path }}"
delete_instance_profile: true
ignore_errors: true
- name: iam_role_info after Role deletion
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
ignore_errors: true
- name: Remove test managed policy
- community.aws.iam_managed_policy:
+ amazon.aws.iam_managed_policy:
state: absent
policy_name: "{{ custom_policy_name }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml
index fe43bcfc8..576e6b24c 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml
@@ -1,6 +1,6 @@
---
- name: Update Max Session Duration (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
max_session_duration: 43200
check_mode: true
@@ -10,7 +10,7 @@
- iam_role is changed
- name: Update Max Session Duration
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
max_session_duration: 43200
register: iam_role
@@ -21,7 +21,7 @@
- iam_role.iam_role.max_session_duration == 43200
- name: Update Max Session Duration (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
max_session_duration: 43200
register: iam_role
@@ -30,7 +30,7 @@
- iam_role is not changed
- name: Update Max Session Duration (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
max_session_duration: 43200
register: iam_role
@@ -40,7 +40,7 @@
- iam_role is not changed
- name: iam_role_info after updating Max Session Duration
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml
index 2cf46eebf..545072674 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml
@@ -1,7 +1,7 @@
---
# Parameter Checks
- name: Friendly message when creating an instance profile and adding a boundary profile
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
boundary: "{{ boundary_policy }}"
register: iam_role
@@ -14,7 +14,7 @@
- '"false" in iam_role.msg'
- name: Friendly message when boundary profile is not an ARN
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
boundary: AWSDenyAll
create_instance_profile: false
@@ -28,7 +28,7 @@
- name: Friendly message when "present" without assume_role_policy_document
module_defaults: { amazon.aws.iam_role: {}}
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
register: iam_role
ignore_errors: true
@@ -39,7 +39,7 @@
- '"assume_role_policy_document" in iam_role.msg'
- name: Maximum Session Duration needs to be between 1 and 12 hours
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
max_session_duration: 3599
register: iam_role
@@ -50,7 +50,7 @@
- '"max_session_duration must be between" in iam_role.msg'
- name: Maximum Session Duration needs to be between 1 and 12 hours
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
max_session_duration: 43201
register: iam_role
@@ -61,7 +61,7 @@
- '"max_session_duration must be between" in iam_role.msg'
- name: Role Paths must start with /
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
path: test/
register: iam_role
@@ -72,7 +72,7 @@
- '"path must begin and end with /" in iam_role.msg'
- name: Role Paths must end with /
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
path: /test
register: iam_role
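The parameter checks above exercise iam_role's client-side validation: the boundary must be a full ARN, max_session_duration must fall between 3600 and 43200 seconds, and the role path must both begin and end with "/". A minimal sketch of a call that satisfies all three constraints, using illustrative values rather than anything taken from the test suite:

- name: Create a role that passes the validations above (illustrative sketch)
  amazon.aws.iam_role:
    name: example-role                                            # hypothetical name
    assume_role_policy_document: '{{ lookup("file", "deny-assume.json") | from_json }}'
    boundary: arn:aws:iam::123456789012:policy/ExampleBoundary    # must be a full policy ARN
    max_session_duration: 7200                                    # seconds, within 3600-43200
    path: /example/                                               # must begin and end with /
    create_instance_profile: false
  register: iam_role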
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml
index 4fa5cd6d2..27b911ae7 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml
@@ -1,6 +1,6 @@
---
- name: Add Managed Policy (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_policies: false
managed_policy:
@@ -12,7 +12,7 @@
- iam_role is changed
- name: Add Managed Policy
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_policies: false
managed_policy:
@@ -24,7 +24,7 @@
- iam_role.iam_role.role_name == test_role
- name: Add Managed Policy (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_policies: false
managed_policy:
@@ -36,7 +36,7 @@
- iam_role is not changed
- name: Add Managed Policy (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_policies: false
managed_policy:
@@ -48,7 +48,7 @@
- iam_role.iam_role.role_name == test_role
- name: iam_role_info after adding Managed Policy
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -80,7 +80,7 @@
# ------------------------------------------------------------------------------------------
- name: Update Managed Policy without purge (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_policies: false
managed_policy:
@@ -92,7 +92,7 @@
- iam_role is changed
- name: Update Managed Policy without purge
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_policies: false
managed_policy:
@@ -104,7 +104,7 @@
- iam_role.iam_role.role_name == test_role
- name: Update Managed Policy without purge (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_policies: false
managed_policy:
@@ -116,7 +116,7 @@
- iam_role is not changed
- name: Update Managed Policy without purge (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_policies: false
managed_policy:
@@ -128,7 +128,7 @@
- iam_role.iam_role.role_name == test_role
- name: iam_role_info after updating Managed Policy without purge
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -161,7 +161,7 @@
# Managed Policies are purged by default
- name: Update Managed Policy with purge (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
managed_policy:
- "{{ custom_policy_name }}"
@@ -172,7 +172,7 @@
- iam_role is changed
- name: Update Managed Policy with purge
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
managed_policy:
- "{{ custom_policy_name }}"
@@ -183,7 +183,7 @@
- iam_role.iam_role.role_name == test_role
- name: Update Managed Policy with purge (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
managed_policy:
- "{{ custom_policy_name }}"
@@ -194,7 +194,7 @@
- iam_role is not changed
- name: Update Managed Policy with purge (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
managed_policy:
- "{{ custom_policy_name }}"
@@ -205,7 +205,7 @@
- iam_role.iam_role.role_name == test_role
- name: iam_role_info after updating Managed Policy with purge
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml
index 8761bda73..f4e79252a 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml
@@ -1,6 +1,6 @@
---
- name: Remove IAM Role (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
delete_instance_profile: true
@@ -11,7 +11,7 @@
- iam_role is changed
- name: iam_role_info after deleting role in check mode
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -20,7 +20,7 @@
- role_info.iam_roles | length == 1
- name: Remove IAM Role
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
delete_instance_profile: true
@@ -30,7 +30,7 @@
- iam_role is changed
- name: iam_role_info after deleting role
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -39,7 +39,7 @@
- role_info.iam_roles | length == 0
- name: Remove IAM Role (should be gone already) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
delete_instance_profile: true
@@ -50,7 +50,7 @@
- iam_role is not changed
- name: Remove IAM Role (should be gone already)
- community.aws.iam_role:
+ amazon.aws.iam_role:
state: absent
name: "{{ test_role }}"
delete_instance_profile: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml
index e74820d77..45d703cc1 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml
@@ -1,6 +1,6 @@
---
- name: Add Tag (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
tags:
TagA: ValueA
@@ -11,7 +11,7 @@
- iam_role is changed
- name: Add Tag
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
tags:
TagA: ValueA
@@ -25,7 +25,7 @@
- iam_role.iam_role.tags.TagA == "ValueA"
- name: Add Tag (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
tags:
TagA: ValueA
@@ -36,7 +36,7 @@
- iam_role is not changed
- name: Add Tag (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
tags:
TagA: ValueA
@@ -49,7 +49,7 @@
- iam_role.iam_role.tags.TagA == "ValueA"
- name: iam_role_info after adding Tags
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -79,7 +79,7 @@
# ------------------------------------------------------------------------------------------
- name: Update Tag (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
tags:
TagA: AValue
@@ -90,7 +90,7 @@
- iam_role is changed
- name: Update Tag
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
tags:
TagA: AValue
@@ -103,7 +103,7 @@
- iam_role.iam_role.tags.TagA == "AValue"
- name: Update Tag (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
tags:
TagA: AValue
@@ -114,7 +114,7 @@
- iam_role is not changed
- name: Update Tag (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
tags:
TagA: AValue
@@ -127,7 +127,7 @@
- iam_role.iam_role.tags.TagA == "AValue"
- name: iam_role_info after updating Tag
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -157,7 +157,7 @@
# ------------------------------------------------------------------------------------------
- name: Add second Tag without purge (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_tags: false
tags:
@@ -169,7 +169,7 @@
- iam_role is changed
- name: Add second Tag without purge
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_tags: false
tags:
@@ -183,7 +183,7 @@
- iam_role.iam_role.tags.TagB == "ValueB"
- name: Add second Tag without purge (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_tags: false
tags:
@@ -195,7 +195,7 @@
- iam_role is not changed
- name: Add second Tag without purge (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_tags: false
tags:
@@ -209,7 +209,7 @@
- iam_role.iam_role.tags.TagB == "ValueB"
- name: iam_role_info after adding second Tag without purge
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
@@ -241,7 +241,7 @@
# ------------------------------------------------------------------------------------------
- name: Purge first tag (CHECK MODE)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_tags: true
tags:
@@ -253,7 +253,7 @@
- iam_role is changed
- name: Purge first tag
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_tags: true
tags:
@@ -267,7 +267,7 @@
- iam_role.iam_role.tags.TagB == "ValueB"
- name: Purge first tag (no change) - check mode
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_tags: true
tags:
@@ -278,7 +278,7 @@
- iam_role is not changed
- name: Purge first tag (no change)
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ test_role }}"
purge_tags: true
tags:
@@ -292,7 +292,7 @@
- iam_role.iam_role.tags.TagB == "ValueB"
- name: iam_role_info after purging first Tag
- community.aws.iam_role_info:
+ amazon.aws.iam_role_info:
name: "{{ test_role }}"
register: role_info
- ansible.builtin.assert:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml
index 675b9a5b1..870dd4931 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml
@@ -160,7 +160,7 @@
ansible.builtin.include_tasks: deletion.yml
always:
- name: Remove group
- community.aws.iam_group:
+ amazon.aws.iam_group:
name: "{{ test_group }}"
state: absent
ignore_errors: true # noqa: ignore-errors
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml
index c8e820aad..161fdc6e6 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml
@@ -31,7 +31,7 @@
# Create VPC, subnet, security group, and find image_id to create instance
- ansible.builtin.include_tasks: tasks/setup.yml
- name: Ensure IAM instance role exists
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ iam_role_name }}"
assume_role_policy_document: "{{ lookup('file', 'files/ec2-trust-policy.json') }}"
state: present
@@ -117,7 +117,7 @@
always:
- name: Delete IAM role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ iam_role_name }}"
state: absent
wait: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml
index ff97a1a09..d9ad140e9 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml
@@ -10,7 +10,7 @@
amazon.aws.aws_caller_info:
register: aws_caller_info
- name: Create an IAM role that can do nothing
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ kms_key_alias }}"
state: present
assume_role_policy_document:
@@ -353,7 +353,7 @@
pending_window: 7
ignore_errors: true # noqa: ignore-errors
- name: Remove the IAM role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ kms_key_alias }}"
state: absent
ignore_errors: true # noqa: ignore-errors
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml
index 1adb65094..8a0390615 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml
@@ -10,7 +10,7 @@
amazon.aws.aws_caller_info:
register: aws_caller_info
- name: Create an IAM role that can do nothing
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ kms_key_alias }}"
state: present
assume_role_policy_document:
@@ -286,7 +286,7 @@
pending_window: 7
ignore_errors: true # noqa: ignore-errors
- name: Remove the IAM role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ kms_key_alias }}"
state: absent
ignore_errors: true # noqa: ignore-errors
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml
index dd8392d20..3720b4d79 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml
@@ -16,7 +16,7 @@
when: (lookup('env', 'HOME'))
# Preparation
- name: create minimal lambda role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ lambda_role_name }}"
assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
create_instance_profile: false
@@ -807,7 +807,7 @@
- "{{ lambda_function_name }}_4"
- name: ensure role has been removed at end of test
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ lambda_role_name }}"
state: absent
ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml
index d6b8e0d6e..e96aa8269 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml
@@ -12,7 +12,7 @@
# ==============================================================
# Preparation
- name: create minimal lambda role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ lambda_role_name }}"
assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
create_instance_profile: false
@@ -616,7 +616,7 @@
state: absent
ignore_errors: true
- name: ensure role has been removed at end of test
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ lambda_role_name }}"
state: absent
delete_instance_profile: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml
index f06482a62..1e49d1373 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml
@@ -6,11 +6,10 @@
secret_key: "{{ aws_secret_key }}"
session_token: "{{ security_token | default(omit) }}"
region: "{{ aws_region }}"
- collections:
- - community.general
block:
- name: Create test resources setup
ansible.builtin.import_tasks: setup.yml
+
- name: Create DynamoDB stream event mapping (trigger) - check_mode
amazon.aws.lambda_event:
state: present
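With every module referenced by its fully qualified collection name, the play-level collections: keyword removed above is no longer needed. A minimal sketch of the resulting pattern (the task body is illustrative and trimmed):

- hosts: localhost
  # no collections: keyword required once modules are addressed by FQCN
  tasks:
    - name: Call a module by its fully qualified name
      amazon.aws.lambda_event:
        state: present
        # ... remaining parameters as in the tasks above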
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml
index fa2668fd5..1f77a5e40 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml
@@ -33,7 +33,7 @@
when: (lookup('env', 'HOME'))
- name: create minimal lambda role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ lambda_role_name }}"
assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")}}'
create_instance_profile: false
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml
index 476465a6e..2f13e1de4 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml
@@ -26,7 +26,7 @@
state: absent
- name: Delete the role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ lambda_role_name }}"
assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")}}'
state: absent
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml
index c3c73aaf2..e222f9fa6 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml
@@ -12,7 +12,7 @@
- community.aws
block:
- name: create minimal lambda role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ lambda_role_name }}"
assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
create_instance_profile: false
@@ -50,7 +50,7 @@
path: "{{ output_dir }}/mini_http_lambda.py"
dest: "{{ output_dir }}/mini_http_lambda.zip"
- name: create minimal lambda role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: ansible_lambda_role
assume_role_policy_document: "{{ lookup('file', 'minimal_trust_policy.json', convert_data=False) }}"
create_instance_profile: false
@@ -143,7 +143,7 @@
register: destroy_result
ignore_errors: true
- name: Clean up test role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ lambda_role_name }}"
state: absent
ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/aliases
new file mode 100644
index 000000000..e26025382
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/aliases
@@ -0,0 +1,3 @@
+cloud/aws
+rds_cluster_param_group_info
+rds_engine_versions_info
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/defaults/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/defaults/main.yaml
new file mode 100644
index 000000000..8758e70cf
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+rds_cluster_param_group_name: "{{ resource_prefix }}-cluster-param-group"
+rds_engine: postgres
+resource_tags:
+ resource_prefix: "{{ resource_prefix }}"
+ some: tag
+ another: tag
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/tasks/main.yaml
new file mode 100644
index 000000000..3eb85f617
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_param_group/tasks/main.yaml
@@ -0,0 +1,328 @@
+---
+- module_defaults:
+ group/aws:
+ region: "{{ aws_region }}"
+ access_key: "{{ aws_access_key }}"
+ secret_key: "{{ aws_secret_key }}"
+ session_token: "{{ security_token | default(omit) }}"
+
+ block:
+ - name: Gather information about RDS engine version
+ amazon.aws.rds_engine_versions_info:
+ engine: "{{ rds_engine }}"
+ default_only: true
+ register: engine_versions
+
+ - name: Set variable for RDS param group family
+ ansible.builtin.set_fact:
+ dbparam_group_family: "{{ engine_versions.db_engine_versions.0.db_parameter_group_family }}"
+
+ # Test create using check_mode=true
+ - name: Create RDS cluster parameter group (check_mode=true)
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ check_mode: true
+ register: create_checkmode
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ register: cluster_params
+
+ - name: Assert that the RDS cluster parameter was not created (using check_mode=true)
+ ansible.builtin.assert:
+ that:
+ - create_checkmode is changed
+ - cluster_params.db_cluster_parameter_groups | length == 0
+
+ # Test create RDS cluster parameter group
+ - name: Create RDS cluster parameter group
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ register: create_group
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ register: cluster_params
+
+ - name: Assert that the RDS cluster parameter was created
+ ansible.builtin.assert:
+ that:
+ - create_group is changed
+ - create_group.db_cluster_parameter_group.db_cluster_parameter_group_arn
+ - create_group.db_cluster_parameter_group.db_cluster_parameter_group_name == rds_cluster_param_group_name
+ - create_group.db_cluster_parameter_group.db_parameter_group_family == dbparam_group_family
+ - cluster_params.db_cluster_parameter_groups | length == 1
+
+ # Test create RDS cluster parameter group (idempotency)
+ - name: Create RDS cluster parameter group (idempotency)
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ register: create_idempotency
+
+ - name: Validate that module did not report change
+ ansible.builtin.assert:
+ that:
+ - create_idempotency is not changed
+
+ # Test adding tag to existing RDS cluster parameter group (check_mode=true)
+ - name: Update existing RDS cluster parameter group with tags (check_mode=true)
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ tags: "{{ resource_tags }}"
+ register: create_tag
+ check_mode: true
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ register: cluster_params
+
+ - name: Validate that the resource has not been updated with tags (check_mode)
+ ansible.builtin.assert:
+ that:
+ - create_tag is changed
+ - cluster_params.db_cluster_parameter_groups.0.tags == {}
+
+ # Test adding tag to existing RDS cluster parameter group
+ - name: Update existing RDS cluster parameter group with tags
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ tags: "{{ resource_tags }}"
+ register: create_tag
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ register: cluster_params
+
+ - name: Validate that the resource has been updated with tags
+ ansible.builtin.assert:
+ that:
+ - create_tag is changed
+ - cluster_params.db_cluster_parameter_groups.0.tags == resource_tags
+
+ # Test adding tag to existing RDS cluster parameter group (idempotency)
+ - name: Update existing RDS cluster parameter group with tags (idempotency)
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ tags: "{{ resource_tags }}"
+ register: create_tag_idempotency
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ register: cluster_params
+
+ - name: Validate that the module did not report a change and the resource tags remain unchanged
+ ansible.builtin.assert:
+ that:
+ - create_tag_idempotency is not changed
+ - cluster_params.db_cluster_parameter_groups.0.tags == resource_tags
+
+ # Test adding a non-modifiable parameter
+ - name: Update RDS cluster param group with a non-modifiable parameter
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ parameters:
+ - parameter_name: archive_library
+ parameter_value: test
+ apply_method: immediate
+ register: not_modifiable
+ ignore_errors: true
+
+ - name: Ensure module failed to update a non-modifiable parameter
+ ansible.builtin.assert:
+ that:
+ - not_modifiable is failed
+ - '"The parameter archive_library cannot be modified" in not_modifiable.msg'
+
+ # Test adding invalid parameter
+ - name: Update RDS cluster param group with invalid parameter
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ parameters:
+ - parameter_name: invalid_fake
+ parameter_value: test
+ apply_method: immediate
+ register: invalid_param
+ ignore_errors: true
+
+ - name: Ensure module failed to update invalid parameter
+ ansible.builtin.assert:
+ that:
+ - invalid_param is failed
+ - '"Could not find parameter with name: invalid_fake" in invalid_param.msg'
+
+ # Test Modify parameters
+ - name: Modify RDS cluster parameter group with new parameters (check_mode)
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ parameters:
+ - parameter_name: array_nulls
+ parameter_value: "0"
+ apply_method: immediate
+ - parameter_name: authentication_timeout
+ parameter_value: "50"
+ apply_method: immediate
+ register: update_param_check_mode
+ check_mode: true
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ include_parameters: all
+ register: initial_params
+ no_log: true # very spammy
+
+ - name: Assert that the task executed in check_mode reported change, while the parameters remain unchanged
+ ansible.builtin.assert:
+ that:
+ - update_param_check_mode is changed
+ - "'parameter_value' not in array_nulls_param"
+ - "'parameter_value' not in auth_timeout_param"
+ vars:
+ array_nulls_param: "{{ initial_params.db_cluster_parameter_groups.0.db_parameters | selectattr('parameter_name', 'equalto', 'array_nulls') | first }}"
+ auth_timeout_param: "{{ initial_params.db_cluster_parameter_groups.0.db_parameters | selectattr('parameter_name', 'equalto', 'authentication_timeout') | first }}"
+
+ - name: Modify RDS cluster parameter group with new parameters
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ parameters:
+ - parameter_name: array_nulls
+ parameter_value: "0"
+ apply_method: immediate
+ - parameter_name: authentication_timeout
+ parameter_value: "50"
+ apply_method: immediate
+ register: update_parameters
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ include_parameters: user
+ register: cluster_params
+
+ - name: Assert that the parameters are updated correctly
+ ansible.builtin.assert:
+ that:
+ - update_parameters is changed
+ - cluster_params.db_cluster_parameter_groups.0.db_parameters | selectattr('parameter_name', 'equalto', 'array_nulls') | first | community.general.json_query('parameter_value') == "0"
+ - cluster_params.db_cluster_parameter_groups.0.db_parameters | selectattr('parameter_name', 'equalto', 'authentication_timeout') | first | community.general.json_query('parameter_value') == "50"
+
+ # Test Modify parameters (idempotency)
+ - name: Modify RDS cluster parameter group with new parameters (idempotency with check_mode)
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ parameters:
+ - parameter_name: array_nulls
+ parameter_value: "0"
+ apply_method: immediate
+ - parameter_name: authentication_timeout
+ parameter_value: "50"
+ apply_method: immediate
+ register: update_idempotency_check_mode
+ check_mode: true
+
+ - name: Ensure the task executed using check_mode did not report a change
+ ansible.builtin.assert:
+ that:
+ - update_idempotency_check_mode is not changed
+
+ - name: Modify RDS cluster parameter group with new parameters (idempotency)
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ db_parameter_group_family: "{{ dbparam_group_family }}"
+ description: "RDS cluster param group for Engine {{ engine_versions.db_engine_versions.0.engine_version }}"
+ parameters:
+ - parameter_name: array_nulls
+ parameter_value: "0"
+ apply_method: immediate
+ - parameter_name: authentication_timeout
+ parameter_value: "50"
+ apply_method: immediate
+ register: update_idempotency
+
+ - name: Ensure module did not report change
+ ansible.builtin.assert:
+ that:
+ - update_idempotency is not changed
+
+ # Test delete RDS cluster parameter group (check_mode=true)
+ - name: Delete existing RDS cluster parameter group (check_mode=true)
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ state: absent
+ register: delete_cluster_param_checkmode
+ check_mode: true
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ register: cluster_params
+
+ - name: Validate that module execution reported change but the RDS cluster param group was not deleted
+ ansible.builtin.assert:
+ that:
+ - delete_cluster_param_checkmode is changed
+ - cluster_params.db_cluster_parameter_groups | length == 1
+
+ # Test delete RDS cluster parameter group
+ - name: Delete existing RDS cluster parameter group
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ state: absent
+ register: delete_cluster_param
+
+ - name: Describe RDS parameter group
+ amazon.aws.rds_cluster_param_group_info:
+ name: "{{ rds_cluster_param_group_name }}"
+ register: cluster_params
+
+ - name: Validate that module execution reported change and the RDS cluster param group is deleted
+ ansible.builtin.assert:
+ that:
+ - delete_cluster_param is changed
+ - cluster_params.db_cluster_parameter_groups | length == 0
+
+ # Test delete RDS cluster parameter group (idempotency)
+ - name: Delete existing RDS cluster parameter group
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ state: absent
+ register: delete_cluster_param
+
+ - name: Ensure module did not report change
+ ansible.builtin.assert:
+ that:
+ - delete_cluster_param is not changed
+
+ always:
+ - name: Delete existing RDS cluster parameter group
+ amazon.aws.rds_cluster_param_group:
+ name: "{{ rds_cluster_param_group_name }}"
+ state: absent
+ register: delete_cluster_param
+ ignore_errors: true
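The assertions in the new target above dig individual parameter values out of the rds_cluster_param_group_info result with selectattr/first (and community.general.json_query for the value comparison). A standalone sketch of that extraction, assuming a cluster_params variable registered exactly as in the tasks above:

- name: Show the current value of one cluster parameter (sketch)
  ansible.builtin.debug:
    msg: "{{ array_nulls_value }}"
  vars:
    array_nulls_value: >-
      {{ cluster_params.db_cluster_parameter_groups.0.db_parameters
         | selectattr('parameter_name', 'equalto', 'array_nulls')
         | map(attribute='parameter_value') | first | default('not set') }}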
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml
index f2b794609..c7b6d0de9 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml
@@ -14,4 +14,4 @@ storage_type: io1
iops: 1000
# For mariadb tests
-mariadb_engine_version: 10.6.10
+mariadb_engine_version: 10.11.7
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml
index e4d9daa60..51ff777ea 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml
@@ -23,7 +23,7 @@
ignore_errors: true
- name: Create an enhanced monitoring role
- community.aws.iam_role:
+ amazon.aws.iam_role:
assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json') }}"
name: "{{ instance_id }}-role"
state: present
@@ -91,7 +91,7 @@
preferred_maintenance_window: "{{ preferred_maintenance_window }}"
auto_minor_version_upgrade: false
monitoring_interval: "{{ monitoring_interval }}"
- monitoring_role_arn: "{{ enhanced_monitoring_role.arn }}"
+ monitoring_role_arn: "{{ enhanced_monitoring_role.iam_role.arn }}"
iops: "{{ iops }}"
port: 1150
max_allocated_storage: 150
@@ -115,7 +115,7 @@
preferred_maintenance_window: "{{ preferred_maintenance_window }}"
auto_minor_version_upgrade: false
monitoring_interval: "{{ monitoring_interval }}"
- monitoring_role_arn: "{{ enhanced_monitoring_role.arn }}"
+ monitoring_role_arn: "{{ enhanced_monitoring_role.iam_role.arn }}"
iops: "{{ iops }}"
port: 1150
max_allocated_storage: 150
@@ -143,7 +143,7 @@
preferred_maintenance_window: "{{ preferred_maintenance_window }}"
auto_minor_version_upgrade: false
monitoring_interval: "{{ monitoring_interval }}"
- monitoring_role_arn: "{{ enhanced_monitoring_role.arn }}"
+ monitoring_role_arn: "{{ enhanced_monitoring_role.iam_role.arn }}"
iops: "{{ iops }}"
port: 1150
max_allocated_storage: 150
@@ -166,7 +166,7 @@
preferred_maintenance_window: "{{ preferred_maintenance_window }}"
auto_minor_version_upgrade: false
monitoring_interval: "{{ monitoring_interval }}"
- monitoring_role_arn: "{{ enhanced_monitoring_role.arn }}"
+ monitoring_role_arn: "{{ enhanced_monitoring_role.iam_role.arn }}"
iops: "{{ iops }}"
port: 1150
max_allocated_storage: 150
@@ -190,7 +190,7 @@
ignore_errors: true
- name: Remove enhanced monitoring role
- community.aws.iam_role:
+ amazon.aws.iam_role:
assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json') }}"
name: "{{ instance_id }}-role"
state: absent
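The .arn to .iam_role.arn changes above follow amazon.aws.iam_role's return format, which nests the role attributes under an iam_role key. A minimal sketch of registering the role and reading the nested ARN (the role name is illustrative):

- name: Create the monitoring role and capture its ARN (sketch)
  amazon.aws.iam_role:
    name: example-monitoring-role            # hypothetical name
    assume_role_policy_document: "{{ lookup('file', 'files/enhanced_monitoring_assume_policy.json') }}"
    state: present
  register: enhanced_monitoring_role

- name: Reference the nested return value
  ansible.builtin.debug:
    msg: "{{ enhanced_monitoring_role.iam_role.arn }}"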
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml
index 0384232d5..0e09b9c4c 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml
@@ -7,4 +7,4 @@ db_instance_class: db.t3.micro
allocated_storage: 20
# For mariadb tests
-mariadb_engine_version: 10.6.10
+mariadb_engine_version: 10.11.7
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml
index 4e33789f3..e440bbc42 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml
@@ -82,7 +82,7 @@
- '"is not valid for adding IAM roles" in result.msg'
# TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name,
- # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration
+ # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name
# ------------------------------------------------------------------------------------------
- name: Modify the storage type without immediate application - check_mode
@@ -306,6 +306,193 @@
- db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1"
# Test modifying CA certificate identifier Complete-------------------------------------------
+ # Test modifying cloudwatch log exports -------------------------------------------
+ - name: Enable all cloudwatch log exports - check_mode
+ amazon.aws.rds_instance:
+ state: present
+ db_instance_identifier: "{{ modified_instance_id }}"
+ enable_cloudwatch_logs_exports: ["audit", "error", "general", "slowquery"]
+ register: result
+ check_mode: true
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: Get current cloudwatch log exports
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: "{{ modified_instance_id }}"
+ register: db_info
+ - name: Assert that cloudwatch log exports have been modified - check_mode
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - result is not failed
+ - db_info.instances[0].enabled_cloudwatch_logs_exports is not defined
+
+ - name: Enable all cloudwatch log exports
+ amazon.aws.rds_instance:
+ state: present
+ db_instance_identifier: "{{ modified_instance_id }}"
+ enable_cloudwatch_logs_exports: ["audit", "error", "general", "slowquery"]
+ register: result
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: Get current cloudwatch log exports
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: "{{ modified_instance_id }}"
+ register: db_info
+ # It applies immediately but takes a couple seconds
+ until:
+ - db_info.instances[0].db_instance_status == 'available'
+ - not db_info.instances[0].pending_modified_values
+ retries: 10
+ delay: 20
+
+ - name: Assert that cloudwatch log exports have been modified
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - result is not failed
+ - db_info.instances[0].enabled_cloudwatch_logs_exports | length == 4
+
+ - name: Enable all cloudwatch log exports - idempotent
+ amazon.aws.rds_instance:
+ state: present
+ db_instance_identifier: "{{ modified_instance_id }}"
+ enable_cloudwatch_logs_exports: ["audit", "error", "general", "slowquery"]
+ register: result
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: Get current cloudwatch log exports
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: "{{ modified_instance_id }}"
+ register: db_info
+ - name: Assert that cloudwatch log exports have not been modified
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+ - result is not failed
+ - not result.pending_modified_values
+ - db_info.instances[0].enabled_cloudwatch_logs_exports | length == 4
+
+ - name: Disable some cloudwatch log exports - check_mode
+ amazon.aws.rds_instance:
+ state: present
+ db_instance_identifier: "{{ modified_instance_id }}"
+ enable_cloudwatch_logs_exports: ["audit", "error"]
+ register: result
+ check_mode: true
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: Get current cloudwatch log exports
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: "{{ modified_instance_id }}"
+ register: db_info
+ - name: Assert that cloudwatch log exports have been modified - check_mode
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - result is not failed
+ - db_info.instances[0].enabled_cloudwatch_logs_exports | length == 4
+
+ - name: Disable some cloudwatch log exports
+ amazon.aws.rds_instance:
+ state: present
+ db_instance_identifier: "{{ modified_instance_id }}"
+ enable_cloudwatch_logs_exports: ["audit", "error"]
+ register: result
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: Get current cloudwatch log exports
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: "{{ modified_instance_id }}"
+ register: db_info
+ until:
+ - db_info.instances[0].db_instance_status == 'available'
+ - not db_info.instances[0].pending_modified_values
+ retries: 10
+ delay: 20
+
+ - name: Assert that cloudwatch log exports have been modified
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - result is not failed
+ - db_info.instances[0].enabled_cloudwatch_logs_exports | length == 2
+
+ - name: Disable all cloudwatch log exports - no purge
+ amazon.aws.rds_instance:
+ state: present
+ db_instance_identifier: "{{ modified_instance_id }}"
+ enable_cloudwatch_logs_exports: []
+ purge_cloudwatch_logs_exports: false
+ register: result
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: Get current cloudwatch log exports
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: "{{ modified_instance_id }}"
+ register: db_info
+ - name: Assert that cloudwatch log exports have not been modified
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+ - result is not failed
+ - not result.pending_modified_values
+ - db_info.instances[0].enabled_cloudwatch_logs_exports | length == 2
+
+ - name: Disable all cloudwatch log exports - check_mode
+ amazon.aws.rds_instance:
+ state: present
+ db_instance_identifier: "{{ modified_instance_id }}"
+ enable_cloudwatch_logs_exports: []
+ register: result
+ check_mode: true
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: Get current cloudwatch log exports
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: "{{ modified_instance_id }}"
+ register: db_info
+ - name: Assert that cloudwatch log exports have been modified - check_mode
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - result is not failed
+ - db_info.instances[0].enabled_cloudwatch_logs_exports | length == 2
+
+ - name: Disable all cloudwatch log exports
+ amazon.aws.rds_instance:
+ state: present
+ db_instance_identifier: "{{ modified_instance_id }}"
+ enable_cloudwatch_logs_exports: []
+ register: result
+ vars:
+ ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}"
+
+ - name: Get current cloudwatch log exports
+ amazon.aws.rds_instance_info:
+ db_instance_identifier: "{{ modified_instance_id }}"
+ register: db_info
+ until:
+ - db_info.instances[0].db_instance_status == 'available'
+ - not db_info.instances[0].pending_modified_values
+ retries: 10
+ delay: 20
+
+ - name: Assert that cloudwatch log exports have been modified
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - result is not failed
+ - db_info.instances[0].enabled_cloudwatch_logs_exports is not defined
+ # Test modifying cloudwatch log exports Complete-------------------------------------------
+
always:
- name: Delete the instance
amazon.aws.rds_instance:
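The new tasks above pair enable_cloudwatch_logs_exports with purge_cloudwatch_logs_exports: with the default purge behaviour the supplied list is authoritative, while purge_cloudwatch_logs_exports: false only ever adds exports. A condensed sketch of the two modes, with an illustrative instance identifier:

- name: Make the export list authoritative (default purge behaviour)
  amazon.aws.rds_instance:
    state: present
    db_instance_identifier: example-instance   # hypothetical identifier
    enable_cloudwatch_logs_exports: ["audit", "error"]

- name: Only add exports, never remove existing ones
  amazon.aws.rds_instance:
    state: present
    db_instance_identifier: example-instance   # hypothetical identifier
    enable_cloudwatch_logs_exports: ["general"]
    purge_cloudwatch_logs_exports: false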
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml
index d193876e7..e82f395a8 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml
@@ -8,7 +8,7 @@ password: "{{ lookup('password', '/dev/null') }}"
db_instance_class: db.t3.micro
allocated_storage: 10
engine: mariadb
-mariadb_engine_version: 10.6.10
+mariadb_engine_version: 10.11.7
# Create snapshot
snapshot_id: "{{ instance_id }}-snapshot"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml
index e0f04005f..70a5946bf 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml
@@ -1,7 +1,7 @@
---
option_group_name: "{{ resource_prefix }}rds-option-group"
engine_name: mysql
-major_engine_version: 5.6
+major_engine_version: 8.0
option_group_description: "{{ resource_prefix }}rds-option-group test"
instance_id: "{{ resource_prefix }}"
username: test
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml
index 663ee68df..4751e56e8 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml
@@ -22,7 +22,7 @@
block:
# ============================================================
- name: test empty parameter group - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -36,7 +36,7 @@
- result.changed
- name: test empty parameter group
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -54,7 +54,7 @@
# ============================================================
- name: test empty parameter group with no arguments changes nothing - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -68,7 +68,7 @@
- not result.changed
- name: test empty parameter group with no arguments changes nothing
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -82,7 +82,7 @@
# ============================================================
- name: test adding numeric tag - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -98,7 +98,7 @@
that:
- result.changed
- name: test adding numeric tag
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -123,7 +123,7 @@
# ============================================================
- name: test modifying rds parameter group engine/family (warning displayed)
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine_to_modify_to }}"
description: "{{ rds_param_group.description }}"
@@ -143,7 +143,7 @@
# ============================================================
- name: test tagging existing group - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -160,7 +160,7 @@
that:
- result.changed
- name: test tagging existing group
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -186,7 +186,7 @@
# ============================================================
- name: test repeating tagging existing group - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -212,7 +212,7 @@
- result.tags["NewTag"] == 'hello'
- name: test repeating tagging existing group
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -238,7 +238,7 @@
# ============================================================
- name: test deleting tags from existing group - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -254,7 +254,7 @@
that:
- result.changed
- name: test deleting tags from existing group
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -277,7 +277,7 @@
# ============================================================
- name: test state=absent with engine defined (expect changed=true) - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
state: absent
@@ -290,7 +290,7 @@
- result.changed
- name: test state=absent with engine defined (expect changed=true)
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
state: absent
@@ -303,7 +303,7 @@
# ============================================================
- name: test creating group with parameters - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -325,7 +325,7 @@
- result.changed
- name: test creating group with parameters
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -355,7 +355,7 @@
# ============================================================
- name: test repeating group with parameters - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -377,7 +377,7 @@
- not result.changed
- name: test repeating group with parameters
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -407,7 +407,7 @@
# ============================================================
- name: test state=absent with engine defined (expect changed=true) - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
state: absent
@@ -419,7 +419,7 @@
that:
- result.changed
- name: test state=absent with engine defined (expect changed=true)
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
state: absent
@@ -432,7 +432,7 @@
# ============================================================
- name: test repeating state=absent (expect changed=false) - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
state: absent
@@ -445,7 +445,7 @@
that:
- not result.changed
- name: test repeating state=absent (expect changed=false)
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
state: absent
@@ -459,7 +459,7 @@
# ============================================================
- name: test creating group with more than 20 parameters - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -473,7 +473,7 @@
that:
- result.changed
- name: test creating group with more than 20 parameters
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -488,7 +488,7 @@
# ============================================================
- name: test creating group with more than 20 parameters - CHECK_MODE
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -502,7 +502,7 @@
that:
- not result.changed
- name: test creating group with more than 20 parameters
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
engine: "{{ rds_param_group.engine }}"
description: "{{ rds_param_group.description }}"
@@ -518,7 +518,7 @@
always:
# ============================================================
- name: test state=absent (expect changed=false)
- amazon.aws.rds_param_group:
+ amazon.aws.rds_instance_param_group:
name: "{{ rds_param_group.name }}"
state: absent
register: result
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml
index f532c13a1..1b5c3172e 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml
@@ -1,5 +1,5 @@
---
-- name: Test community.aws.aws_s3_bucket_info
+- name: Test s3_bucket_info
module_defaults:
group/aws:
access_key: "{{ aws_access_key }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/library/test_s3_upload_multipart.py b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/library/test_s3_upload_multipart.py
new file mode 100644
index 000000000..a95275865
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/library/test_s3_upload_multipart.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: test_s3_upload_multipart
+short_description: Create object using multipart upload
+description:
+  - This module creates an object in an S3 bucket using a multipart upload.
+  - A multipart upload allows a single object to be uploaded as a set of parts.
+  - This module is used exclusively to test the C(amazon.aws) collection.
+options:
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ type: str
+ object:
+ description:
+ - Key name of the object.
+ type: str
+ required: true
+ part_size:
+ description:
+ - Part size in MB.
+ type: int
+ default: 10
+ parts:
+ description:
+ - Number of parts.
+ type: int
+ default: 6
+author:
+ - "Aubin Bikouo (@abikouo)"
+extends_documentation_fragment:
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
+ - amazon.aws.tags
+ - amazon.aws.boto3
+"""
+
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+import random
+import string
+
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params
+
+
+def generate_content(length):
+ return "".join([random.choice(string.ascii_letters + string.digits) for i in range(length)])
+
+
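+# Upload `parts` randomly generated parts of `part_size` MB each and return the
+# PartNumber/ETag pairs required by complete_multipart_upload().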
+def upload_parts(s3, parts, part_size, **kwargs):
+ multiparts = []
+ for part_id in range(1, parts + 1):
+ response = s3.upload_part(
+ Body=str.encode(generate_content(part_size * 1024 * 1024)), PartNumber=part_id, **kwargs
+ )
+
+ multiparts.append(
+ {
+ "PartNumber": part_id,
+ "ETag": response.get("ETag"),
+ }
+ )
+ return multiparts
+
+
+def main():
+ argument_spec = dict(
+ bucket=dict(required=True),
+ object=dict(required=True),
+ parts=dict(type="int", default=6),
+ part_size=dict(type="int", default=10),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bucket = module.params.get("bucket")
+ object = module.params.get("object")
+ part_size = module.params.get("part_size")
+ parts = module.params.get("parts")
+
+ extra_params = s3_extra_params(module.params)
+ retry_decorator = AWSRetry.jittered_backoff()
+ try:
+ s3 = module.client("s3", retry_decorator=retry_decorator, **extra_params)
+ except (
+ botocore.exceptions.ClientError,
+ botocore.exceptions.BotoCoreError,
+ boto3.exceptions.Boto3Error,
+ ) as e:
+ module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+ # create multipart upload
+ response = s3.create_multipart_upload(Bucket=bucket, Key=object)
+ upload_id = response.get("UploadId")
+
+ # upload parts
+ upload_params = {
+ "Bucket": bucket,
+ "Key": object,
+ "UploadId": upload_id,
+ }
+
+    multiparts = upload_parts(s3, parts, part_size, **upload_params)
+
+ # complete the upload
+ response = s3.complete_multipart_upload(
+ Bucket=bucket,
+ Key=object,
+ MultipartUpload={"Parts": multiparts},
+ UploadId=upload_id,
+ )
+
+ response.pop("ResponseMetadata", None)
+ module.exit_json(changed=True, s3_object=snake_dict_to_camel_dict(response))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_multipart_upload.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_multipart_upload.yml
new file mode 100644
index 000000000..edab77b6e
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_multipart_upload.yml
@@ -0,0 +1,185 @@
+- name: Test copying an object created using multipart upload
+ vars:
+ test_bucket: "{{ resource_prefix }}-multipart"
+ test_versioned_bucket: "{{ resource_prefix }}-multipart-versioned"
+ obj_metadata:
+ some: meta_info
+ test: ci
+ block:
+ # Create Sample bucket
+ - name: Create bucket with name '{{ test_bucket }}'
+ amazon.aws.s3_bucket:
+ name: "{{ test_bucket }}"
+ state: present
+
+    - name: Create an object in the bucket using multipart upload
+ test_s3_upload_multipart:
+ access_key: "{{ aws_access_key }}"
+ secret_key: "{{ aws_secret_key }}"
+ session_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ bucket: "{{ test_bucket }}"
+ object: multipart_1
+
+ # Test copying multipart-uploaded object
+ - name: Copy object
+ amazon.aws.s3_object:
+ bucket: "{{ test_bucket }}"
+ mode: copy
+ object: multipart_1_copy
+ copy_src:
+ bucket: "{{ test_bucket }}"
+ object: multipart_1
+ register: _copy
+
+ - name: Ensure module reported change
+ ansible.builtin.assert:
+ that:
+ - _copy is changed
+
+ - name: Retrieve object info
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ test_bucket }}"
+ object_name: multipart_1_copy
+ register: obj_info
+
+ - name: Ensure object has been created
+ ansible.builtin.assert:
+ that:
+ - obj_info.object_info | length == 1
+
+ - name: Copy object once again (idempotency)
+ amazon.aws.s3_object:
+ bucket: "{{ test_bucket }}"
+ mode: copy
+ object: multipart_1_copy
+ copy_src:
+ bucket: "{{ test_bucket }}"
+ object: multipart_1
+ register: copy_idempotency
+
+    - name: Ensure module did not report change
+ ansible.builtin.assert:
+ that:
+ - copy_idempotency is not changed
+
+ # Update object with metadata
+ - name: Copy object with metadata
+ amazon.aws.s3_object:
+ bucket: "{{ test_bucket }}"
+ mode: copy
+ object: multipart_1_copy
+ copy_src:
+ bucket: "{{ test_bucket }}"
+ object: multipart_1
+ metadata: "{{ obj_metadata }}"
+ register: _copymetadata
+
+ - name: Ensure module reported change
+ ansible.builtin.assert:
+ that:
+ - _copymetadata is changed
+
+ - name: Retrieve object info
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ test_bucket }}"
+ object_name: multipart_1_copy
+ register: obj_info
+
+ - name: Ensure object has been created
+ ansible.builtin.assert:
+ that:
+ - obj_info.object_info | length == 1
+ - obj_info.object_info.0.object_data.metadata == obj_metadata
+
+ # Test copy with metadata (idempotency)
+ - name: Copy object with metadata once again (idempotency)
+ amazon.aws.s3_object:
+ bucket: "{{ test_bucket }}"
+ mode: copy
+ object: multipart_1_copy
+ copy_src:
+ bucket: "{{ test_bucket }}"
+ object: multipart_1
+ metadata: "{{ obj_metadata }}"
+ register: copy_idempotency
+
+    - name: Ensure module did not report change
+ ansible.builtin.assert:
+ that:
+ - copy_idempotency is not changed
+
+ - name: Retrieve object info
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ test_bucket }}"
+ object_name: multipart_1_copy
+ register: obj_info
+
+ - name: Ensure object has been created
+ ansible.builtin.assert:
+ that:
+ - obj_info.object_info | length == 1
+ - obj_info.object_info.0.object_data.metadata == obj_metadata
+
+ # Test copying multipart-uploaded object into bucket with versioning activated
+ # Create bucket with versioning activated
+ - name: Create bucket with name '{{ test_versioned_bucket }}'
+ amazon.aws.s3_bucket:
+ name: "{{ test_versioned_bucket }}"
+ versioning: true
+ state: present
+
+ - name: Copy object into bucket with versioning activated
+ amazon.aws.s3_object:
+ bucket: "{{ test_versioned_bucket }}"
+ mode: copy
+ object: multipart_2
+ copy_src:
+ bucket: "{{ test_bucket }}"
+ object: multipart_1
+ register: _copy
+
+ - name: Get objects info from bucket with versioning activated
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ test_versioned_bucket }}"
+ object_name: multipart_2
+ register: obj_info_1
+
+    - name: Ensure object was copied and object info contains versioning information
+ ansible.builtin.assert:
+ that:
+ - _copy is changed
+ - obj_info_1.object_info.0.object_data.version_id
+
+ # test copy idempotency with versioned bucket
+ - name: Copy object into bucket with versioning activated (once again)
+ amazon.aws.s3_object:
+ bucket: "{{ test_versioned_bucket }}"
+ mode: copy
+ object: multipart_2
+ copy_src:
+ bucket: "{{ test_bucket }}"
+ object: multipart_1
+ register: _copy_idempotency
+
+ - name: Get objects info from bucket with versioning activated
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ test_versioned_bucket }}"
+ object_name: multipart_2
+ register: obj_info_2
+
+    - name: Validate that module did not report change and object version remains unchanged
+ ansible.builtin.assert:
+ that:
+ - _copy_idempotency is not changed
+ - obj_info_1.object_info.0.object_data.version_id == obj_info_2.object_info.0.object_data.version_id
+
+ always:
+ - name: Delete buckets
+ amazon.aws.s3_bucket:
+ name: "{{ item }}"
+ state: absent
+ force: true
+ with_items:
+ - "{{ test_versioned_bucket }}"
+ - "{{ test_bucket }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml
index 994733d81..cfb5e13f6 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml
@@ -6,6 +6,9 @@
metacopy_data:
name: metacopy
version: "1.0.3"
+ another_metadata:
+ another: meta
+ mode: copy
block:
- name: define bucket name used for tests
ansible.builtin.set_fact:
@@ -211,6 +214,54 @@
- obj_info.results | selectattr('item', 'equalto', 'metacopy') | map(attribute='object_info.0.object_data.metadata') | first == metacopy_data
- obj_info.results | selectattr('item', 'equalto', 'copywithmeta') | map(attribute='object_info.0.object_data.metadata') | first == withmeta_data
+ # Validate copy idempotency with metadata
+ - name: Copy same object including metadata (check_mode=true)
+ amazon.aws.s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: copy
+ object: copywithmeta
+ copy_src:
+ bucket: "{{ bucket_name }}"
+ object: withmeta
+ metadata: "{{ another_metadata }}"
+ register: copy_with_metadata_checkmode
+ check_mode: true
+
+ - name: Get objects info
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ bucket_name }}"
+ object_name: copywithmeta
+ register: obj_info
+
+  - name: Validate that the module reported a change but metadata was not updated
+ ansible.builtin.assert:
+ that:
+ - copy_with_metadata_checkmode is changed
+ - obj_info.object_info.0.object_data.metadata == withmeta_data
+
+ - name: Copy same object including metadata
+ amazon.aws.s3_object:
+ bucket: "{{ bucket_name }}"
+ mode: copy
+ object: copywithmeta
+ copy_src:
+ bucket: "{{ bucket_name }}"
+ object: withmeta
+ metadata: "{{ another_metadata }}"
+ register: copy_with_metadata
+
+ - name: Get objects info
+ amazon.aws.s3_object_info:
+ bucket_name: "{{ bucket_name }}"
+ object_name: copywithmeta
+ register: obj_info
+
+  - name: Validate that the module reported a change and metadata was updated
+ ansible.builtin.assert:
+ that:
+ - copy_with_metadata is changed
+ - obj_info.object_info.0.object_data.metadata == another_metadata
+
always:
- ansible.builtin.include_tasks: delete_bucket.yml
with_items:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml
index 7a8a585de..70041d36a 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml
@@ -1071,9 +1071,10 @@
- "'tags' in result"
- (result.tags | length) == 0
- - ansible.builtin.include_tasks: copy_recursively.yml
- - ansible.builtin.include_tasks: copy_object.yml
- - ansible.builtin.include_tasks: copy_object_acl_disabled_bucket.yml
+ - ansible.builtin.include_tasks: copy_recursively.yml
+ - ansible.builtin.include_tasks: copy_object.yml
+ - ansible.builtin.include_tasks: copy_object_acl_disabled_bucket.yml
+ - ansible.builtin.include_tasks: copy_multipart_upload.yml
always:
- name: delete temporary files
file:
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml
index 807a422c9..7fdeb71e5 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml
@@ -22,7 +22,7 @@
# ============================================================
- name: create test iam role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ iam_role_name }}"
assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
create_instance_profile: false
@@ -247,7 +247,7 @@
# ============================================================
- name: test that assumed credentials have IAM read-only access
- community.aws.iam_role:
+ amazon.aws.iam_role:
access_key: "{{ assumed_role.sts_creds.access_key }}"
secret_key: "{{ assumed_role.sts_creds.secret_key }}"
session_token: "{{ assumed_role.sts_creds.session_token }}"
@@ -266,7 +266,7 @@
# ============================================================
- name: test assumed role with unprivileged action
- community.aws.iam_role:
+ amazon.aws.iam_role:
access_key: "{{ assumed_role.sts_creds.access_key }}"
secret_key: "{{ assumed_role.sts_creds.secret_key }}"
session_token: "{{ assumed_role.sts_creds.session_token }}"
@@ -295,7 +295,7 @@
# ============================================================
always:
- name: delete test iam role
- community.aws.iam_role:
+ amazon.aws.iam_role:
name: "{{ iam_role_name }}"
assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
delete_instance_profile: true
diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.18.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.18.txt
new file mode 100644
index 000000000..09a7e9cbb
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.18.txt
@@ -0,0 +1 @@
+plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py
index 9f3e4194b..a5ce452fc 100644
--- a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py
@@ -209,3 +209,71 @@ class TestIsBoto3ErrorCode:
assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
assert issubclass(returned_exception, Exception)
assert returned_exception.__name__ == "NeverEverRaisedException"
+
+ def test_is_boto3_error_code_tuple__pass__client(self):
+ passed_exception = self._make_denied_exception()
+ returned_exception = is_boto3_error_code(("NotAccessDenied", "AccessDenied"), e=passed_exception)
+ assert isinstance(passed_exception, returned_exception)
+ assert issubclass(returned_exception, botocore.exceptions.ClientError)
+ assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+ assert issubclass(returned_exception, Exception)
+ assert returned_exception.__name__ != "NeverEverRaisedException"
+
+ returned_exception = is_boto3_error_code(("AccessDenied", "NotAccessDenied"), e=passed_exception)
+ assert isinstance(passed_exception, returned_exception)
+ assert issubclass(returned_exception, botocore.exceptions.ClientError)
+ assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+ assert issubclass(returned_exception, Exception)
+ assert returned_exception.__name__ != "NeverEverRaisedException"
+
+ def test_is_boto3_error_code_tuple__pass__unexpected(self):
+ passed_exception = self._make_unexpected_exception()
+ returned_exception = is_boto3_error_code(("NotAccessDenied", "AccessDenied"), e=passed_exception)
+ assert not isinstance(passed_exception, returned_exception)
+ assert not issubclass(returned_exception, botocore.exceptions.ClientError)
+ assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+ assert issubclass(returned_exception, Exception)
+ assert returned_exception.__name__ == "NeverEverRaisedException"
+
+ def test_is_boto3_error_code_tuple__pass__botocore(self):
+ passed_exception = self._make_botocore_exception()
+ returned_exception = is_boto3_error_code(("NotAccessDenied", "AccessDenied"), e=passed_exception)
+ assert not isinstance(passed_exception, returned_exception)
+ assert not issubclass(returned_exception, botocore.exceptions.ClientError)
+ assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+ assert issubclass(returned_exception, Exception)
+ assert returned_exception.__name__ == "NeverEverRaisedException"
+
+ def test_is_boto3_error_code_set__pass__client(self):
+ passed_exception = self._make_denied_exception()
+ returned_exception = is_boto3_error_code({"NotAccessDenied", "AccessDenied"}, e=passed_exception)
+ assert isinstance(passed_exception, returned_exception)
+ assert issubclass(returned_exception, botocore.exceptions.ClientError)
+ assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+ assert issubclass(returned_exception, Exception)
+ assert returned_exception.__name__ != "NeverEverRaisedException"
+
+ returned_exception = is_boto3_error_code({"AccessDenied", "NotAccessDenied"}, e=passed_exception)
+ assert isinstance(passed_exception, returned_exception)
+ assert issubclass(returned_exception, botocore.exceptions.ClientError)
+ assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+ assert issubclass(returned_exception, Exception)
+ assert returned_exception.__name__ != "NeverEverRaisedException"
+
+ def test_is_boto3_error_code_set__pass__unexpected(self):
+ passed_exception = self._make_unexpected_exception()
+ returned_exception = is_boto3_error_code({"NotAccessDenied", "AccessDenied"}, e=passed_exception)
+ assert not isinstance(passed_exception, returned_exception)
+ assert not issubclass(returned_exception, botocore.exceptions.ClientError)
+ assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+ assert issubclass(returned_exception, Exception)
+ assert returned_exception.__name__ == "NeverEverRaisedException"
+
+ def test_is_boto3_error_code_set__pass__botocore(self):
+ passed_exception = self._make_botocore_exception()
+ returned_exception = is_boto3_error_code({"NotAccessDenied", "AccessDenied"}, e=passed_exception)
+ assert not isinstance(passed_exception, returned_exception)
+ assert not issubclass(returned_exception, botocore.exceptions.ClientError)
+ assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError)
+ assert issubclass(returned_exception, Exception)
+ assert returned_exception.__name__ == "NeverEverRaisedException"
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_resource_transforms.py b/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_resource_transforms.py
index 28090f993..0a6830311 100644
--- a/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_resource_transforms.py
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_resource_transforms.py
@@ -451,10 +451,10 @@ class TestIamResourceToAnsibleDict:
OUTPUT = {
"arn": "arn:aws:iam::123456789012:role/ansible-test-76640355",
"assume_role_policy_document": {
- "statement": [
- {"action": "sts:AssumeRole", "effect": "Deny", "principal": {"service": "ec2.amazonaws.com"}}
+ "Statement": [
+ {"Action": "sts:AssumeRole", "Effect": "Deny", "Principal": {"Service": "ec2.amazonaws.com"}}
],
- "version": "2012-10-17",
+ "Version": "2012-10-17",
},
"assume_role_policy_document_raw": {
"Statement": [
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py
index c61de1391..688514f59 100644
--- a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py
@@ -70,7 +70,7 @@ def test_region(monkeypatch, stdin):
aws_module = utils_module.AnsibleAWSModule(argument_spec=dict())
assert aws_module.region is sentinel.RETURNED_REGION
- assert get_aws_region.call_args == call(aws_module, True)
+ assert get_aws_region.call_args == call(aws_module)
@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"])
@@ -129,7 +129,7 @@ def test_client_no_wrapper(monkeypatch, stdin):
aws_module = utils_module.AnsibleAWSModule(argument_spec=dict())
assert aws_module.client(sentinel.PARAM_SERVICE) is sentinel.BOTO3_CONN
- assert get_aws_connection_info.call_args == call(aws_module, boto3=True)
+ assert get_aws_connection_info.call_args == call(aws_module)
assert boto3_conn.call_args == call(
aws_module,
conn_type="client",
@@ -153,7 +153,7 @@ def test_client_wrapper(monkeypatch, stdin):
wrapped_conn = aws_module.client(sentinel.PARAM_SERVICE, sentinel.PARAM_WRAPPER)
assert wrapped_conn.client is sentinel.BOTO3_CONN
assert wrapped_conn.retry is sentinel.PARAM_WRAPPER
- assert get_aws_connection_info.call_args == call(aws_module, boto3=True)
+ assert get_aws_connection_info.call_args == call(aws_module)
assert boto3_conn.call_args == call(
aws_module,
conn_type="client",
@@ -166,7 +166,7 @@ def test_client_wrapper(monkeypatch, stdin):
wrapped_conn = aws_module.client(sentinel.PARAM_SERVICE, sentinel.PARAM_WRAPPER, region=sentinel.PARAM_REGION)
assert wrapped_conn.client is sentinel.BOTO3_CONN
assert wrapped_conn.retry is sentinel.PARAM_WRAPPER
- assert get_aws_connection_info.call_args == call(aws_module, boto3=True)
+ assert get_aws_connection_info.call_args == call(aws_module)
assert boto3_conn.call_args == call(
aws_module,
conn_type="client",
@@ -188,7 +188,7 @@ def test_resource(monkeypatch, stdin):
aws_module = utils_module.AnsibleAWSModule(argument_spec=dict())
assert aws_module.resource(sentinel.PARAM_SERVICE) is sentinel.BOTO3_CONN
- assert get_aws_connection_info.call_args == call(aws_module, boto3=True)
+ assert get_aws_connection_info.call_args == call(aws_module)
assert boto3_conn.call_args == call(
aws_module,
conn_type="resource",
@@ -199,7 +199,7 @@ def test_resource(monkeypatch, stdin):
# Check that we can override parameters
assert aws_module.resource(sentinel.PARAM_SERVICE, region=sentinel.PARAM_REGION) is sentinel.BOTO3_CONN
- assert get_aws_connection_info.call_args == call(aws_module, boto3=True)
+ assert get_aws_connection_info.call_args == call(aws_module)
assert boto3_conn.call_args == call(
aws_module,
conn_type="resource",
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_sort_json_policy_dict.py b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_sort_json_policy_dict.py
deleted file mode 100644
index 8829f332c..000000000
--- a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_sort_json_policy_dict.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# (c) 2022 Red Hat Inc.
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from ansible_collections.amazon.aws.plugins.module_utils.policy import sort_json_policy_dict
-
-
-def test_nothing_to_sort():
- simple_dict = {"key1": "a"}
- nested_dict = {"key1": {"key2": "a"}}
- very_nested_dict = {"key1": {"key2": {"key3": "a"}}}
- assert sort_json_policy_dict(simple_dict) == simple_dict
- assert sort_json_policy_dict(nested_dict) == nested_dict
- assert sort_json_policy_dict(very_nested_dict) == very_nested_dict
-
-
-def test_basic_sort():
- simple_dict = {"key1": [1, 2, 3, 4], "key2": [9, 8, 7, 6]}
- sorted_dict = {"key1": [1, 2, 3, 4], "key2": [6, 7, 8, 9]}
- assert sort_json_policy_dict(simple_dict) == sorted_dict
- assert sort_json_policy_dict(sorted_dict) == sorted_dict
- simple_dict = {"key1": ["a", "b", "c", "d"], "key2": ["z", "y", "x", "w"]}
- sorted_dict = {"key1": ["a", "b", "c", "d"], "key2": ["w", "x", "y", "z"]}
- assert sort_json_policy_dict(sorted_dict) == sorted_dict
-
-
-def test_nested_list_sort():
- nested_dict = {"key1": {"key2": [9, 8, 7, 6]}}
- sorted_dict = {"key1": {"key2": [6, 7, 8, 9]}}
- assert sort_json_policy_dict(nested_dict) == sorted_dict
- assert sort_json_policy_dict(sorted_dict) == sorted_dict
- nested_dict = {"key1": {"key2": ["z", "y", "x", "w"]}}
- sorted_dict = {"key1": {"key2": ["w", "x", "y", "z"]}}
- assert sort_json_policy_dict(nested_dict) == sorted_dict
- assert sort_json_policy_dict(sorted_dict) == sorted_dict
-
-
-def test_nested_dict_list_sort():
- nested_dict = {"key1": {"key2": {"key3": [9, 8, 7, 6]}}}
- sorted_dict = {"key1": {"key2": {"key3": [6, 7, 8, 9]}}}
- assert sort_json_policy_dict(nested_dict) == sorted_dict
- assert sort_json_policy_dict(sorted_dict) == sorted_dict
- nested_dict = {"key1": {"key2": {"key3": ["z", "y", "x", "w"]}}}
- sorted_dict = {"key1": {"key2": {"key3": ["w", "x", "y", "z"]}}}
- assert sort_json_policy_dict(nested_dict) == sorted_dict
- assert sort_json_policy_dict(sorted_dict) == sorted_dict
-
-
-def test_list_of_dict_sort():
- nested_dict = {"key1": [{"key2": [4, 3, 2, 1]}, {"key3": [9, 8, 7, 6]}]}
- sorted_dict = {"key1": [{"key2": [1, 2, 3, 4]}, {"key3": [6, 7, 8, 9]}]}
- assert sort_json_policy_dict(nested_dict) == sorted_dict
- assert sort_json_policy_dict(sorted_dict) == sorted_dict
-
-
-def test_list_of_list_sort():
- nested_dict = {"key1": [[4, 3, 2, 1], [9, 8, 7, 6]]}
- sorted_dict = {"key1": [[1, 2, 3, 4], [6, 7, 8, 9]]}
- assert sort_json_policy_dict(nested_dict) == sorted_dict
- assert sort_json_policy_dict(sorted_dict) == sorted_dict
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py
index d7293f0ce..0d2f3c153 100644
--- a/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py
@@ -6,6 +6,8 @@
from unittest.mock import MagicMock
+import pytest
+
from ansible_collections.amazon.aws.plugins.module_utils import elbv2
one_action = [
@@ -159,3 +161,138 @@ class TestElBV2Utils:
actual_elb_attributes = self.elbv2obj.get_elb_attributes()
# Assert we got the expected result
assert actual_elb_attributes == expected_elb_attributes
+
+
+class TestELBListeners:
+ DEFAULT_PORT = 80
+ DEFAULT_PROTOCOL = "TCP"
+
+ def createListener(self, **kwargs):
+ result = {"Port": self.DEFAULT_PORT, "Protocol": self.DEFAULT_PROTOCOL}
+ if kwargs.get("port"):
+ result["Port"] = kwargs.get("port")
+ if kwargs.get("protocol"):
+ result["Protocol"] = kwargs.get("protocol")
+ if kwargs.get("certificate_arn") and kwargs.get("protocol") in ("TLS", "HTTPS"):
+ result["Certificates"] = [{"CertificateArn": kwargs.get("certificate_arn")}]
+ if kwargs.get("sslPolicy") and kwargs.get("protocol") in ("TLS", "HTTPS"):
+ result["SslPolicy"] = kwargs.get("sslPolicy")
+ if kwargs.get("alpnPolicy") and kwargs.get("protocol") == "TLS":
+ result["AlpnPolicy"] = kwargs.get("alpnPolicy")
+ return result
+
+ @pytest.mark.parametrize("current_protocol", ["TCP", "TLS", "UDP"])
+ @pytest.mark.parametrize(
+ "current_alpn,new_alpn",
+ [
+ (None, "None"),
+ (None, "HTTP1Only"),
+ ("HTTP1Only", "HTTP2Only"),
+ ("HTTP1Only", "HTTP1Only"),
+ ],
+ )
+ def test__compare_listener_alpn_policy(self, current_protocol, current_alpn, new_alpn):
+ current_listener = self.createListener(protocol=current_protocol, alpnPolicy=[current_alpn])
+ new_listener = self.createListener(protocol="TLS", alpnPolicy=[new_alpn])
+ result = None
+ if current_protocol != "TLS":
+ result = {"Protocol": "TLS"}
+ if new_alpn and any((current_protocol != "TLS", not current_alpn, current_alpn and current_alpn != new_alpn)):
+ result = result or {}
+ result["AlpnPolicy"] = [new_alpn]
+
+ assert result == elbv2.ELBListeners._compare_listener(current_listener, new_listener)
+
+ @pytest.mark.parametrize(
+ "current_protocol,new_protocol",
+ [
+ ("TCP", "TCP"),
+ ("TLS", "HTTPS"),
+ ("HTTPS", "HTTPS"),
+ ("TLS", "TLS"),
+ ("HTTPS", "TLS"),
+ ("HTTPS", "TCP"),
+ ("TLS", "TCP"),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "current_ssl,new_ssl",
+ [
+ (None, "ELBSecurityPolicy-TLS-1-0-2015-04"),
+ ("ELBSecurityPolicy-TLS13-1-2-Ext2-2021-06", "ELBSecurityPolicy-TLS-1-0-2015-04"),
+ ("ELBSecurityPolicy-TLS-1-0-2015-04", None),
+ ("ELBSecurityPolicy-TLS-1-0-2015-04", "ELBSecurityPolicy-TLS-1-0-2015-04"),
+ ],
+ )
+ def test__compare_listener_sslpolicy(self, current_protocol, new_protocol, current_ssl, new_ssl):
+ current_listener = self.createListener(protocol=current_protocol, sslPolicy=current_ssl)
+
+ new_listener = self.createListener(protocol=new_protocol, sslPolicy=new_ssl)
+
+ expected = None
+ if new_protocol != current_protocol:
+ expected = {"Protocol": new_protocol}
+ if new_protocol in ("HTTPS", "TLS") and new_ssl and new_ssl != current_ssl:
+ expected = expected or {}
+ expected["SslPolicy"] = new_ssl
+ assert expected == elbv2.ELBListeners._compare_listener(current_listener, new_listener)
+
+ @pytest.mark.parametrize(
+ "current_protocol,new_protocol",
+ [
+ ("TCP", "TCP"),
+ ("TLS", "HTTPS"),
+ ("HTTPS", "HTTPS"),
+ ("TLS", "TLS"),
+ ("HTTPS", "TLS"),
+ ("HTTPS", "TCP"),
+ ("TLS", "TCP"),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "current_certificate,new_certificate",
+ [
+ (None, "arn:aws:iam::012345678901:server-certificate/ansible-test-1"),
+ (
+ "arn:aws:iam::012345678901:server-certificate/ansible-test-1",
+ "arn:aws:iam::012345678901:server-certificate/ansible-test-2",
+ ),
+ ("arn:aws:iam::012345678901:server-certificate/ansible-test-1", None),
+ (
+ "arn:aws:iam::012345678901:server-certificate/ansible-test-1",
+ "arn:aws:iam::012345678901:server-certificate/ansible-test-1",
+ ),
+ ],
+ )
+ def test__compare_listener_certificates(self, current_protocol, new_protocol, current_certificate, new_certificate):
+ current_listener = self.createListener(protocol=current_protocol, certificate_arn=current_certificate)
+
+ new_listener = self.createListener(protocol=new_protocol, certificate_arn=new_certificate)
+
+ expected = None
+ if new_protocol != current_protocol:
+ expected = {"Protocol": new_protocol}
+ if new_protocol in ("HTTPS", "TLS") and new_certificate and new_certificate != current_certificate:
+ expected = expected or {}
+ expected["Certificates"] = [{"CertificateArn": new_certificate}]
+ assert expected == elbv2.ELBListeners._compare_listener(current_listener, new_listener)
+
+ @pytest.mark.parametrize(
+ "are_equals",
+ [True, False],
+ )
+ def test__compare_listener_port(self, are_equals):
+ current_listener = self.createListener()
+ new_port = MagicMock() if not are_equals else None
+ new_listener = self.createListener(port=new_port)
+
+ result = elbv2.ELBListeners._compare_listener(current_listener, new_listener)
+ expected = None
+ if not are_equals:
+ expected = {"Port": new_port}
+ assert result == expected
+
+ def test_ensure_listeners_alpn_policy(self):
+ listeners = [{"Port": self.DEFAULT_PORT, "AlpnPolicy": "HTTP2Optional"}]
+ expected = [{"Port": self.DEFAULT_PORT, "AlpnPolicy": ["HTTP2Optional"]}]
+ assert expected == elbv2.ELBListeners._ensure_listeners_alpn_policy(listeners)
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_event.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_event.py
new file mode 100644
index 000000000..c292329b4
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_event.py
@@ -0,0 +1,544 @@
+#
+# (c) 2024 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from contextlib import nullcontext as does_not_raise
+from copy import deepcopy
+from unittest.mock import MagicMock
+from unittest.mock import patch
+
+import pytest
+
+from ansible_collections.amazon.aws.plugins.modules.lambda_event import get_qualifier
+from ansible_collections.amazon.aws.plugins.modules.lambda_event import lambda_event_stream
+from ansible_collections.amazon.aws.plugins.modules.lambda_event import set_default_values
+from ansible_collections.amazon.aws.plugins.modules.lambda_event import validate_params
+
+mock_get_qualifier = "ansible_collections.amazon.aws.plugins.modules.lambda_event.get_qualifier"
+mock_camel_dict_to_snake_dict = "ansible_collections.amazon.aws.plugins.modules.lambda_event.camel_dict_to_snake_dict"
+
+
+@pytest.fixture
+def ansible_aws_module():
+ module = MagicMock()
+ module.check_mode = False
+ module.params = {
+ "state": "present",
+ "lambda_function_arn": None,
+ "event_source": "sqs",
+ "source_params": {
+ "source_arn": "arn:aws:sqs:us-east-2:123456789012:ansible-test-sqs",
+ "batch_size": 200,
+ "starting_position": "LATEST",
+ },
+ "alias": None,
+ "version": 0,
+ }
+ module.exit_json = MagicMock()
+ module.exit_json.side_effect = SystemExit(1)
+ module.fail_json_aws = MagicMock()
+ module.fail_json_aws.side_effect = SystemExit(2)
+ module.fail_json = MagicMock()
+ module.fail_json.side_effect = SystemExit(2)
+ module.client = MagicMock()
+ module.client.return_value = MagicMock()
+ module.boolean = MagicMock()
+ module.boolean.side_effect = lambda x: x.lower() in ["true", "1", "t", "y", "yes"]
+ return module
+
+
+@pytest.mark.parametrize(
+ "module_params,expected",
+ [
+ ({"version": 1}, "1"),
+ ({"alias": "ansible-test"}, "ansible-test"),
+ ({"version": 1, "alias": "ansible-test"}, "1"),
+ ({}, None),
+ ],
+)
+def test_get_qualifier(ansible_aws_module, module_params, expected):
+ ansible_aws_module.params.update(module_params)
+ assert get_qualifier(ansible_aws_module) == expected
+
+
+@pytest.mark.parametrize(
+ "function_name,error_msg",
+ [
+ (
+ "invalid+function+name",
+ "Function name invalid+function+name is invalid. Names must contain only alphanumeric characters and hyphens.",
+ ),
+ (
+ "this_invalid_function_name_has_more_than_64_character_limit_this_is_not_allowed_by_the_module",
+ 'Function name "this_invalid_function_name_has_more_than_64_character_limit_this_is_not_allowed_by_the_module" exceeds 64 character limit',
+ ),
+ (
+ "arn:aws:lambda:us-east-2:123456789012:function:ansible-test-ansible-test-ansible-test-sqs-lambda-function:"
+ "ansible-test-ansible-test-ansible-test-sqs-lambda-function-alias",
+ 'ARN "arn:aws:lambda:us-east-2:123456789012:function:ansible-test-ansible-test-ansible-test-sqs-lambda-function:'
+ 'ansible-test-ansible-test-ansible-test-sqs-lambda-function-alias" exceeds 140 character limit',
+ ),
+ ],
+)
+def test_validate_params_function_name_errors(ansible_aws_module, function_name, error_msg):
+ ansible_aws_module.params.update({"lambda_function_arn": function_name})
+ client = MagicMock()
+ client.get_function = MagicMock()
+ client.get_function.return_value = {}
+ with pytest.raises(SystemExit):
+ validate_params(ansible_aws_module, client)
+
+ ansible_aws_module.fail_json.assert_called_once_with(msg=error_msg)
+
+
+@pytest.mark.parametrize(
+ "qualifier",
+ [None, "ansible-test"],
+)
+@patch(mock_get_qualifier)
+def test_validate_params_with_function_arn(m_get_qualifier, ansible_aws_module, qualifier):
+ function_name = "arn:aws:lambda:us-east-2:123456789012:function:sqs_consumer"
+ ansible_aws_module.params.update({"lambda_function_arn": function_name})
+ m_get_qualifier.return_value = qualifier
+
+ client = MagicMock()
+ client.get_function = MagicMock()
+ client.get_function.return_value = {}
+
+ params = deepcopy(ansible_aws_module.params)
+ params["lambda_function_arn"] = f"{function_name}:{qualifier}" if qualifier else function_name
+
+ validate_params(ansible_aws_module, client)
+ assert params == ansible_aws_module.params
+ m_get_qualifier.assert_called_once()
+
+
+@pytest.mark.parametrize(
+ "qualifier",
+ [None, "ansible-test"],
+)
+@patch(mock_get_qualifier)
+def test_validate_params_with_function_name(m_get_qualifier, ansible_aws_module, qualifier):
+ function_arn = "arn:aws:lambda:us-east-2:123456789012:function:sqs_consumer"
+ function_name = "sqs_consumer"
+ ansible_aws_module.params.update({"lambda_function_arn": function_name})
+ m_get_qualifier.return_value = qualifier
+
+ client = MagicMock()
+ client.get_function = MagicMock()
+ client.get_function.return_value = {
+ "Configuration": {"FunctionArn": function_arn},
+ }
+
+ params = deepcopy(ansible_aws_module.params)
+ params["lambda_function_arn"] = function_arn
+
+ validate_params(ansible_aws_module, client)
+
+ assert params == ansible_aws_module.params
+ m_get_qualifier.assert_called_once()
+ api_params = {"FunctionName": function_name}
+ if qualifier:
+ api_params.update({"Qualifier": qualifier})
+ client.get_function.assert_called_once_with(**api_params)
+
+
+EventSourceMappings = [
+ {
+ "BatchSize": 10,
+ "EventSourceArn": "arn:aws:sqs:us-east-2:123456789012:sqs_consumer",
+ "FunctionArn": "arn:aws:lambda:us-east-2:123456789012:function:sqs_consumer",
+ "LastModified": "2024-02-08T15:24:57.014000+01:00",
+ "MaximumBatchingWindowInSeconds": 0,
+ "State": "Enabled",
+ "StateTransitionReason": "USER_INITIATED",
+ "UUID": "3ab96d4c-b0c4-4885-87d0-f58cb9c0a4cc",
+ }
+]
+
+
+@pytest.mark.parametrize(
+ "check_mode",
+ [True, False],
+)
+@pytest.mark.parametrize(
+ "existing_event_source",
+ [True, False],
+)
+@patch(mock_camel_dict_to_snake_dict)
+def test_lambda_event_stream_with_state_absent(
+ mock_camel_dict_to_snake_dict, ansible_aws_module, check_mode, existing_event_source
+):
+ function_name = "sqs_consumer"
+ ansible_aws_module.params.update({"lambda_function_arn": function_name, "state": "absent"})
+ ansible_aws_module.check_mode = check_mode
+
+ client = MagicMock()
+ client.list_event_source_mappings = MagicMock()
+
+ client.list_event_source_mappings.return_value = {
+ "EventSourceMappings": EventSourceMappings if existing_event_source else []
+ }
+ client.delete_event_source_mapping = MagicMock()
+ event_source_deleted = {"msg": "event source successfully deleted."}
+ client.delete_event_source_mapping.return_value = event_source_deleted
+ mock_camel_dict_to_snake_dict.side_effect = lambda x: x
+
+ events = []
+ changed = False
+ result = lambda_event_stream(ansible_aws_module, client)
+ changed = existing_event_source
+ if existing_event_source:
+ events = EventSourceMappings
+ if not check_mode:
+ events = event_source_deleted
+ client.delete_event_source_mapping.assert_called_once_with(UUID=EventSourceMappings[0]["UUID"])
+ else:
+ client.delete_event_source_mapping.assert_not_called()
+ assert dict(changed=changed, events=events) == result
+
+
+def test_lambda_event_stream_create_event_missing_starting_position(ansible_aws_module):
+ ansible_aws_module.params = {
+ "state": "present",
+ "lambda_function_arn": "sqs_consumer",
+ "event_source": "stream",
+ "source_params": {
+ "source_arn": "arn:aws:sqs:us-east-2:123456789012:ansible-test-sqs",
+ "maximum_batching_window_in_seconds": 1,
+ "batch_size": 200,
+ },
+ "alias": None,
+ "version": 0,
+ }
+
+ client = MagicMock()
+ client.list_event_source_mappings = MagicMock()
+ client.list_event_source_mappings.return_value = {"EventSourceMappings": []}
+
+ error_message = "Source parameter 'starting_position' is required for stream event notification."
+ with pytest.raises(SystemExit):
+ lambda_event_stream(ansible_aws_module, client)
+ ansible_aws_module.fail_json.assert_called_once_with(msg=error_message)
+
+
+@pytest.mark.parametrize(
+ "check_mode",
+ [True, False],
+)
+@pytest.mark.parametrize(
+ "module_params,api_params",
+ [
+ (
+ {
+ "state": "present",
+ "lambda_function_arn": "sqs_consumer",
+ "event_source": "stream",
+ "source_params": {
+ "source_arn": "arn:aws:sqs:us-east-2:123456789012:ansible-test-sqs",
+ "maximum_batching_window_in_seconds": 1,
+ "batch_size": 250,
+ "starting_position": "END",
+ "function_response_types": ["ReportBatchItemFailures"],
+ },
+ "alias": None,
+ "version": 0,
+ },
+ {
+ "FunctionName": "sqs_consumer",
+ "EventSourceArn": "arn:aws:sqs:us-east-2:123456789012:ansible-test-sqs",
+ "StartingPosition": "END",
+ "Enabled": True,
+ "MaximumBatchingWindowInSeconds": 1,
+ "BatchSize": 250,
+ "FunctionResponseTypes": ["ReportBatchItemFailures"],
+ },
+ ),
+ (
+ {
+ "state": "present",
+ "lambda_function_arn": "sqs_consumer",
+ "event_source": "stream",
+ "source_params": {
+ "source_arn": "arn:aws:sqs:us-east-2:123456789012:ansible-test-sqs",
+ "maximum_batching_window_in_seconds": 1,
+ "batch_size": 250,
+ "starting_position": "END",
+ "function_response_types": ["ReportBatchItemFailures"],
+ "enabled": "no",
+ },
+ "alias": None,
+ "version": 0,
+ },
+ {
+ "FunctionName": "sqs_consumer",
+ "EventSourceArn": "arn:aws:sqs:us-east-2:123456789012:ansible-test-sqs",
+ "StartingPosition": "END",
+ "Enabled": False,
+ "MaximumBatchingWindowInSeconds": 1,
+ "BatchSize": 250,
+ "FunctionResponseTypes": ["ReportBatchItemFailures"],
+ },
+ ),
+ (
+ {
+ "state": "present",
+ "lambda_function_arn": "sqs_consumer",
+ "event_source": "sqs",
+ "source_params": {
+ "source_arn": "arn:aws:sqs:us-east-2:123456789012:ansible-test-sqs",
+ "maximum_batching_window_in_seconds": 1,
+ "batch_size": 101,
+ },
+ "alias": None,
+ "version": 0,
+ },
+ {
+ "FunctionName": "sqs_consumer",
+ "EventSourceArn": "arn:aws:sqs:us-east-2:123456789012:ansible-test-sqs",
+ "Enabled": True,
+ "MaximumBatchingWindowInSeconds": 1,
+ "BatchSize": 101,
+ },
+ ),
+ ],
+)
+@patch(mock_camel_dict_to_snake_dict)
+def test_lambda_event_stream_create_event(
+ mock_camel_dict_to_snake_dict, ansible_aws_module, check_mode, module_params, api_params
+):
+ ansible_aws_module.params = module_params
+ ansible_aws_module.check_mode = check_mode
+
+ client = MagicMock()
+ client.list_event_source_mappings = MagicMock()
+ client.list_event_source_mappings.return_value = {"EventSourceMappings": []}
+
+ client.create_event_source_mapping = MagicMock()
+ event_source_created = {"msg": "event source successfully created."}
+ client.create_event_source_mapping.return_value = event_source_created
+ mock_camel_dict_to_snake_dict.side_effect = lambda x: x
+
+ result = lambda_event_stream(ansible_aws_module, client)
+
+ events = []
+
+ if not check_mode:
+ events = event_source_created
+ client.create_event_source_mapping.assert_called_once_with(**api_params)
+ else:
+ client.create_event_source_mapping.assert_not_called()
+
+ assert dict(changed=True, events=events) == result
+
+
+@pytest.mark.parametrize(
+ "check_mode",
+ [True, False],
+)
+@pytest.mark.parametrize(
+ "module_source_params,current_mapping,api_params",
+ [
+ (
+ {"batch_size": 100, "enabled": "false"},
+ {"BatchSize": 120, "State": "Enabled"},
+ {"BatchSize": 100, "Enabled": False},
+ ),
+ (
+ {"batch_size": 100, "enabled": "true"},
+ {"BatchSize": 100, "State": "Enabled"},
+ {},
+ ),
+ ],
+)
+@patch(mock_camel_dict_to_snake_dict)
+def test_lambda_event_stream_update_event(
+ mock_camel_dict_to_snake_dict, ansible_aws_module, check_mode, module_source_params, current_mapping, api_params
+):
+ function_name = "ansible-test-update-event-function"
+ ansible_aws_module.params.update({"lambda_function_arn": function_name})
+ ansible_aws_module.params["source_params"].update(module_source_params)
+ ansible_aws_module.check_mode = check_mode
+
+ client = MagicMock()
+ client.list_event_source_mappings = MagicMock()
+ existing_event_source = deepcopy(EventSourceMappings)
+ existing_event_source[0].update(current_mapping)
+ client.list_event_source_mappings.return_value = {"EventSourceMappings": existing_event_source}
+
+ client.update_event_source_mapping = MagicMock()
+ event_source_updated = {"msg": "event source successfully updated."}
+ client.update_event_source_mapping.return_value = event_source_updated
+ mock_camel_dict_to_snake_dict.side_effect = lambda x: x
+
+ result = lambda_event_stream(ansible_aws_module, client)
+ if not api_params:
+ assert dict(changed=False, events=existing_event_source) == result
+ client.update_event_source_mapping.assert_not_called()
+ elif check_mode:
+ assert dict(changed=True, events=existing_event_source) == result
+ client.update_event_source_mapping.assert_not_called()
+ else:
+ api_params.update({"FunctionName": function_name, "UUID": existing_event_source[0]["UUID"]})
+ assert dict(changed=True, events=event_source_updated) == result
+ client.update_event_source_mapping.assert_called_once_with(**api_params)
+
+
+@pytest.mark.parametrize(
+ "params, expected, exception, message, source_type",
+ [
+ (
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052.fifo",
+ "enabled": True,
+ "batch_size": 100,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": None,
+ },
+ None,
+ pytest.raises(SystemExit),
+ "For FIFO queues the maximum batch_size is 10.",
+ "sqs",
+ ),
+ (
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052.fifo",
+ "enabled": True,
+ "batch_size": 10,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": 1,
+ },
+ None,
+ pytest.raises(SystemExit),
+ "maximum_batching_window_in_seconds is not supported by Amazon SQS FIFO event sources.",
+ "sqs",
+ ),
+ (
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052.fifo",
+ "enabled": True,
+ "batch_size": 10,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": None,
+ },
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052.fifo",
+ "enabled": True,
+ "batch_size": 10,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": None,
+ },
+ does_not_raise(),
+ None,
+ "sqs",
+ ),
+ (
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052",
+ "enabled": True,
+ "batch_size": 11000,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": None,
+ },
+ None,
+ pytest.raises(SystemExit),
+ "For standard queue batch_size must be lower than 10000.",
+ "sqs",
+ ),
+ (
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052",
+ "enabled": True,
+ "batch_size": 100,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": None,
+ },
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052",
+ "enabled": True,
+ "batch_size": 100,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": 1,
+ },
+ does_not_raise(),
+ None,
+ "sqs",
+ ),
+ (
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052",
+ "enabled": True,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": None,
+ },
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052",
+ "enabled": True,
+ "batch_size": 100,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": 1,
+ },
+ does_not_raise(),
+ None,
+ "stream",
+ ),
+ (
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052",
+ "enabled": True,
+ "starting_position": None,
+ "function_response_types": None,
+ },
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052",
+ "enabled": True,
+ "batch_size": 10,
+ "starting_position": None,
+ "function_response_types": None,
+ },
+ does_not_raise(),
+ None,
+ "sqs",
+ ),
+ (
+ {
+ "source_arn": "arn:aws:sqs:us-east-1:123456789012:ansible-test-28277052",
+ "enabled": True,
+ "batch_size": 10,
+ "starting_position": None,
+ "function_response_types": None,
+ "maximum_batching_window_in_seconds": None,
+ },
+ None,
+ pytest.raises(SystemExit),
+ "batch_size for streams must be between 100 and 10000",
+ "stream",
+ ),
+ ],
+)
+def test__set_default_values(params, expected, exception, message, source_type):
+ result = None
+ module = MagicMock()
+ module.check_mode = False
+ module.params = {
+ "event_source": source_type,
+ "source_params": params,
+ }
+ module.fail_json = MagicMock()
+ module.fail_json.side_effect = SystemExit(message)
+ with exception as e:
+ result = set_default_values(module, params)
+ assert message is None or message in str(e)
+ if expected is not None:
+ assert result == expected
diff --git a/ansible_collections/amazon/aws/tox.ini b/ansible_collections/amazon/aws/tox.ini
index 179ed761c..ef66e76ea 100644
--- a/ansible_collections/amazon/aws/tox.ini
+++ b/ansible_collections/amazon/aws/tox.ini
@@ -74,6 +74,16 @@ deps =
commands =
flake8 {posargs} {[common]format_dirs}
+[testenv:pylint-lint]
+# Additional pylint tests that ansible-test currently ignores
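+# Run locally with: tox -e pylint-lint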
+deps =
+ pylint
+commands =
+ pylint \
+ --disable R,C,W,E \
+ --enable consider-using-dict-items,assignment-from-no-return,no-else-continue,no-else-break,simplifiable-if-statement,pointless-string-statement,redefined-outer-name,redefined-builtin \
+ {toxinidir}/plugins
+
[testenv:flynt]
deps =
flynt
@@ -84,7 +94,7 @@ commands =
deps =
flynt
commands =
- flynt --dry-run {[common]format_dirs}
+ flynt --dry-run --fail-on-change {[common]format_dirs}
[testenv:linters]
deps =